Diffstat (limited to 'drivers/iommu/amd')

 drivers/iommu/amd/Kconfig           |   20
 drivers/iommu/amd/Makefile          |    3
 drivers/iommu/amd/amd_iommu.h       |  196
 drivers/iommu/amd/amd_iommu_types.h |  639
 drivers/iommu/amd/debugfs.c         |  378
 drivers/iommu/amd/init.c            | 2176
 drivers/iommu/amd/io_pgtable.c      |  560
 drivers/iommu/amd/iommu.c           | 3472
 drivers/iommu/amd/iommu_v2.c        |  986
 drivers/iommu/amd/pasid.c           |  203
 drivers/iommu/amd/ppr.c             |  273
 drivers/iommu/amd/quirks.c          |    4

 12 files changed, 4828 insertions(+), 4082 deletions(-)
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index a3cbafb603f5..f2acf471cb5d 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -7,10 +7,17 @@ config AMD_IOMMU
 	select PCI_ATS
 	select PCI_PRI
 	select PCI_PASID
+	select IRQ_MSI_LIB
+	select MMU_NOTIFIER
 	select IOMMU_API
 	select IOMMU_IOVA
-	select IOMMU_DMA
-	select IOMMU_IO_PGTABLE
+	select IOMMU_SVA
+	select IOMMU_IOPF
+	select IOMMUFD_DRIVER if IOMMUFD
+	select GENERIC_PT
+	select IOMMU_PT
+	select IOMMU_PT_AMDV1
+	select IOMMU_PT_X86_64
 	depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
 	help
 	  With this option you can enable support for AMD IOMMU hardware in
@@ -23,15 +30,6 @@ config AMD_IOMMU
 	  your BIOS for an option to enable it or if you have an IVRS ACPI
 	  table.
 
-config AMD_IOMMU_V2
-	tristate "AMD IOMMU Version 2 driver"
-	depends on AMD_IOMMU
-	select MMU_NOTIFIER
-	help
-	  This option enables support for the AMD IOMMUv2 features of the IOMMU
-	  hardware. Select this option if you want to use devices that support
-	  the PCI PRI and PASID interface.
-
 config AMD_IOMMU_DEBUGFS
 	bool "Enable AMD IOMMU internals in DebugFS"
 	depends on AMD_IOMMU && IOMMU_DEBUGFS
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index a935f8f4b974..5412a563c697 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o
+obj-y += iommu.o init.o quirks.o ppr.o pasid.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
-obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 416815a525d6..25044d28f28a 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -11,60 +11,90 @@
 
 #include "amd_iommu_types.h"
 
-extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
+irqreturn_t amd_iommu_int_thread(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
+irqreturn_t amd_iommu_int_handler(int irq, void *data);
+void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+			   u8 cntrl_intr, u8 cntrl_log,
+			   u32 status_run_mask, u32 status_overflow_mask);
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
+void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
+void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+				  gfp_t gfp, size_t size);
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+void amd_iommu_debugfs_setup(void);
 #else
-static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+static inline void amd_iommu_debugfs_setup(void) {}
 #endif
 
 /* Needed for interrupt remapping */
-extern int amd_iommu_prepare(void);
-extern int amd_iommu_enable(void);
-extern void amd_iommu_disable(void);
-extern int amd_iommu_reenable(int);
-extern int amd_iommu_enable_faulting(void);
+int amd_iommu_prepare(void);
+int amd_iommu_enable(void);
+void amd_iommu_disable(void);
+int amd_iommu_reenable(int mode);
+int amd_iommu_enable_faulting(unsigned int cpu);
 extern int amd_iommu_guest_ir;
-extern enum io_pgtable_fmt amd_iommu_pgtable;
-
-/* IOMMUv2 specific functions */
-struct iommu_domain;
-
-extern bool amd_iommu_v2_supported(void);
-extern struct amd_iommu *get_amd_iommu(unsigned int idx);
-extern u8 amd_iommu_pc_get_max_banks(unsigned int idx);
-extern bool amd_iommu_pc_supported(void);
-extern u8 amd_iommu_pc_get_max_counters(unsigned int idx);
-extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
-				u8 fxn, u64 *value);
-extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
-				u8 fxn, u64 *value);
-
-extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
-extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
-extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
-extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
-extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
-				u64 address);
-extern void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
-extern void amd_iommu_domain_update(struct protection_domain *domain);
-extern void amd_iommu_domain_flush_complete(struct protection_domain *domain);
-extern void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
-extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
-extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
-				     unsigned long cr3);
-extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
+extern enum protection_domain_mode amd_iommu_pgtable;
+extern int amd_iommu_gpt_level;
+extern u8 amd_iommu_hpt_level;
+extern unsigned long amd_iommu_pgsize_bitmap;
+extern bool amd_iommu_hatdis;
+
+/* Protection domain ops */
+void amd_iommu_init_identity_domain(void);
+struct protection_domain *protection_domain_alloc(void);
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm);
+void amd_iommu_domain_free(struct iommu_domain *dom);
+int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
+			    struct device *dev, ioasid_t pasid,
+			    struct iommu_domain *old);
+void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+				struct iommu_domain *domain);
+
+/* SVA/PASID */
+bool amd_iommu_pasid_supported(void);
+
+/* IOPF */
+int amd_iommu_iopf_init(struct amd_iommu *iommu);
+void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
+void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
+			     struct iommu_page_response *resp);
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+			      struct iommu_dev_data *dev_data);
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+				  struct iommu_dev_data *dev_data);
+
+/* GCR3 setup */
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
+		       ioasid_t pasid, unsigned long gcr3);
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
+
+/* PPR */
+int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu);
+void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_enable_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_poll_ppr_log(struct amd_iommu *iommu);
+int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
+
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+				  u64 address, size_t size);
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+				     ioasid_t pasid, u64 address, size_t size);
 
 #ifdef CONFIG_IRQ_REMAP
-extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
+int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
 #else
 static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 {
@@ -72,22 +102,32 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 }
 #endif
 
-#define PPR_SUCCESS			0x0
-#define PPR_INVALID			0x1
-#define PPR_FAILURE			0xf
-
-extern int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
-				  int status, int tag);
-
 static inline bool is_rd890_iommu(struct pci_dev *pdev)
 {
 	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
 	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
 }
 
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
+static inline bool check_feature(u64 mask)
 {
-	return !!(iommu->features & mask);
+	return (amd_iommu_efr & mask);
+}
+
+static inline bool check_feature2(u64 mask)
+{
+	return (amd_iommu_efr2 & mask);
+}
+
+static inline bool amd_iommu_v2_pgtbl_supported(void)
+{
+	return (check_feature(FEATURE_GIOSUP) && check_feature(FEATURE_GT));
+}
+
+static inline bool amd_iommu_gt_ppr_supported(void)
+{
+	return (amd_iommu_v2_pgtbl_supported() &&
+		check_feature(FEATURE_PPR) &&
+		check_feature(FEATURE_EPHSUP));
 }
 
 static inline u64 iommu_virt_to_phys(void *vaddr)
@@ -100,33 +140,49 @@ static inline void *iommu_phys_to_virt(unsigned long paddr)
 	return phys_to_virt(__sme_clr(paddr));
 }
 
-static inline
-void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
+static inline int get_pci_sbdf_id(struct pci_dev *pdev)
+{
+	int seg = pci_domain_nr(pdev->bus);
+	u16 devid = pci_dev_id(pdev);
+
+	return PCI_SEG_DEVID_TO_SBDF(seg, devid);
+}
+
+bool amd_iommu_ht_range_ignore(void);
+
+/*
+ * This must be called after device probe completes. During probe
+ * use rlookup_amd_iommu() to get the iommu.
+ */
+static inline struct amd_iommu *get_amd_iommu_from_dev(struct device *dev)
 {
-	atomic64_set(&domain->iop.pt_root, root);
-	domain->iop.root = (u64 *)(root & PAGE_MASK);
-	domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
+	return iommu_get_iommu_dev(dev, struct amd_iommu, iommu);
 }
 
-static inline
-void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
+/* This must be called after device probe completes. */
+static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_data *dev_data)
 {
-	amd_iommu_domain_set_pt_root(domain, 0);
+	return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
 }
 
+static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct protection_domain, domain);
+}
 
-extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-					 struct device *dev);
-extern int __init add_special_device(u8 type, u8 id, u16 *devid,
-				     bool cmd_line);
+bool translation_pre_enabled(struct amd_iommu *iommu);
+int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
 
 #ifdef CONFIG_DMI
 void amd_iommu_apply_ivrs_quirks(void);
 #else
 static inline void amd_iommu_apply_ivrs_quirks(void) { }
 #endif
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
 
-extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
-					 u64 *root, int mode);
-#endif
+void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+				  u64 *root, int mode);
+struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid);
+
+#endif /* AMD_IOMMU_H */
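[Editor's note] The per-IOMMU iommu_feature() test above is replaced by check_feature()/check_feature2() against the global amd_iommu_efr/amd_iommu_efr2 masks, which are the common denominator across all IOMMUs (see get_global_efr() in the init.c portion further down). The following is a minimal standalone sketch of that idea, not kernel code: the mask values and helper mirror the patch, but the illustrative EFR value and the userspace harness are assumptions for demonstration only.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(n)	(1ULL << (n))
	#define FEATURE_PPR	BIT_ULL(1)	/* masks copied from the patch */
	#define FEATURE_GT	BIT_ULL(4)
	#define FEATURE_GIOSUP	BIT_ULL(48)

	/* Global EFR mask; the driver ANDs this over all IOMMUs at init time. */
	static uint64_t amd_iommu_efr;

	static bool check_feature(uint64_t mask)
	{
		return (amd_iommu_efr & mask);
	}

	int main(void)
	{
		amd_iommu_efr = FEATURE_GT | FEATURE_GIOSUP;	/* illustrative value */

		/* amd_iommu_v2_pgtbl_supported() is GIOSUP && GT */
		printf("v2 pgtbl: %d, PPR: %d\n",
		       check_feature(FEATURE_GIOSUP) && check_feature(FEATURE_GT),
		       (int)check_feature(FEATURE_PPR));
		return 0;
	}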
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 94c1a7a9876d..320733e7d8b4 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -8,14 +8,17 @@
 #ifndef _ASM_X86_AMD_IOMMU_TYPES_H
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
+#include <linux/bitfield.h>
+#include <linux/iommu.h>
 #include <linux/types.h>
+#include <linux/mmu_notifier.h>
 #include <linux/mutex.h>
 #include <linux/msi.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/irqreturn.h>
-#include <linux/io-pgtable.h>
+#include <linux/generic_pt/iommu.h>
 
 /*
  * Maximum number of IOMMUs supported
@@ -26,8 +29,6 @@
  * some size calculation constants
  */
 #define DEV_TABLE_ENTRY_SIZE		32
-#define ALIAS_TABLE_ENTRY_SIZE		2
-#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))
 
 /* Capability offsets used by the driver */
 #define MMIO_CAP_HDR_OFFSET	0x00
@@ -67,6 +68,7 @@
 #define MMIO_INTCAPXT_EVT_OFFSET	0x0170
 #define MMIO_INTCAPXT_PPR_OFFSET	0x0178
 #define MMIO_INTCAPXT_GALOG_OFFSET	0x0180
+#define MMIO_EXT_FEATURES2		0x01A0
 #define MMIO_CMD_HEAD_OFFSET	0x2000
 #define MMIO_CMD_TAIL_OFFSET	0x2008
 #define MMIO_EVT_HEAD_OFFSET	0x2010
@@ -83,24 +85,37 @@
 
 /* Extended Feature Bits */
-#define FEATURE_PREFETCH	(1ULL<<0)
-#define FEATURE_PPR		(1ULL<<1)
-#define FEATURE_X2APIC		(1ULL<<2)
-#define FEATURE_NX		(1ULL<<3)
-#define FEATURE_GT		(1ULL<<4)
-#define FEATURE_IA		(1ULL<<6)
-#define FEATURE_GA		(1ULL<<7)
-#define FEATURE_HE		(1ULL<<8)
-#define FEATURE_PC		(1ULL<<9)
-#define FEATURE_GAM_VAPIC	(1ULL<<21)
-#define FEATURE_EPHSUP		(1ULL<<50)
-#define FEATURE_SNP		(1ULL<<63)
-
-#define FEATURE_PASID_SHIFT	32
-#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)
-
-#define FEATURE_GLXVAL_SHIFT	14
-#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)
+#define FEATURE_PREFETCH	BIT_ULL(0)
+#define FEATURE_PPR		BIT_ULL(1)
+#define FEATURE_X2APIC		BIT_ULL(2)
+#define FEATURE_NX		BIT_ULL(3)
+#define FEATURE_GT		BIT_ULL(4)
+#define FEATURE_IA		BIT_ULL(6)
+#define FEATURE_GA		BIT_ULL(7)
+#define FEATURE_HE		BIT_ULL(8)
+#define FEATURE_PC		BIT_ULL(9)
+#define FEATURE_HATS		GENMASK_ULL(11, 10)
+#define FEATURE_GATS		GENMASK_ULL(13, 12)
+#define FEATURE_GLX		GENMASK_ULL(15, 14)
+#define FEATURE_GAM_VAPIC	BIT_ULL(21)
+#define FEATURE_PASMAX		GENMASK_ULL(36, 32)
+#define FEATURE_GIOSUP		BIT_ULL(48)
+#define FEATURE_HASUP		BIT_ULL(49)
+#define FEATURE_EPHSUP		BIT_ULL(50)
+#define FEATURE_HDSUP		BIT_ULL(52)
+#define FEATURE_SNP		BIT_ULL(63)
+
+
+/* Extended Feature 2 Bits */
+#define FEATURE_SEVSNPIO_SUP	BIT_ULL(1)
+#define FEATURE_SNPAVICSUP	GENMASK_ULL(7, 5)
+#define FEATURE_SNPAVICSUP_GAM(x) \
+	(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
+#define FEATURE_HT_RANGE_IGNORE	BIT_ULL(11)
+
+#define FEATURE_NUM_INT_REMAP_SUP	GENMASK_ULL(9, 8)
+#define FEATURE_NUM_INT_REMAP_SUP_2K(x) \
+	(FIELD_GET(FEATURE_NUM_INT_REMAP_SUP, x) == 0x1)
 
 /* Note:
  * The current driver only supports 16-bit PASID.
@@ -110,12 +125,16 @@
 #define PASID_MASK		0x0000ffff
 
 /* MMIO status bits */
-#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
-#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
-#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)
-#define MMIO_STATUS_GALOG_RUN_MASK	(1 << 8)
-#define MMIO_STATUS_GALOG_OVERFLOW_MASK	(1 << 9)
-#define MMIO_STATUS_GALOG_INT_MASK	(1 << 10)
+#define MMIO_STATUS_EVT_OVERFLOW_MASK	BIT(0)
+#define MMIO_STATUS_EVT_INT_MASK	BIT(1)
+#define MMIO_STATUS_COM_WAIT_INT_MASK	BIT(2)
+#define MMIO_STATUS_EVT_RUN_MASK	BIT(3)
+#define MMIO_STATUS_PPR_OVERFLOW_MASK	BIT(5)
+#define MMIO_STATUS_PPR_INT_MASK	BIT(6)
+#define MMIO_STATUS_PPR_RUN_MASK	BIT(7)
+#define MMIO_STATUS_GALOG_RUN_MASK	BIT(8)
+#define MMIO_STATUS_GALOG_OVERFLOW_MASK	BIT(9)
+#define MMIO_STATUS_GALOG_INT_MASK	BIT(10)
 
 /* event logging constants */
 #define EVENT_ENTRY_SIZE	0x10
@@ -138,31 +157,39 @@
 #define EVENT_DOMID_MASK_HI	0xf0000
 #define EVENT_FLAGS_MASK	0xfff
 #define EVENT_FLAGS_SHIFT	0x10
+#define EVENT_FLAG_RW		0x020
+#define EVENT_FLAG_I		0x008
 
 /* feature control bits */
-#define CONTROL_IOMMU_EN	0x00ULL
-#define CONTROL_HT_TUN_EN	0x01ULL
-#define CONTROL_EVT_LOG_EN	0x02ULL
-#define CONTROL_EVT_INT_EN	0x03ULL
-#define CONTROL_COMWAIT_EN	0x04ULL
-#define CONTROL_INV_TIMEOUT	0x05ULL
-#define CONTROL_PASSPW_EN	0x08ULL
-#define CONTROL_RESPASSPW_EN	0x09ULL
-#define CONTROL_COHERENT_EN	0x0aULL
-#define CONTROL_ISOC_EN		0x0bULL
-#define CONTROL_CMDBUF_EN	0x0cULL
-#define CONTROL_PPRLOG_EN	0x0dULL
-#define CONTROL_PPRINT_EN	0x0eULL
-#define CONTROL_PPR_EN		0x0fULL
-#define CONTROL_GT_EN		0x10ULL
-#define CONTROL_GA_EN		0x11ULL
-#define CONTROL_GAM_EN		0x19ULL
-#define CONTROL_GALOG_EN	0x1CULL
-#define CONTROL_GAINT_EN	0x1DULL
-#define CONTROL_XT_EN		0x32ULL
-#define CONTROL_INTCAPXT_EN	0x33ULL
-
-#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
+#define CONTROL_IOMMU_EN	0
+#define CONTROL_HT_TUN_EN	1
+#define CONTROL_EVT_LOG_EN	2
+#define CONTROL_EVT_INT_EN	3
+#define CONTROL_COMWAIT_EN	4
+#define CONTROL_INV_TIMEOUT	5
+#define CONTROL_PASSPW_EN	8
+#define CONTROL_RESPASSPW_EN	9
+#define CONTROL_COHERENT_EN	10
+#define CONTROL_ISOC_EN		11
+#define CONTROL_CMDBUF_EN	12
+#define CONTROL_PPRLOG_EN	13
+#define CONTROL_PPRINT_EN	14
+#define CONTROL_PPR_EN		15
+#define CONTROL_GT_EN		16
+#define CONTROL_GA_EN		17
+#define CONTROL_GAM_EN		25
+#define CONTROL_GALOG_EN	28
+#define CONTROL_GAINT_EN	29
+#define CONTROL_NUM_INT_REMAP_MODE	43
+#define CONTROL_NUM_INT_REMAP_MODE_MASK	0x03
+#define CONTROL_NUM_INT_REMAP_MODE_2K	0x01
+#define CONTROL_EPH_EN		45
+#define CONTROL_XT_EN		50
+#define CONTROL_INTCAPXT_EN	51
+#define CONTROL_IRTCACHEDIS	59
+#define CONTROL_SNPAVIC_EN	61
+
+#define CTRL_INV_TO_MASK	7
 #define CTRL_INV_TO_NONE	0
 #define CTRL_INV_TO_1MS		1
 #define CTRL_INV_TO_10MS	2
@@ -194,6 +221,7 @@
 /* macros and definitions for device table entries */
 #define DEV_ENTRY_VALID         0x00
 #define DEV_ENTRY_TRANSLATION   0x01
+#define DEV_ENTRY_HAD           0x07
 #define DEV_ENTRY_PPR           0x34
 #define DEV_ENTRY_IR            0x3d
 #define DEV_ENTRY_IW            0x3e
@@ -201,6 +229,8 @@
 #define DEV_ENTRY_EX            0x67
 #define DEV_ENTRY_SYSMGT1       0x68
 #define DEV_ENTRY_SYSMGT2       0x69
+#define DTE_DATA1_SYSMGT_MASK	GENMASK_ULL(41, 40)
+
 #define DEV_ENTRY_IRQ_TBL_EN	0x80
 #define DEV_ENTRY_INIT_PASS     0xb8
 #define DEV_ENTRY_EINT_PASS     0xb9
@@ -218,6 +248,10 @@
 #define CMD_BUFFER_ENTRIES	512
 #define MMIO_CMD_SIZE_SHIFT	56
 #define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)
+#define MMIO_CMD_HEAD_MASK	GENMASK_ULL(18, 4)	/* Command buffer head ptr field [18:4] */
+#define MMIO_CMD_BUFFER_HEAD(x)	FIELD_GET(MMIO_CMD_HEAD_MASK, (x))
+#define MMIO_CMD_TAIL_MASK	GENMASK_ULL(18, 4)	/* Command buffer tail ptr field [18:4] */
+#define MMIO_CMD_BUFFER_TAIL(x)	FIELD_GET(MMIO_CMD_TAIL_MASK, (x))
 
 /* constants for event buffer handling */
 #define EVT_BUFFER_SIZE		8192 /* 512 entries */
@@ -230,6 +264,14 @@
 #define PPR_ENTRY_SIZE		16
 #define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
 
+/* PAGE_SERVICE_REQUEST PPR Log Buffer Entry flags */
+#define PPR_FLAG_EXEC		0x002	/* Execute permission requested */
+#define PPR_FLAG_READ		0x004	/* Read permission requested */
+#define PPR_FLAG_WRITE		0x020	/* Write permission requested */
+#define PPR_FLAG_US		0x040	/* 1: User, 0: Supervisor */
+#define PPR_FLAG_RVSD		0x080	/* Reserved bit not zero */
+#define PPR_FLAG_GN		0x100	/* GVA and PASID is valid */
+
 #define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
 #define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
 #define PPR_DEVID(x)		((x) & 0xffffULL)
@@ -263,24 +305,30 @@
  * that we support.
  *
  * 512GB Pages are not supported due to a hardware bug
+ * Page sizes >= the 52 bit max physical address of the CPU are not supported.
  */
-#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
+#define AMD_IOMMU_PGSIZES	(GENMASK_ULL(51, 12) ^ SZ_512G)
+
+/* Special mode where page-sizes are limited to 4 KiB */
+#define AMD_IOMMU_PGSIZES_4K	(PAGE_SIZE)
+
+/* 4K, 2MB, 1G page sizes are supported */
+#define AMD_IOMMU_PGSIZES_V2	(PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
 
 /* Bit value definition for dte irq remapping fields*/
-#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
+#define DTE_IRQ_PHYS_ADDR_MASK	GENMASK_ULL(51, 6)
 #define DTE_IRQ_REMAP_INTCTL_MASK	(0x3ULL << 60)
 #define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
 #define DTE_IRQ_REMAP_ENABLE	1ULL
 
-/*
- * AMD IOMMU hardware only support 512 IRTEs despite
- * the architectural limitation of 2048 entries.
- */
 #define DTE_INTTAB_ALIGNMENT	128
-#define DTE_INTTABLEN_VALUE	9ULL
-#define DTE_INTTABLEN		(DTE_INTTABLEN_VALUE << 1)
 #define DTE_INTTABLEN_MASK	(0xfULL << 1)
-#define MAX_IRQS_PER_TABLE	(1 << DTE_INTTABLEN_VALUE)
+#define DTE_INTTABLEN_VALUE_512	9ULL
+#define DTE_INTTABLEN_512	(DTE_INTTABLEN_VALUE_512 << 1)
+#define MAX_IRQS_PER_TABLE_512	BIT(DTE_INTTABLEN_VALUE_512)
+#define DTE_INTTABLEN_VALUE_2K	11ULL
+#define DTE_INTTABLEN_2K	(DTE_INTTABLEN_VALUE_2K << 1)
+#define MAX_IRQS_PER_TABLE_2K	BIT(DTE_INTTABLEN_VALUE_2K)
 
 #define PAGE_MODE_NONE    0x00
 #define PAGE_MODE_1_LEVEL 0x01
@@ -291,104 +339,38 @@
 #define PAGE_MODE_6_LEVEL 0x06
 #define PAGE_MODE_7_LEVEL 0x07
 
-#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
-#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
-				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
-				   (0xffffffffffffffffULL))
-#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
-#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
-#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
-				 IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
-#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
-
-#define PM_MAP_4k		0
-#define PM_ADDR_MASK		0x000ffffffffff000ULL
-#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
-				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
-#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
-
-/*
- * Returns the page table level to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_LEVEL(pagesize) \
-		((__ffs(pagesize) - 12) / 9)
-/*
- * Returns the number of ptes to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_PTE_COUNT(pagesize) \
-		(1ULL << ((__ffs(pagesize) - 12) % 9))
-
-/*
- * Aligns a given io-virtual address to a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_ALIGN(address, pagesize) \
-		((address) & ~((pagesize) - 1))
-/*
- * Creates an IOMMU PTE for an address and a given pagesize
- * The PTE has no permission bits set
- * Pagesize is expected to be a power-of-two larger than 4096
- */
-#define PAGE_SIZE_PTE(address, pagesize)		\
-		(((address) | ((pagesize) - 1)) &	\
-		 (~(pagesize >> 1)) & PM_ADDR_MASK)
-
-/*
- * Takes a PTE value with mode=0x07 and returns the page size it maps
- */
-#define PTE_PAGE_SIZE(pte) \
-		(1ULL << (1 + ffz(((pte) | 0xfffULL))))
-
-/*
- * Takes a page-table level and returns the default page-size for this level
- */
-#define PTE_LEVEL_PAGE_SIZE(level)			\
-		(1ULL << (12 + (9 * (level))))
+#define GUEST_PGTABLE_4_LEVEL	0x00
+#define GUEST_PGTABLE_5_LEVEL	0x01
 
-/*
- * Bit value definition for I/O PTE fields
- */
-#define IOMMU_PTE_PR	(1ULL << 0)
-#define IOMMU_PTE_U	(1ULL << 59)
-#define IOMMU_PTE_FC	(1ULL << 60)
-#define IOMMU_PTE_IR	(1ULL << 61)
-#define IOMMU_PTE_IW	(1ULL << 62)
+#define PM_ADDR_MASK		0x000ffffffffff000ULL
 
 /*
  * Bit value definition for DTE fields
  */
-#define DTE_FLAG_V	(1ULL << 0)
-#define DTE_FLAG_TV	(1ULL << 1)
-#define DTE_FLAG_IR	(1ULL << 61)
-#define DTE_FLAG_IW	(1ULL << 62)
-
-#define DTE_FLAG_IOTLB	(1ULL << 32)
-#define DTE_FLAG_GV	(1ULL << 55)
+#define DTE_FLAG_V	BIT_ULL(0)
+#define DTE_FLAG_TV	BIT_ULL(1)
+#define DTE_FLAG_HAD	(3ULL << 7)
+#define DTE_FLAG_GIOV	BIT_ULL(54)
+#define DTE_FLAG_GV	BIT_ULL(55)
+#define DTE_GLX		GENMASK_ULL(57, 56)
+#define DTE_FLAG_IR	BIT_ULL(61)
+#define DTE_FLAG_IW	BIT_ULL(62)
+
+#define DTE_FLAG_IOTLB	BIT_ULL(32)
 #define DTE_FLAG_MASK	(0x3ffULL << 32)
-#define DTE_GLX_SHIFT	(56)
-#define DTE_GLX_MASK	(3)
 #define DEV_DOMID_MASK	0xffffULL
 
-#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
-#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)
+#define DTE_GCR3_14_12	GENMASK_ULL(60, 58)
+#define DTE_GCR3_30_15	GENMASK_ULL(31, 16)
+#define DTE_GCR3_51_31	GENMASK_ULL(63, 43)
 
-#define DTE_GCR3_INDEX_A	0
-#define DTE_GCR3_INDEX_B	1
-#define DTE_GCR3_INDEX_C	1
-
-#define DTE_GCR3_SHIFT_A	58
-#define DTE_GCR3_SHIFT_B	16
-#define DTE_GCR3_SHIFT_C	43
+#define DTE_GPT_LEVEL_SHIFT	54
+#define DTE_GPT_LEVEL_MASK	GENMASK_ULL(55, 54)
 
 #define GCR3_VALID		0x01ULL
 
-#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
-#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
-#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
-#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
+/* DTE[128:179] | DTE[184:191] */
+#define DTE_DATA2_INTR_MASK	~GENMASK_ULL(55, 52)
 
 #define IOMMU_PROT_MASK 0x03
 #define IOMMU_PROT_IR 0x01
@@ -404,10 +386,14 @@
 /* IOMMU IVINFO */
 #define IOMMU_IVINFO_OFFSET     36
 #define IOMMU_IVINFO_EFRSUP	BIT(0)
+#define IOMMU_IVINFO_DMA_REMAP	BIT(1)
 
 /* IOMMU Feature Reporting Field (for IVHD type 10h) */
 #define IOMMU_FEAT_GASUP_SHIFT	6
 
+/* IOMMU HATDIS for IVHD type 11h and 40h */
+#define IOMMU_IVHD_ATTR_HATDIS_SHIFT	0
+
 /* IOMMU Extended Feature Register (EFR) */
 #define IOMMU_EFR_XTSUP_SHIFT	2
 #define IOMMU_EFR_GASUP_SHIFT	7
@@ -415,19 +401,15 @@
 
 #define MAX_DOMAIN_ID 65536
 
-/* Protection domain flags */
-#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
-#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
-					      domain for an IOMMU */
-#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
-					      translation */
-#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */
+/* Timeout stuff */
+#define LOOP_TIMEOUT		100000
+#define MMIO_STATUS_TIMEOUT	2000000
 
 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)				\
 	do {							\
 		if (amd_iommu_dump)				\
-			pr_info("AMD-Vi: " format, ## arg);	\
+			pr_info(format, ## arg);		\
 	} while(0);
 
 /* global flag if IOMMUs cache non-present entries */
@@ -441,14 +423,24 @@ struct irq_remap_table {
 	u32 *table;
 };
 
-extern struct irq_remap_table **irq_lookup_table;
-
 /* Interrupt remapping feature used? */
 extern bool amd_iommu_irq_remap;
 
-/* kmem_cache to get tables with 128 byte alignment */
-extern struct kmem_cache *amd_iommu_irq_cache;
+extern const struct iommu_ops amd_iommu_ops;
+
+/* IVRS indicates that pre-boot remapping was enabled */
+extern bool amdr_ivrs_remap_support;
+
+#define PCI_SBDF_TO_SEGID(sbdf)		(((sbdf) >> 16) & 0xffff)
+#define PCI_SBDF_TO_DEVID(sbdf)		((sbdf) & 0xffff)
+#define PCI_SEG_DEVID_TO_SBDF(seg, devid)	((((u32)(seg) & 0xffff) << 16) | \
+						 ((devid) & 0xffff))
+
+/* Make iterating over all pci segments easier */
+#define for_each_pci_segment(pci_seg) \
+	list_for_each_entry((pci_seg), &amd_iommu_pci_seg_list, list)
+#define for_each_pci_segment_safe(pci_seg, next) \
+	list_for_each_entry_safe((pci_seg), (next), &amd_iommu_pci_seg_list, list)
 
 /*
  * Make iterating over all IOMMUs easier
 */
@@ -456,53 +448,49 @@
 	list_for_each_entry((iommu), &amd_iommu_list, list)
 #define for_each_iommu_safe(iommu, next) \
 	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+/* Making iterating over protection_domain->dev_data_list easier */
+#define for_each_pdom_dev_data(pdom_dev_data, pdom) \
+	list_for_each_entry(pdom_dev_data, &pdom->dev_data_list, list)
+#define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \
+	list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list)
 
-#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
-#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
-#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
-#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
-#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
-#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)
-
-/*
- * This struct is used to pass information about
- * incoming PPR faults around.
- */
-struct amd_iommu_fault {
-	u64 address;    /* IO virtual address of the fault*/
-	u32 pasid;      /* Address space identifier */
-	u16 device_id;  /* Originating PCI device id */
-	u16 tag;        /* PPR tag */
-	u16 flags;      /* Fault flags */
-
-};
-
+#define for_each_ivhd_dte_flags(entry) \
+	list_for_each_entry((entry), &amd_ivhd_dev_flags_list, list)
 
+struct amd_iommu;
 struct iommu_domain;
 struct irq_domain;
 struct amd_irte_ops;
 
 #define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED	(1 << 0)
 
-#define io_pgtable_to_data(x) \
-	container_of((x), struct amd_io_pgtable, iop)
-
-#define io_pgtable_ops_to_data(x) \
-	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+struct gcr3_tbl_info {
+	u64	*gcr3_tbl;	/* Guest CR3 table */
+	int	glx;		/* Number of levels for GCR3 table */
+	u32	pasid_cnt;	/* Track attached PASIDs */
+	u16	domid;		/* Per device domain ID */
+};
 
-#define io_pgtable_ops_to_domain(x) \
-	container_of(io_pgtable_ops_to_data(x), \
-		     struct protection_domain, iop)
+enum protection_domain_mode {
+	PD_MODE_NONE,
+	PD_MODE_V1,
+	PD_MODE_V2,
+};
 
-#define io_pgtable_cfg_to_data(x) \
-	container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+/* Track dev_data/PASID list for the protection domain */
+struct pdom_dev_data {
+	/* Points to attached device data */
+	struct iommu_dev_data *dev_data;
+	/* PASID attached to the protection domain */
+	ioasid_t pasid;
+	/* For protection_domain->dev_data_list */
+	struct list_head list;
+};
 
-struct amd_io_pgtable {
-	struct io_pgtable_cfg	pgtbl_cfg;
-	struct io_pgtable	iop;
-	int			mode;
-	u64			*root;
-	atomic64_t		pt_root;    /* pgtable root and pgtable mode */
+/* Keeps track of the IOMMUs attached to protection domain */
+struct pdom_iommu_info {
+	struct amd_iommu *iommu;	/* IOMMUs attach to protection domain */
+	u32 refcnt;	/* Count of attached dev/pasid per domain/IOMMU */
 };
 
 /*
@@ -510,17 +498,87 @@ struct amd_io_pgtable {
  * independent of their use.
 */
 struct protection_domain {
+	union {
+		struct iommu_domain domain;
+		struct pt_iommu iommu;
+		struct pt_iommu_amdv1 amdv1;
+		struct pt_iommu_x86_64 amdv2;
+	};
 	struct list_head dev_list; /* List of all devices in this domain */
-	struct iommu_domain domain; /* generic domain handle used by
-				       iommu core code */
-	struct amd_io_pgtable iop;
 	spinlock_t lock;	/* mostly used to lock the page table */
 	u16 id;			/* the domain id written to the device table */
-	int glx;		/* Number of levels for GCR3 table */
-	u64 *gcr3_tbl;		/* Guest CR3 table */
-	unsigned long flags;	/* flags to find out type of domain */
-	unsigned dev_cnt;	/* devices assigned to this domain */
-	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+	enum protection_domain_mode pd_mode; /* Track page table type */
+	bool dirty_tracking;	/* dirty tracking is enabled in the domain */
+	struct xarray iommu_array;	/* per-IOMMU reference count */
+
+	struct mmu_notifier mn;	/* mmu notifier for the SVA domain */
+	struct list_head dev_data_list; /* List of pdom_dev_data */
+};
+PT_IOMMU_CHECK_DOMAIN(struct protection_domain, iommu, domain);
+PT_IOMMU_CHECK_DOMAIN(struct protection_domain, amdv1.iommu, domain);
+PT_IOMMU_CHECK_DOMAIN(struct protection_domain, amdv2.iommu, domain);
+
+/*
+ * This structure contains information about one PCI segment in the system.
+ */
+struct amd_iommu_pci_seg {
+	/* List with all PCI segments in the system */
+	struct list_head list;
+
+	/* List of all available dev_data structures */
+	struct llist_head dev_data_list;
+
+	/* PCI segment number */
+	u16 id;
+
+	/* Largest PCI device id we expect translation requests for */
+	u16 last_bdf;
+
+	/* Size of the device table */
+	u32 dev_table_size;
+
+	/*
+	 * device table virtual address
+	 *
+	 * Pointer to the per PCI segment device table.
+	 * It is indexed by the PCI device id or the HT unit id and contains
+	 * information about the domain the device belongs to as well as the
+	 * page table root pointer.
+	 */
+	struct dev_table_entry *dev_table;
+
+	/*
+	 * The rlookup iommu table is used to find the IOMMU which is
+	 * responsible for a specific device. It is indexed by the PCI
+	 * device id.
+	 */
+	struct amd_iommu **rlookup_table;
+
+	/*
+	 * This table is used to find the irq remapping table for a given
+	 * device id quickly.
+	 */
+	struct irq_remap_table **irq_lookup_table;
+
+	/*
+	 * Pointer to a device table which the content of old device table
+	 * will be copied to. It's only used in the kdump kernel.
+	 */
+	struct dev_table_entry *old_dev_tbl_cpy;
+
+	/*
+	 * The alias table is a driver specific data structure which contains the
+	 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+	 * More than one device can share the same requestor id.
+	 */
+	u16 *alias_table;
+
+	/*
+	 * A list of required unity mappings we find in ACPI. It is not locked
+	 * because at runtime it is only read. It is created at ACPI table
+	 * parsing time.
+	 */
+	struct list_head unity_map;
 };
 
 /*
@@ -560,8 +618,8 @@ struct amd_iommu {
 	/* Extended features */
 	u64 features;
 
-	/* IOMMUv2 */
-	bool is_iommu_v2;
+	/* Extended features 2 */
+	u64 features2;
 
 	/* PCI device id of the IOMMU device */
 	u16 devid;
@@ -574,7 +632,7 @@ struct amd_iommu {
 	u16 cap_ptr;
 
 	/* pci domain of this IOMMU */
-	u16 pci_seg;
+	struct amd_iommu_pci_seg *pci_seg;
 
 	/* start of exclusion range of that IOMMU */
 	u64 exclusion_start;
@@ -589,12 +647,21 @@ struct amd_iommu {
 	/* event buffer virtual address */
 	u8 *evt_buf;
 
+	/* Name for event log interrupt */
+	unsigned char evt_irq_name[16];
+
 	/* Base of the PPR log, if present */
 	u8 *ppr_log;
 
+	/* Name for PPR log interrupt */
+	unsigned char ppr_irq_name[16];
+
 	/* Base of the GA log, if present */
 	u8 *ga_log;
 
+	/* Name for GA log interrupt */
+	unsigned char ga_irq_name[16];
+
 	/* Tail of the GA log, if present */
 	u8 *ga_log_tail;
 
@@ -604,6 +671,9 @@ struct amd_iommu {
 	/* if one, we need to send a completion wait command */
 	bool need_sync;
 
+	/* true if disable irte caching */
+	bool irtcachedis_enabled;
+
 	/* Handle for IOMMU core code */
 	struct iommu_device iommu;
 
@@ -630,21 +700,29 @@ struct amd_iommu {
 	u8 max_counters;
 #ifdef CONFIG_IRQ_REMAP
 	struct irq_domain *ir_domain;
-	struct irq_domain *msi_domain;
 
 	struct amd_irte_ops *irte_ops;
 #endif
 
 	u32 flags;
 	volatile u64 *cmd_sem;
-	u64 cmd_sem_val;
+	atomic64_t cmd_sem_val;
+	/*
+	 * Track physical address to directly use it in build_completion_wait()
+	 * and avoid adding any special checks and handling for kdump.
+	 */
+	u64 cmd_sem_paddr;
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
 	/* DebugFS Info */
 	struct dentry *debugfs;
+	int dbg_mmio_offset;
+	int dbg_cap_offset;
 #endif
-	/* IRQ notifier for IntCapXT interrupt */
-	struct irq_affinity_notify intcapxt_notify;
+
+	/* IOPF support */
+	struct iopf_queue *iopf_queue;
+	unsigned char iopfq_name[32];
 };
 
 static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
@@ -661,8 +739,8 @@ struct acpihid_map_entry {
 	struct list_head list;
 	u8 uid[ACPIHID_UID_LEN];
 	u8 hid[ACPIHID_HID_LEN];
-	u16 devid;
-	u16 root_devid;
+	u32 devid;
+	u32 root_devid;
 	bool cmd_line;
 	struct iommu_group *group;
 };
@@ -670,29 +748,43 @@ struct acpihid_map_entry {
 struct devid_map {
 	struct list_head list;
 	u8 id;
-	u16 devid;
+	u32 devid;
 	bool cmd_line;
 };
 
+#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP	0x1	/* ATS feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP	0x2	/* PRI feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP	0x4	/* PASID context supported */
+/* Device may request execution on memory pages */
+#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP	0x8
+/* Device may request super-user privileges */
+#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP	0x10
+
 /*
  * This struct contains device specific data for the IOMMU
  */
 struct iommu_dev_data {
 	/* Protect against attach/detach races */
-	spinlock_t lock;
+	struct mutex mutex;
+	spinlock_t dte_lock;		/* DTE lock for 256-bit access */
 
 	struct list_head list;		  /* For domain->dev_list */
 	struct llist_node dev_data_list;  /* For global dev_data_list */
 	struct protection_domain *domain; /* Domain the device is bound to */
-	struct pci_dev *pdev;
+	struct gcr3_tbl_info gcr3_info;   /* Per-device GCR3 table */
+	struct device *dev;
 	u16 devid;			  /* PCI Device ID */
-	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
-	struct {
-		bool enabled;
-		int qdep;
-	} ats;				  /* ATS state */
-	bool pri_tlp;			  /* PASID TLB required for
+
+	unsigned int max_irqs;		  /* Maximum IRQs supported by device */
+	u32 max_pasids;			  /* Max supported PASIDs */
+	u32 flags;			  /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
+	int ats_qdep;
+	u8 ats_enabled  :1;		  /* ATS state */
+	u8 pri_enabled  :1;		  /* PRI state */
+	u8 pasid_enabled:1;		  /* PASID state */
+	u8 pri_tlp      :1;		  /* PASID TLB required for
 					     PPR completions */
+	u8 ppr          :1;		  /* Enable device PPR support */
 	bool use_vapic;			  /* Enable device to use vapic mode */
 	bool defer_attach;
 
@@ -705,22 +797,43 @@ extern struct list_head hpet_map;
 extern struct list_head acpihid_map;
 
 /*
+ * List with all PCI segments in the system. This list is not locked because
+ * it is only written at driver initialization time
+ */
+extern struct list_head amd_iommu_pci_seg_list;
+
+/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
 extern struct list_head amd_iommu_list;
 
 /*
- * Array with pointers to each IOMMU struct
- * The indices are referenced in the protection domains
+ * Structure defining one entry in the device table
 */
-extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+struct dev_table_entry {
+	union {
+		u64 data[4];
+		u128 data128[2];
+	};
+};
 
 /*
- * Structure defining one entry in the device table
+ * Structure defining one entry in the command buffer
 */
-struct dev_table_entry {
-	u64 data[4];
+struct iommu_cmd {
+	u32 data[4];
+};
+
+/*
+ * Structure to store persistent DTE flags from IVHD
+ */
+struct ivhd_dte_flags {
+	struct list_head list;
+	u16 segid;
+	u16 devid_first;
+	u16 devid_last;
+	struct dev_table_entry dte;
 };
 
 /*
@@ -744,62 +857,20 @@ struct unity_map_entry {
 };
 
 /*
- * List of all unity mappings. It is not locked because as runtime it is only
- * read. It is created at ACPI table parsing time.
- */
-extern struct list_head amd_iommu_unity_map;
-
-/*
 * Data structures for device handling
 */
 
-/*
- * Device table used by hardware. Read and write accesses by software are
- * locked with the amd_iommu_pd_table lock.
- */
-extern struct dev_table_entry *amd_iommu_dev_table;
-
-/*
- * Alias table to find requestor ids to device ids. Not locked because only
- * read on runtime.
- */
-extern u16 *amd_iommu_alias_table;
-
-/*
- * Reverse lookup table to find the IOMMU which translates a specific device.
- */
-extern struct amd_iommu **amd_iommu_rlookup_table;
-
-/* size of the dma_ops aperture as power of 2 */
-extern unsigned amd_iommu_aperture_order;
-
-/* largest PCI device id we expect translation requests for */
-extern u16 amd_iommu_last_bdf;
-
-/* allocation bitmap for domain ids */
-extern unsigned long *amd_iommu_pd_alloc_bitmap;
-
-/*
- * If true, the addresses will be flushed on unmap time, not when
- * they are reused
- */
-extern bool amd_iommu_unmap_flush;
-
-/* Smallest max PASID supported by any IOMMU in the system */
-extern u32 amd_iommu_max_pasid;
-
-extern bool amd_iommu_v2_present;
-
 extern bool amd_iommu_force_isolation;
 
 /* Max levels of glxval supported */
 extern int amd_iommu_max_glx_val;
 
-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+/* IDA to track protection domain IDs */
+extern struct ida pdom_ids;
+
+/* Global EFR and EFR2 registers */
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
 
 static inline int get_ioapic_devid(int id)
 {
@@ -903,8 +974,13 @@ union irte_ga_hi {
 };
 
 struct irte_ga {
-	union irte_ga_lo lo;
-	union irte_ga_hi hi;
+	union {
+		struct {
+			union irte_ga_lo lo;
+			union irte_ga_hi hi;
+		};
+		u128 irte;
+	};
 };
 
 struct irq_2_irte {
@@ -913,11 +989,10 @@ struct irq_2_irte {
 };
 
 struct amd_ir_data {
-	u32 cached_ga_tag;
+	struct amd_iommu *iommu;
 	struct irq_2_irte irq_2_irte;
 	struct msi_msg msi_entry;
 	void *entry;    /* Pointer to union irte or struct irte_ga */
-	void *ref;      /* Pointer to the actual irte */
 
 	/**
 	 * Store information for activate/de-activate
@@ -925,15 +1000,15 @@ struct amd_ir_data {
 	 */
 	struct irq_cfg *cfg;
 	int ga_vector;
-	int ga_root_ptr;
-	int ga_tag;
+	u64 ga_root_ptr;
+	u32 ga_tag;
 };
 
 struct amd_irte_ops {
 	void (*prepare)(void *, u32, bool, u8, u32, int);
-	void (*activate)(void *, u16, u16);
-	void (*deactivate)(void *, u16, u16);
-	void (*set_affinity)(void *, u16, u16, u8, u32);
+	void (*activate)(struct amd_iommu *iommu, void *, u16, u16);
+	void (*deactivate)(struct amd_iommu *iommu, void *, u16, u16);
+	void (*set_affinity)(struct amd_iommu *iommu, void *, u16, u16, u8, u32);
 	void *(*get)(struct irq_remap_table *, int);
 	void (*set_allocated)(struct irq_remap_table *, int);
 	bool (*is_allocated)(struct irq_remap_table *, int);
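[Editor's note] The old DTE_GCR3_VAL_A/B/C shift triples become GENMASK-style field masks above: the GCR3 table root pointer is scattered across two DTE qwords (gcr3[14:12] into qword 0, gcr3[30:15] and gcr3[51:31] into qword 1). Below is a standalone sketch of that split only, using local stand-ins for the kernel's GENMASK_ULL/FIELD_PREP (the __builtin_ctzll call assumes GCC/Clang) and an illustrative root address; how the driver actually assembles a full DTE is not shown in this hunk.

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
	#define FIELD_PREP(mask, val)	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

	#define DTE_GCR3_14_12	GENMASK_ULL(60, 58)	/* in DTE qword 0 */
	#define DTE_GCR3_30_15	GENMASK_ULL(31, 16)	/* in DTE qword 1 */
	#define DTE_GCR3_51_31	GENMASK_ULL(63, 43)	/* in DTE qword 1 */

	int main(void)
	{
		uint64_t gcr3 = 0x123456789000ULL;	/* illustrative 4K-aligned root */
		uint64_t dte0 = 0, dte1 = 0;

		/* scatter the root pointer's bit ranges into the DTE fields */
		dte0 |= FIELD_PREP(DTE_GCR3_14_12, (gcr3 >> 12) & 0x7);
		dte1 |= FIELD_PREP(DTE_GCR3_30_15, (gcr3 >> 15) & 0xffff);
		dte1 |= FIELD_PREP(DTE_GCR3_51_31, (gcr3 >> 31) & 0x1fffff);

		printf("qword0=%016llx qword1=%016llx\n",
		       (unsigned long long)dte0, (unsigned long long)dte1);
		return 0;
	}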
diff --git a/drivers/iommu/amd/debugfs.c b/drivers/iommu/amd/debugfs.c
index 545372fcc72f..20b04996441d 100644
--- a/drivers/iommu/amd/debugfs.c
+++ b/drivers/iommu/amd/debugfs.c
@@ -11,22 +11,382 @@
 #include <linux/pci.h>
 
 #include "amd_iommu.h"
+#include "../irq_remapping.h"
 
 static struct dentry *amd_iommu_debugfs;
-static DEFINE_MUTEX(amd_iommu_debugfs_lock);
 
 #define	MAX_NAME_LEN	20
+#define	OFS_IN_SZ	8
+#define	DEVID_IN_SZ	16
 
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu)
+static int sbdf = -1;
+
+static ssize_t iommu_mmio_write(struct file *filp, const char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct amd_iommu *iommu = m->private;
+	int ret;
+
+	iommu->dbg_mmio_offset = -1;
+
+	if (cnt > OFS_IN_SZ)
+		return -EINVAL;
+
+	ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_mmio_offset);
+	if (ret)
+		return ret;
+
+	if (iommu->dbg_mmio_offset > iommu->mmio_phys_end - sizeof(u64)) {
+		iommu->dbg_mmio_offset = -1;
+		return -EINVAL;
+	}
+
+	return cnt;
+}
+
+static int iommu_mmio_show(struct seq_file *m, void *unused)
+{
+	struct amd_iommu *iommu = m->private;
+	u64 value;
+
+	if (iommu->dbg_mmio_offset < 0) {
+		seq_puts(m, "Please provide mmio register's offset\n");
+		return 0;
+	}
+
+	value = readq(iommu->mmio_base + iommu->dbg_mmio_offset);
+	seq_printf(m, "Offset:0x%x Value:0x%016llx\n", iommu->dbg_mmio_offset, value);
+
+	return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(iommu_mmio);
+
+static ssize_t iommu_capability_write(struct file *filp, const char __user *ubuf,
+				      size_t cnt, loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct amd_iommu *iommu = m->private;
+	int ret;
+
+	iommu->dbg_cap_offset = -1;
+
+	if (cnt > OFS_IN_SZ)
+		return -EINVAL;
+
+	ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_cap_offset);
+	if (ret)
+		return ret;
+
+	/* Capability register at offset 0x14 is the last IOMMU capability register. */
+	if (iommu->dbg_cap_offset > 0x14) {
+		iommu->dbg_cap_offset = -1;
+		return -EINVAL;
+	}
+
+	return cnt;
+}
+
+static int iommu_capability_show(struct seq_file *m, void *unused)
+{
+	struct amd_iommu *iommu = m->private;
+	u32 value;
+	int err;
+
+	if (iommu->dbg_cap_offset < 0) {
+		seq_puts(m, "Please provide capability register's offset in the range [0x00 - 0x14]\n");
+		return 0;
+	}
+
+	err = pci_read_config_dword(iommu->dev, iommu->cap_ptr + iommu->dbg_cap_offset, &value);
+	if (err) {
+		seq_printf(m, "Not able to read capability register at 0x%x\n",
+			   iommu->dbg_cap_offset);
+		return 0;
+	}
+
+	seq_printf(m, "Offset:0x%x Value:0x%08x\n", iommu->dbg_cap_offset, value);
+
+	return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(iommu_capability);
+
+static int iommu_cmdbuf_show(struct seq_file *m, void *unused)
+{
+	struct amd_iommu *iommu = m->private;
+	struct iommu_cmd *cmd;
+	unsigned long flag;
+	u32 head, tail;
+	int i;
+
+	raw_spin_lock_irqsave(&iommu->lock, flag);
+	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+	seq_printf(m, "CMD Buffer Head Offset:%d Tail Offset:%d\n",
+		   (head >> 4) & 0x7fff, (tail >> 4) & 0x7fff);
+	for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
+		cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
+		seq_printf(m, "%3d: %08x %08x %08x %08x\n", i, cmd->data[0],
+			   cmd->data[1], cmd->data[2], cmd->data[3]);
+	}
+	raw_spin_unlock_irqrestore(&iommu->lock, flag);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_cmdbuf);
+
+static ssize_t devid_write(struct file *filp, const char __user *ubuf,
+			   size_t cnt, loff_t *ppos)
+{
+	struct amd_iommu_pci_seg *pci_seg;
+	int seg, bus, slot, func;
+	struct amd_iommu *iommu;
+	char *srcid_ptr;
+	u16 devid;
+	int i;
+
+	sbdf = -1;
+
+	if (cnt >= DEVID_IN_SZ)
+		return -EINVAL;
+
+	srcid_ptr = memdup_user_nul(ubuf, cnt);
+	if (IS_ERR(srcid_ptr))
+		return PTR_ERR(srcid_ptr);
+
+	i = sscanf(srcid_ptr, "%x:%x:%x.%x", &seg, &bus, &slot, &func);
+	if (i != 4) {
+		i = sscanf(srcid_ptr, "%x:%x.%x", &bus, &slot, &func);
+		if (i != 3) {
+			kfree(srcid_ptr);
+			return -EINVAL;
+		}
+		seg = 0;
+	}
+
+	devid = PCI_DEVID(bus, PCI_DEVFN(slot, func));
+
+	/* Check if user device id input is a valid input */
+	for_each_pci_segment(pci_seg) {
+		if (pci_seg->id != seg)
+			continue;
+		if (devid > pci_seg->last_bdf) {
+			kfree(srcid_ptr);
+			return -EINVAL;
+		}
+		iommu = pci_seg->rlookup_table[devid];
+		if (!iommu) {
+			kfree(srcid_ptr);
+			return -ENODEV;
+		}
+		break;
+	}
+
+	if (pci_seg->id != seg) {
+		kfree(srcid_ptr);
+		return -EINVAL;
+	}
+
+	sbdf = PCI_SEG_DEVID_TO_SBDF(seg, devid);
+
+	kfree(srcid_ptr);
+
+	return cnt;
+}
+
+static int devid_show(struct seq_file *m, void *unused)
 {
+	u16 devid;
+
+	if (sbdf >= 0) {
+		devid = PCI_SBDF_TO_DEVID(sbdf);
+		seq_printf(m, "%04x:%02x:%02x.%x\n", PCI_SBDF_TO_SEGID(sbdf),
+			   PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
+	} else
+		seq_puts(m, "No or Invalid input provided\n");
+
+	return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(devid);
+
+static void dump_dte(struct seq_file *m, struct amd_iommu_pci_seg *pci_seg, u16 devid)
+{
+	struct dev_table_entry *dev_table;
+	struct amd_iommu *iommu;
+
+	iommu = pci_seg->rlookup_table[devid];
+	if (!iommu)
+		return;
+
+	dev_table = get_dev_table(iommu);
+	if (!dev_table) {
+		seq_puts(m, "Device table not found");
+		return;
+	}
+
+	seq_printf(m, "%-12s %16s %16s %16s %16s iommu\n", "DeviceId",
+		   "QWORD[3]", "QWORD[2]", "QWORD[1]", "QWORD[0]");
+	seq_printf(m, "%04x:%02x:%02x.%x ", pci_seg->id, PCI_BUS_NUM(devid),
+		   PCI_SLOT(devid), PCI_FUNC(devid));
+	for (int i = 3; i >= 0; --i)
+		seq_printf(m, "%016llx ", dev_table[devid].data[i]);
+	seq_printf(m, "iommu%d\n", iommu->index);
+}
+
+static int iommu_devtbl_show(struct seq_file *m, void *unused)
+{
+	struct amd_iommu_pci_seg *pci_seg;
+	u16 seg, devid;
+
+	if (sbdf < 0) {
+		seq_puts(m, "Enter a valid device ID to 'devid' file\n");
+		return 0;
+	}
+	seg = PCI_SBDF_TO_SEGID(sbdf);
+	devid = PCI_SBDF_TO_DEVID(sbdf);
+
+	for_each_pci_segment(pci_seg) {
+		if (pci_seg->id != seg)
+			continue;
+		dump_dte(m, pci_seg, devid);
+		break;
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_devtbl);
+
+static void dump_128_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
+{
+	struct irte_ga *ptr, *irte;
+	int index;
+
+	for (index = 0; index < int_tab_len; index++) {
+		ptr = (struct irte_ga *)table->table;
+		irte = &ptr[index];
+
+		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+		    !irte->lo.fields_vapic.valid)
+			continue;
+		else if (!irte->lo.fields_remap.valid)
+			continue;
+		seq_printf(m, "IRT[%04d] %016llx %016llx\n", index, irte->hi.val, irte->lo.val);
+	}
+}
+
+static void dump_32_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
+{
+	union irte *ptr, *irte;
+	int index;
+
+	for (index = 0; index < int_tab_len; index++) {
+		ptr = (union irte *)table->table;
+		irte = &ptr[index];
+
+		if (!irte->fields.valid)
+			continue;
+		seq_printf(m, "IRT[%04d] %08x\n", index, irte->val);
+	}
+}
+
+static void dump_irte(struct seq_file *m, u16 devid, struct amd_iommu_pci_seg *pci_seg)
+{
+	struct dev_table_entry *dev_table;
+	struct irq_remap_table *table;
+	struct amd_iommu *iommu;
+	unsigned long flags;
+	u16 int_tab_len;
+
+	table = pci_seg->irq_lookup_table[devid];
+	if (!table) {
+		seq_printf(m, "IRQ lookup table not set for %04x:%02x:%02x:%x\n",
+			   pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
+		return;
+	}
+
+	iommu = pci_seg->rlookup_table[devid];
+	if (!iommu)
+		return;
+
+	dev_table = get_dev_table(iommu);
+	if (!dev_table) {
+		seq_puts(m, "Device table not found");
+		return;
+	}
+
+	int_tab_len = dev_table[devid].data[2] & DTE_INTTABLEN_MASK;
+	if (int_tab_len != DTE_INTTABLEN_512 && int_tab_len != DTE_INTTABLEN_2K) {
+		seq_puts(m, "The device's DTE contains an invalid IRT length value.");
+		return;
+	}
+
+	seq_printf(m, "DeviceId %04x:%02x:%02x.%x\n", pci_seg->id, PCI_BUS_NUM(devid),
+		   PCI_SLOT(devid), PCI_FUNC(devid));
+
+	raw_spin_lock_irqsave(&table->lock, flags);
+	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		dump_128_irte(m, table, BIT(int_tab_len >> 1));
+	else
+		dump_32_irte(m, table, BIT(int_tab_len >> 1));
+	seq_puts(m, "\n");
+	raw_spin_unlock_irqrestore(&table->lock, flags);
+}
+
+static int iommu_irqtbl_show(struct seq_file *m, void *unused)
+{
+	struct amd_iommu_pci_seg *pci_seg;
+	u16 devid, seg;
+
+	if (!irq_remapping_enabled) {
+		seq_puts(m, "Interrupt remapping is disabled\n");
+		return 0;
+	}
+
+	if (sbdf < 0) {
+		seq_puts(m, "Enter a valid device ID to 'devid' file\n");
+		return 0;
+	}
+
+	seg = PCI_SBDF_TO_SEGID(sbdf);
+	devid = PCI_SBDF_TO_DEVID(sbdf);
+
+	for_each_pci_segment(pci_seg) {
+		if (pci_seg->id != seg)
+			continue;
+		dump_irte(m, devid, pci_seg);
+		break;
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_irqtbl);
+
+void amd_iommu_debugfs_setup(void)
+{
+	struct amd_iommu *iommu;
 	char name[MAX_NAME_LEN + 1];
 
-	mutex_lock(&amd_iommu_debugfs_lock);
-	if (!amd_iommu_debugfs)
-		amd_iommu_debugfs = debugfs_create_dir("amd",
-						       iommu_debugfs_dir);
-	mutex_unlock(&amd_iommu_debugfs_lock);
+	amd_iommu_debugfs = debugfs_create_dir("amd", iommu_debugfs_dir);
+
+	for_each_iommu(iommu) {
+		iommu->dbg_mmio_offset = -1;
+		iommu->dbg_cap_offset = -1;
+
+		snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
+		iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+
+		debugfs_create_file("mmio", 0644, iommu->debugfs, iommu,
+				    &iommu_mmio_fops);
+		debugfs_create_file("capability", 0644, iommu->debugfs, iommu,
+				    &iommu_capability_fops);
+		debugfs_create_file("cmdbuf", 0444, iommu->debugfs, iommu,
+				    &iommu_cmdbuf_fops);
+	}
 
-	snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
-	iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+	debugfs_create_file("devid", 0644, amd_iommu_debugfs, NULL,
+			    &devid_fops);
+	debugfs_create_file("devtbl", 0444, amd_iommu_debugfs, NULL,
+			    &iommu_devtbl_fops);
+	debugfs_create_file("irqtbl", 0444, amd_iommu_debugfs, NULL,
+			    &iommu_irqtbl_fops);
 }
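[Editor's note] dump_irte() above sizes the interrupt remapping table from the DTE's IntTabLen field, which stores log2 of the entry count shifted left by one bit; that is why the valid encodings are DTE_INTTABLEN_512 (9 << 1) and DTE_INTTABLEN_2K (11 << 1), and why the entry count is recovered as BIT(int_tab_len >> 1). A minimal standalone sketch of that decoding follows; the masks are copied from amd_iommu_types.h above, and the DTE value is illustrative.

	#include <stdint.h>
	#include <stdio.h>

	#define DTE_INTTABLEN_MASK	(0xfULL << 1)
	#define DTE_INTTABLEN_512	(9ULL << 1)
	#define DTE_INTTABLEN_2K	(11ULL << 1)

	int main(void)
	{
		/* illustrative DTE qword 2 whose IntTabLen encodes 2K entries */
		uint64_t data2 = DTE_INTTABLEN_2K;
		uint64_t len = data2 & DTE_INTTABLEN_MASK;

		/* the field sits at bits 4:1 and holds log2(entries) */
		printf("IRT entries: %llu\n", 1ULL << (len >> 1));	/* -> 2048 */
		return 0;
	}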
@@ -110,7 +111,7 @@ struct ivhd_header { /* Following only valid on IVHD type 11h and 40h */ u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */ - u64 res; + u64 efr_reg2; } __attribute__((packed)); /* @@ -121,8 +122,10 @@ struct ivhd_entry { u8 type; u16 devid; u8 flags; - u32 ext; - u32 hidh; + struct_group(ext_hid, + u32 ext; + u32 hidh; + ); u64 cid; u8 uidf; u8 uidl; @@ -139,7 +142,8 @@ struct ivmd_header { u16 length; u16 devid; u16 aux; - u64 resv; + u16 pci_seg; + u8 resv[6]; u64 range_start; u64 range_length; } __attribute__((packed)); @@ -147,7 +151,11 @@ struct ivmd_header { bool amd_iommu_dump; bool amd_iommu_irq_remap __read_mostly; -enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1; +enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1; +/* Host page table level */ +u8 amd_iommu_hpt_level; +/* Guest page table level */ +int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL; int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; @@ -155,19 +163,23 @@ static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; static bool amd_iommu_detected; static bool amd_iommu_disabled __initdata; static bool amd_iommu_force_enable __initdata; +static bool amd_iommu_irtcachedis; static int amd_iommu_target_ivhd_type; -u16 amd_iommu_last_bdf; /* largest PCI device id we have - to handle */ -LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings - we find in ACPI */ -bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ +/* Global EFR and EFR2 registers */ +u64 amd_iommu_efr; +u64 amd_iommu_efr2; -LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the - system */ +/* Host (v1) page table is not supported*/ +bool amd_iommu_hatdis; -/* Array to assign indices to IOMMUs*/ -struct amd_iommu *amd_iommus[MAX_IOMMUS]; +/* SNP is enabled on the system? */ +bool amd_iommu_snp_en; +EXPORT_SYMBOL(amd_iommu_snp_en); + +LIST_HEAD(amd_iommu_pci_seg_list); /* list of all PCI segments */ +LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */ +LIST_HEAD(amd_ivhd_dev_flags_list); /* list of all IVHD device entry settings */ /* Number of IOMMUs present in the system */ static int amd_iommus_present; @@ -176,54 +188,12 @@ static int amd_iommus_present; bool amd_iommu_np_cache __read_mostly; bool amd_iommu_iotlb_sup __read_mostly = true; -u32 amd_iommu_max_pasid __read_mostly = ~0; - -bool amd_iommu_v2_present __read_mostly; static bool amd_iommu_pc_present __read_mostly; +bool amdr_ivrs_remap_support __read_mostly; bool amd_iommu_force_isolation __read_mostly; -/* - * Pointer to the device table which is shared by all AMD IOMMUs - * it is indexed by the PCI device id or the HT unit id and contains - * information about the domain the device belongs to as well as the - * page table root pointer. - */ -struct dev_table_entry *amd_iommu_dev_table; -/* - * Pointer to a device table which the content of old device table - * will be copied to. It's only be used in kdump kernel. - */ -static struct dev_table_entry *old_dev_tbl_cpy; - -/* - * The alias table is a driver specific data structure which contains the - * mappings of the PCI device ids to the actual requestor ids on the IOMMU. - * More than one device can share the same requestor id. - */ -u16 *amd_iommu_alias_table; - -/* - * The rlookup table is used to find the IOMMU which is responsible - * for a specific device. It is also indexed by the PCI device id. 
- */ -struct amd_iommu **amd_iommu_rlookup_table; - -/* - * This table is used to find the irq remapping table for a given device id - * quickly. - */ -struct irq_remap_table **irq_lookup_table; - -/* - * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap - * to know which ones are already in use. - */ -unsigned long *amd_iommu_pd_alloc_bitmap; - -static u32 dev_table_size; /* size of the device table */ -static u32 alias_table_size; /* size of the alias table */ -static u32 rlookup_table_size; /* size if the rlookup table */ +unsigned long amd_iommu_pgsize_bitmap __ro_after_init = AMD_IOMMU_PGSIZES; enum iommu_init_state { IOMMU_START_STATE, @@ -253,8 +223,7 @@ static bool __initdata cmdline_maps; static enum iommu_init_state init_state = IOMMU_START_STATE; static int amd_iommu_enable_interrupts(void); -static int __init iommu_go_to_state(enum iommu_init_state state); -static void init_device_table_dma(void); +static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg); static bool amd_iommu_pre_enabled = true; @@ -279,23 +248,50 @@ static void init_translation_status(struct amd_iommu *iommu) iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; } -static inline void update_last_devid(u16 devid) +int amd_iommu_get_num_iommus(void) { - if (devid > amd_iommu_last_bdf) - amd_iommu_last_bdf = devid; + return amd_iommus_present; } -static inline unsigned long tbl_size(int entry_size) +bool amd_iommu_ht_range_ignore(void) { - unsigned shift = PAGE_SHIFT + - get_order(((int)amd_iommu_last_bdf + 1) * entry_size); - - return 1UL << shift; + return check_feature2(FEATURE_HT_RANGE_IGNORE); } -int amd_iommu_get_num_iommus(void) +/* + * Iterate through all the IOMMUs to get common EFR + * masks among all IOMMUs and warn if found inconsistency. 
+ */ +static __init void get_global_efr(void) { - return amd_iommus_present; + struct amd_iommu *iommu; + + for_each_iommu(iommu) { + u64 tmp = iommu->features; + u64 tmp2 = iommu->features2; + + if (list_is_first(&iommu->list, &amd_iommu_list)) { + amd_iommu_efr = tmp; + amd_iommu_efr2 = tmp2; + continue; + } + + if (amd_iommu_efr == tmp && + amd_iommu_efr2 == tmp2) + continue; + + pr_err(FW_BUG + "Found inconsistent EFR/EFR2 %#llx,%#llx (global %#llx,%#llx) on iommu%d (%04x:%02x:%02x.%01x).\n", + tmp, tmp2, amd_iommu_efr, amd_iommu_efr2, + iommu->index, iommu->pci_seg->id, + PCI_BUS_NUM(iommu->devid), PCI_SLOT(iommu->devid), + PCI_FUNC(iommu->devid)); + + amd_iommu_efr &= tmp; + amd_iommu_efr2 &= tmp2; + } + + pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2); } /* @@ -306,8 +302,12 @@ int amd_iommu_get_num_iommus(void) static void __init early_iommu_features_init(struct amd_iommu *iommu, struct ivhd_header *h) { - if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) + if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) { iommu->features = h->efr_reg; + iommu->features2 = h->efr_reg2; + } + if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP) + amdr_ivrs_remap_support = true; } /* Access to l1 and l2 indexed register spaces */ @@ -379,7 +379,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu) u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem); u64 entry = start & PM_ADDR_MASK; - if (!iommu_feature(iommu, FEATURE_SNP)) + if (!check_feature(FEATURE_SNP)) return; /* Note: @@ -401,42 +401,40 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu) static void iommu_set_device_table(struct amd_iommu *iommu) { u64 entry; + u32 dev_table_size = iommu->pci_seg->dev_table_size; + void *dev_table = (void *)get_dev_table(iommu); BUG_ON(iommu->mmio_base == NULL); - entry = iommu_virt_to_phys(amd_iommu_dev_table); + if (is_kdump_kernel()) + return; + + entry = iommu_virt_to_phys(dev_table); entry |= (dev_table_size >> 12) - 1; memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, &entry, sizeof(entry)); } -/* Generic functions to enable/disable certain features of the IOMMU. */ -static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) +static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift) { u64 ctrl; ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl |= (1ULL << bit); + mask <<= shift; + ctrl &= ~mask; + ctrl |= (val << shift) & mask; writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } -static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) +/* Generic functions to enable/disable certain features of the IOMMU. 
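To make the AND-reduction in get_global_efr() above concrete, here is a worked example with two IOMMUs that disagree on a single EFR bit (the values are made up for illustration):

/*
 * iommu0->features = 0x4f77;   iommu1->features = 0x4f76;
 *
 * After the loop:
 *     amd_iommu_efr = 0x4f77 & 0x4f76 = 0x4f76
 *
 * Bit 0 (PreF) drops out of the global mask, the FW_BUG error is
 * printed for iommu1, and check_feature(BIT(0)) is false system-wide,
 * so the driver relies only on features every IOMMU implements.
 */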
*/ +void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) { - u64 ctrl; - - ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl &= ~(1ULL << bit); - writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); + iommu_feature_set(iommu, 1ULL, 1ULL, bit); } -static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) +static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) { - u64 ctrl; - - ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl &= ~CTRL_INV_TO_MASK; - ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; - writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); + iommu_feature_set(iommu, 0ULL, 1ULL, bit); } /* Function to enable the hardware */ @@ -461,8 +459,15 @@ static void iommu_disable(struct amd_iommu *iommu) iommu_feature_disable(iommu, CONTROL_GALOG_EN); iommu_feature_disable(iommu, CONTROL_GAINT_EN); + /* Disable IOMMU PPR logging */ + iommu_feature_disable(iommu, CONTROL_PPRLOG_EN); + iommu_feature_disable(iommu, CONTROL_PPRINT_EN); + /* Disable IOMMU hardware itself */ iommu_feature_disable(iommu, CONTROL_IOMMU_EN); + + /* Clear IRTE cache disabling bit */ + iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); } /* @@ -537,6 +542,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h) { u8 *p = (void *)h, *end = (void *)h; struct ivhd_entry *dev; + int last_devid = -EINVAL; u32 ivhd_size = get_ivhd_header_size(h); @@ -553,14 +559,14 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h) switch (dev->type) { case IVHD_DEV_ALL: /* Use maximum BDF value for DEV_ALL */ - update_last_devid(0xffff); - break; + return 0xffff; case IVHD_DEV_SELECT: case IVHD_DEV_RANGE_END: case IVHD_DEV_ALIAS: case IVHD_DEV_EXT_SELECT: /* all the above subfield types refer to device ids */ - update_last_devid(dev->devid); + if (dev->devid > last_devid) + last_devid = dev->devid; break; default: break; @@ -570,7 +576,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h) WARN_ON(p != end); - return 0; + return last_devid; } static int __init check_ivrs_checksum(struct acpi_table_header *table) @@ -594,38 +600,142 @@ static int __init check_ivrs_checksum(struct acpi_table_header *table) * id which we need to handle. This is the first of three functions which parse * the ACPI table. So we check the checksum here. */ -static int __init find_last_devid_acpi(struct acpi_table_header *table) +static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg) { u8 *p = (u8 *)table, *end = (u8 *)table; struct ivhd_header *h; + int last_devid, last_bdf = 0; p += IVRS_HEADER_LENGTH; end += table->length; while (p < end) { h = (struct ivhd_header *)p; - if (h->type == amd_iommu_target_ivhd_type) { - int ret = find_last_devid_from_ivhd(h); - - if (ret) - return ret; + if (h->pci_seg == pci_seg && + h->type == amd_iommu_target_ivhd_type) { + last_devid = find_last_devid_from_ivhd(h); + + if (last_devid < 0) + return -EINVAL; + if (last_devid > last_bdf) + last_bdf = last_devid; } p += h->length; } WARN_ON(p != end); - return 0; + return last_bdf; } /**************************************************************************** * * The following functions belong to the code path which parses the ACPI table * the second time. In this ACPI parsing iteration we allocate IOMMU specific - * data structures, initialize the device/alias/rlookup table and also - * basically initialize the hardware. 
+ * data structures, initialize the per PCI segment device/alias/rlookup table + * and also basically initialize the hardware. * ****************************************************************************/ +/* Allocate per PCI segment device table */ +static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg) +{ + pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, + pci_seg->dev_table_size); + if (!pci_seg->dev_table) + return -ENOMEM; + + return 0; +} + +static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg) +{ + if (is_kdump_kernel()) + memunmap((void *)pci_seg->dev_table); + else + iommu_free_pages(pci_seg->dev_table); + pci_seg->dev_table = NULL; +} + +/* Allocate per PCI segment IOMMU rlookup table. */ +static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) +{ + pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1, + sizeof(*pci_seg->rlookup_table), + GFP_KERNEL); + if (pci_seg->rlookup_table == NULL) + return -ENOMEM; + + return 0; +} + +static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg) +{ + kvfree(pci_seg->rlookup_table); + pci_seg->rlookup_table = NULL; +} + +static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) +{ + pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1, + sizeof(*pci_seg->irq_lookup_table), + GFP_KERNEL); + if (pci_seg->irq_lookup_table == NULL) + return -ENOMEM; + + return 0; +} + +static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) +{ + kvfree(pci_seg->irq_lookup_table); + pci_seg->irq_lookup_table = NULL; +} + +static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg) +{ + int i; + + pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1, + sizeof(*pci_seg->alias_table), + GFP_KERNEL); + if (!pci_seg->alias_table) + return -ENOMEM; + + /* + * let all alias entries point to itself + */ + for (i = 0; i <= pci_seg->last_bdf; ++i) + pci_seg->alias_table[i] = i; + + return 0; +} + +static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg) +{ + kvfree(pci_seg->alias_table); + pci_seg->alias_table = NULL; +} + +static inline void *iommu_memremap(unsigned long paddr, size_t size) +{ + phys_addr_t phys; + + if (!paddr) + return NULL; + + /* + * Obtain true physical address in kdump kernel when SME is enabled. + * Currently, previous kernel with SME enabled and kdump kernel + * with SME support disabled is not supported. + */ + phys = __sme_clr(paddr); + + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) + return (__force void *)ioremap_encrypted(phys, size); + else + return memremap(phys, size, MEMREMAP_WB); +} + /* * Allocates the command buffer. This buffer is per AMD IOMMU. We can * write commands to that buffer later and the IOMMU will execute them @@ -633,17 +743,63 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table) */ static int __init alloc_command_buffer(struct amd_iommu *iommu) { - iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(CMD_BUFFER_SIZE)); + iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE); return iommu->cmd_buf ? 0 : -ENOMEM; } /* + * Interrupt handler has processed all pending events and adjusted head + * and tail pointer. Reset overflow mask and restart logging again. 
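The restart helper whose comment begins above is shared by the event, GA, and PPR log paths (thin wrappers follow). For orientation, a sketch of the intended caller shape; the real call sites are the amd_iommu_int_thread_*() handlers in iommu.c/ppr.c, so the surrounding code here is illustrative only:

/* Inside a log interrupt thread, after draining all pending entries: */
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

if (status & MMIO_STATUS_EVT_OVERFLOW_MASK)
	amd_iommu_restart_event_logging(iommu);	/* disable, ack overflow, re-enable */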
+ */ +void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, + u8 cntrl_intr, u8 cntrl_log, + u32 status_run_mask, u32 status_overflow_mask) +{ + u32 status; + + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (status & status_run_mask) + return; + + pr_info_ratelimited("IOMMU %s log restarting\n", evt_type); + + iommu_feature_disable(iommu, cntrl_log); + iommu_feature_disable(iommu, cntrl_intr); + + writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET); + + iommu_feature_enable(iommu, cntrl_intr); + iommu_feature_enable(iommu, cntrl_log); +} + +/* + * This function restarts event logging in case the IOMMU experienced + * an event log buffer overflow. + */ +void amd_iommu_restart_event_logging(struct amd_iommu *iommu) +{ + amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN, + CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK, + MMIO_STATUS_EVT_OVERFLOW_MASK); +} + +/* + * This function restarts event logging in case the IOMMU experienced + * GA log overflow. + */ +void amd_iommu_restart_ga_log(struct amd_iommu *iommu) +{ + amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN, + CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK, + MMIO_STATUS_GALOG_OVERFLOW_MASK); +} + +/* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. */ -void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) +static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) { iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); @@ -665,11 +821,16 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) BUG_ON(iommu->cmd_buf == NULL); - entry = iommu_virt_to_phys(iommu->cmd_buf); - entry |= MMIO_CMD_SIZE_512; - - memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, - &entry, sizeof(entry)); + if (!is_kdump_kernel()) { + /* + * Command buffer is re-used for kdump kernel and setting + * of MMIO register is not required. + */ + entry = iommu_virt_to_phys(iommu->cmd_buf); + entry |= MMIO_CMD_SIZE_512; + memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, + &entry, sizeof(entry)); + } amd_iommu_reset_cmd_buffer(iommu); } @@ -684,20 +845,22 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu) static void __init free_command_buffer(struct amd_iommu *iommu) { - free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); + iommu_free_pages(iommu->cmd_buf); } -static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, - gfp_t gfp, size_t size) +void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp, + size_t size) { - int order = get_order(size); - void *buf = (void *)__get_free_pages(gfp, order); + void *buf; - if (buf && - iommu_feature(iommu, FEATURE_SNP) && - set_memory_4k((unsigned long)buf, (1 << order))) { - free_pages((unsigned long)buf, order); - buf = NULL; + size = PAGE_ALIGN(size); + buf = iommu_alloc_pages_sz(gfp, size); + if (!buf) + return NULL; + if (check_feature(FEATURE_SNP) && + set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) { + iommu_free_pages(buf); + return NULL; } return buf; @@ -706,7 +869,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, /* allocates the memory where the IOMMU will log its events to */ static int __init alloc_event_buffer(struct amd_iommu *iommu) { - iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, + iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL, EVT_BUFFER_SIZE); return iommu->evt_buf ? 
0 : -ENOMEM; @@ -718,10 +881,15 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu) BUG_ON(iommu->evt_buf == NULL); - entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; - - memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, - &entry, sizeof(entry)); + if (!is_kdump_kernel()) { + /* + * Event buffer is re-used for kdump kernel and setting + * of MMIO register is not required. + */ + entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; + memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, + &entry, sizeof(entry)); + } /* set head and tail to zero manually */ writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); @@ -740,108 +908,66 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu) static void __init free_event_buffer(struct amd_iommu *iommu) { - free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); -} - -/* allocates the memory where the IOMMU will log its events to */ -static int __init alloc_ppr_log(struct amd_iommu *iommu) -{ - iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, - PPR_LOG_SIZE); - - return iommu->ppr_log ? 0 : -ENOMEM; -} - -static void iommu_enable_ppr_log(struct amd_iommu *iommu) -{ - u64 entry; - - if (iommu->ppr_log == NULL) - return; - - entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; - - memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, - &entry, sizeof(entry)); - - /* set head and tail to zero manually */ - writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); - iommu_feature_enable(iommu, CONTROL_PPR_EN); -} - -static void __init free_ppr_log(struct amd_iommu *iommu) -{ - free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); + iommu_free_pages(iommu->evt_buf); } static void free_ga_log(struct amd_iommu *iommu) { #ifdef CONFIG_IRQ_REMAP - free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE)); - free_pages((unsigned long)iommu->ga_log_tail, get_order(8)); + iommu_free_pages(iommu->ga_log); + iommu_free_pages(iommu->ga_log_tail); #endif } +#ifdef CONFIG_IRQ_REMAP static int iommu_ga_log_enable(struct amd_iommu *iommu) { -#ifdef CONFIG_IRQ_REMAP u32 status, i; + u64 entry; if (!iommu->ga_log) return -EINVAL; - status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; + memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, + &entry, sizeof(entry)); + entry = (iommu_virt_to_phys(iommu->ga_log_tail) & + (BIT_ULL(52)-1)) & ~7ULL; + memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, + &entry, sizeof(entry)); + writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); - /* Check if already running */ - if (status & (MMIO_STATUS_GALOG_RUN_MASK)) - return 0; iommu_feature_enable(iommu, CONTROL_GAINT_EN); iommu_feature_enable(iommu, CONTROL_GALOG_EN); - for (i = 0; i < LOOP_TIMEOUT; ++i) { + for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) { status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); if (status & (MMIO_STATUS_GALOG_RUN_MASK)) break; + udelay(10); } - if (i >= LOOP_TIMEOUT) + if (WARN_ON(i >= MMIO_STATUS_TIMEOUT)) return -EINVAL; -#endif /* CONFIG_IRQ_REMAP */ + return 0; } -#ifdef CONFIG_IRQ_REMAP static int iommu_init_ga_log(struct amd_iommu *iommu) { - u64 entry; - if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) return 0; - iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(GA_LOG_SIZE)); + 
iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE); if (!iommu->ga_log) goto err_out; - iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(8)); + iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8); if (!iommu->ga_log_tail) goto err_out; - entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; - memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, - &entry, sizeof(entry)); - entry = (iommu_virt_to_phys(iommu->ga_log_tail) & - (BIT_ULL(52)-1)) & ~7ULL; - memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, - &entry, sizeof(entry)); - writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); - return 0; err_out: free_ga_log(iommu); @@ -849,35 +975,132 @@ err_out: } #endif /* CONFIG_IRQ_REMAP */ -static int iommu_init_ga(struct amd_iommu *iommu) +static int __init alloc_cwwb_sem(struct amd_iommu *iommu) { - int ret = 0; + iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1); + if (!iommu->cmd_sem) + return -ENOMEM; + iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); + return 0; +} -#ifdef CONFIG_IRQ_REMAP - /* Note: We have already checked GASup from IVRS table. - * Now, we need to make sure that GAMSup is set. - */ - if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && - !iommu_feature(iommu, FEATURE_GAM_VAPIC)) - amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; +static int __init remap_event_buffer(struct amd_iommu *iommu) +{ + u64 paddr; - ret = iommu_init_ga_log(iommu); -#endif /* CONFIG_IRQ_REMAP */ + pr_info_once("Re-using event buffer from the previous kernel\n"); + paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK; + iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE); - return ret; + return iommu->evt_buf ? 0 : -ENOMEM; } -static int __init alloc_cwwb_sem(struct amd_iommu *iommu) +static int __init remap_command_buffer(struct amd_iommu *iommu) { - iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1); + u64 paddr; - return iommu->cmd_sem ? 0 : -ENOMEM; + pr_info_once("Re-using command buffer from the previous kernel\n"); + paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK; + iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE); + + return iommu->cmd_buf ? 0 : -ENOMEM; +} + +static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu) +{ + u64 paddr; + + if (check_feature(FEATURE_SNP)) { + /* + * When SNP is enabled, the exclusion base register is used for the + * completion wait buffer (CWB) address. Read and re-use it. + */ + pr_info_once("Re-using CWB buffers from the previous kernel\n"); + paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK; + iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE); + if (!iommu->cmd_sem) + return -ENOMEM; + iommu->cmd_sem_paddr = paddr; + } else { + return alloc_cwwb_sem(iommu); + } + + return 0; +} + +static int __init alloc_iommu_buffers(struct amd_iommu *iommu) +{ + int ret; + + /* + * Reuse/Remap the previous kernel's allocated completion wait + * command and event buffers for kdump boot. 
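Context for cmd_sem/cmd_sem_paddr above: the completion-wait write-back buffer is where a COMPLETION_WAIT command stores its payload once the IOMMU has executed everything queued before it, and the command path polls that location. A simplified sketch of the consumer side, assuming the build_completion_wait()/iommu_queue_command() helpers from iommu.c (not part of this file; the unbounded loop is a simplification):

struct iommu_cmd cmd;
u64 data = atomic64_inc_return(&iommu->cmd_sem_val);	/* per-IOMMU token */

build_completion_wait(&cmd, iommu, data);	/* targets iommu->cmd_sem_paddr */
iommu_queue_command(iommu, &cmd);

while (READ_ONCE(*iommu->cmd_sem) != data)
	cpu_relax();				/* the real code bounds this wait */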
+ */ + if (is_kdump_kernel()) { + ret = remap_or_alloc_cwwb_sem(iommu); + if (ret) + return ret; + + ret = remap_command_buffer(iommu); + if (ret) + return ret; + + ret = remap_event_buffer(iommu); + if (ret) + return ret; + } else { + ret = alloc_cwwb_sem(iommu); + if (ret) + return ret; + + ret = alloc_command_buffer(iommu); + if (ret) + return ret; + + ret = alloc_event_buffer(iommu); + if (ret) + return ret; + } + + return 0; } static void __init free_cwwb_sem(struct amd_iommu *iommu) { if (iommu->cmd_sem) - free_page((unsigned long)iommu->cmd_sem); + iommu_free_pages((void *)iommu->cmd_sem); +} +static void __init unmap_cwwb_sem(struct amd_iommu *iommu) +{ + if (iommu->cmd_sem) { + if (check_feature(FEATURE_SNP)) + memunmap((void *)iommu->cmd_sem); + else + iommu_free_pages((void *)iommu->cmd_sem); + } +} + +static void __init unmap_command_buffer(struct amd_iommu *iommu) +{ + memunmap((void *)iommu->cmd_buf); +} + +static void __init unmap_event_buffer(struct amd_iommu *iommu) +{ + memunmap(iommu->evt_buf); +} + +static void __init free_iommu_buffers(struct amd_iommu *iommu) +{ + if (is_kdump_kernel()) { + unmap_cwwb_sem(iommu); + unmap_command_buffer(iommu); + unmap_event_buffer(iommu); + } else { + free_cwwb_sem(iommu); + free_command_buffer(iommu); + free_event_buffer(iommu); + } } static void iommu_enable_xt(struct amd_iommu *iommu) @@ -895,63 +1118,38 @@ static void iommu_enable_xt(struct amd_iommu *iommu) static void iommu_enable_gt(struct amd_iommu *iommu) { - if (!iommu_feature(iommu, FEATURE_GT)) + if (!check_feature(FEATURE_GT)) return; iommu_feature_enable(iommu, CONTROL_GT_EN); } /* sets a specific bit in the device table entry. */ -static void set_dev_entry_bit(u16 devid, u8 bit) +static void set_dte_bit(struct dev_table_entry *dte, u8 bit) { int i = (bit >> 6) & 0x03; int _bit = bit & 0x3f; - amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); + dte->data[i] |= (1UL << _bit); } -static int get_dev_entry_bit(u16 devid, u8 bit) +static bool __reuse_device_table(struct amd_iommu *iommu) { - int i = (bit >> 6) & 0x03; - int _bit = bit & 0x3f; - - return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; -} - - -static bool copy_device_table(void) -{ - u64 int_ctl, int_tab_len, entry = 0, last_entry = 0; - struct dev_table_entry *old_devtb = NULL; - u32 lo, hi, devid, old_devtb_size; + struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; + u32 lo, hi, old_devtb_size; phys_addr_t old_devtb_phys; - struct amd_iommu *iommu; - u16 dom_id, dte_v, irq_v; - gfp_t gfp_flag; - u64 tmp; + u64 entry; - if (!amd_iommu_pre_enabled) - return false; + /* Each IOMMU use separate device table with the same size */ + lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); + hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); + entry = (((u64) hi) << 32) + lo; - pr_warn("Translation is already enabled - trying to copy translation structures\n"); - for_each_iommu(iommu) { - /* All IOMMUs should use the same device table with the same size */ - lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); - hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); - entry = (((u64) hi) << 32) + lo; - if (last_entry && last_entry != entry) { - pr_err("IOMMU:%d should use the same dev table as others!\n", - iommu->index); - return false; - } - last_entry = entry; - - old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; - if (old_devtb_size != dev_table_size) { - pr_err("The device table size of IOMMU:%d is not expected!\n", - iommu->index); - return false; - } + old_devtb_size = ((entry & 
~PAGE_MASK) + 1) << 12; + if (old_devtb_size != pci_seg->dev_table_size) { + pr_err("The device table size of IOMMU:%d is not expected!\n", + iommu->index); + return false; } /* @@ -965,105 +1163,150 @@ static bool copy_device_table(void) pr_err("The address of old device table is above 4G, not trustworthy!\n"); return false; } - old_devtb = (sme_active() && is_kdump_kernel()) - ? (__force void *)ioremap_encrypted(old_devtb_phys, - dev_table_size) - : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); - if (!old_devtb) + /* + * Re-use the previous kernel's device table for kdump. + */ + pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size); + if (pci_seg->old_dev_tbl_cpy == NULL) { + pr_err("Failed to remap memory for reusing old device table!\n"); return false; + } - gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32; - old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, - get_order(dev_table_size)); - if (old_dev_tbl_cpy == NULL) { - pr_err("Failed to allocate memory for copying old device table!\n"); + return true; +} + +static bool reuse_device_table(void) +{ + struct amd_iommu *iommu; + struct amd_iommu_pci_seg *pci_seg; + + if (!amd_iommu_pre_enabled) return false; - } - for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { - old_dev_tbl_cpy[devid] = old_devtb[devid]; - dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; - dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; - - if (dte_v && dom_id) { - old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; - old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; - __set_bit(dom_id, amd_iommu_pd_alloc_bitmap); - /* If gcr3 table existed, mask it out */ - if (old_devtb[devid].data[0] & DTE_FLAG_GV) { - tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; - tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C; - old_dev_tbl_cpy[devid].data[1] &= ~tmp; - tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A; - tmp |= DTE_FLAG_GV; - old_dev_tbl_cpy[devid].data[0] &= ~tmp; - } - } + pr_warn("Translation is already enabled - trying to reuse translation structures\n"); - irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; - int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; - int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK; - if (irq_v && (int_ctl || int_tab_len)) { - if ((int_ctl != DTE_IRQ_REMAP_INTCTL) || - (int_tab_len != DTE_INTTABLEN)) { - pr_err("Wrong old irq remapping flag: %#x\n", devid); + /* + * All IOMMUs within PCI segment shares common device table. + * Hence reuse device table only once per PCI segment. + */ + for_each_pci_segment(pci_seg) { + for_each_iommu(iommu) { + if (pci_seg->id != iommu->pci_seg->id) + continue; + if (!__reuse_device_table(iommu)) return false; - } - - old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; + break; } } - memunmap(old_devtb); return true; } -void amd_iommu_apply_erratum_63(u16 devid) +struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid) { - int sysmgt; + struct ivhd_dte_flags *e; + unsigned int best_len = UINT_MAX; + struct dev_table_entry *dte = NULL; - sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | - (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); + for_each_ivhd_dte_flags(e) { + /* + * Need to go through the whole list to find the smallest range, + * which contains the devid. 
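A worked example of the smallest-range rule described above, using hypothetical IVHD entries on segment 0:

/*
 * list: A = { devid_first = 0x0000, devid_last = 0xffff }   (DEV_ALL)
 *       B = { devid_first = 0x0040, devid_last = 0x004f }   (range entry)
 *
 * amd_iommu_get_ivhd_dte_flags(0, 0x0045) matches both, but B's length
 * (0x000f) is smaller than A's (0xffff), so B's DTE settings win: the
 * most specific firmware entry overrides the blanket DEV_ALL defaults.
 */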
+ */ + if ((e->segid == segid) && + (e->devid_first <= devid) && (devid <= e->devid_last)) { + unsigned int len = e->devid_last - e->devid_first; - if (sysmgt == 0x01) - set_dev_entry_bit(devid, DEV_ENTRY_IW); + if (len < best_len) { + dte = &(e->dte); + best_len = len; + } + } + } + return dte; } -/* Writes the specific IOMMU for a device into the rlookup table */ -static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) +static bool search_ivhd_dte_flags(u16 segid, u16 first, u16 last) { - amd_iommu_rlookup_table[devid] = iommu; + struct ivhd_dte_flags *e; + + for_each_ivhd_dte_flags(e) { + if ((e->segid == segid) && + (e->devid_first == first) && + (e->devid_last == last)) + return true; + } + return false; } /* * This function takes the device specific flags read from the ACPI * table and sets up the device table entry with that information */ +static void __init +set_dev_entry_from_acpi_range(struct amd_iommu *iommu, u16 first, u16 last, + u32 flags, u32 ext_flags) +{ + int i; + struct dev_table_entry dte = {}; + + /* Parse IVHD DTE setting flags and store information */ + if (flags) { + struct ivhd_dte_flags *d; + + if (search_ivhd_dte_flags(iommu->pci_seg->id, first, last)) + return; + + d = kzalloc(sizeof(struct ivhd_dte_flags), GFP_KERNEL); + if (!d) + return; + + pr_debug("%s: devid range %#x:%#x\n", __func__, first, last); + + if (flags & ACPI_DEVFLAG_INITPASS) + set_dte_bit(&dte, DEV_ENTRY_INIT_PASS); + if (flags & ACPI_DEVFLAG_EXTINT) + set_dte_bit(&dte, DEV_ENTRY_EINT_PASS); + if (flags & ACPI_DEVFLAG_NMI) + set_dte_bit(&dte, DEV_ENTRY_NMI_PASS); + if (flags & ACPI_DEVFLAG_SYSMGT1) + set_dte_bit(&dte, DEV_ENTRY_SYSMGT1); + if (flags & ACPI_DEVFLAG_SYSMGT2) + set_dte_bit(&dte, DEV_ENTRY_SYSMGT2); + if (flags & ACPI_DEVFLAG_LINT0) + set_dte_bit(&dte, DEV_ENTRY_LINT0_PASS); + if (flags & ACPI_DEVFLAG_LINT1) + set_dte_bit(&dte, DEV_ENTRY_LINT1_PASS); + + /* Apply erratum 63, which needs info in initial_dte */ + if (FIELD_GET(DTE_DATA1_SYSMGT_MASK, dte.data[1]) == 0x1) + dte.data[0] |= DTE_FLAG_IW; + + memcpy(&d->dte, &dte, sizeof(dte)); + d->segid = iommu->pci_seg->id; + d->devid_first = first; + d->devid_last = last; + list_add_tail(&d->list, &amd_ivhd_dev_flags_list); + } + + for (i = first; i <= last; i++) { + if (flags) { + struct dev_table_entry *dev_table = get_dev_table(iommu); + + memcpy(&dev_table[i], &dte, sizeof(dte)); + } + amd_iommu_set_rlookup_table(iommu, i); + } +} + static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, u16 devid, u32 flags, u32 ext_flags) { - if (flags & ACPI_DEVFLAG_INITPASS) - set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); - if (flags & ACPI_DEVFLAG_EXTINT) - set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); - if (flags & ACPI_DEVFLAG_NMI) - set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); - if (flags & ACPI_DEVFLAG_SYSMGT1) - set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); - if (flags & ACPI_DEVFLAG_SYSMGT2) - set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); - if (flags & ACPI_DEVFLAG_LINT0) - set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); - if (flags & ACPI_DEVFLAG_LINT1) - set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); - - amd_iommu_apply_erratum_63(devid); - - set_iommu_for_device(iommu, devid); + set_dev_entry_from_acpi_range(iommu, devid, devid, flags, ext_flags); } -int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) +int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line) { struct devid_map *entry; struct list_head *list; @@ -1100,7 +1343,7 @@ int __init add_special_device(u8 
type, u8 id, u16 *devid, bool cmd_line) return 0; } -static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid, +static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u32 *devid, bool cmd_line) { struct acpihid_map_entry *entry; @@ -1128,7 +1371,7 @@ static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid, entry->cmd_line = cmd_line; entry->root_devid = (entry->devid & (~0x7)); - pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n", + pr_info("%s, add hid:%s, uid:%s, rdevid:%#x\n", entry->cmd_line ? "cmd" : "ivrs", entry->hid, entry->uid, entry->root_devid); @@ -1179,10 +1422,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, { u8 *p = (u8 *)h; u8 *end = p, flags = 0; - u16 devid = 0, devid_start = 0, devid_to = 0; + u16 devid = 0, devid_start = 0, devid_to = 0, seg_id; u32 dev_i, ext_flags = 0; bool alias = false; struct ivhd_entry *e; + struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; u32 ivhd_size; int ret; @@ -1214,19 +1458,18 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, while (p < end) { e = (struct ivhd_entry *)p; + seg_id = pci_seg->id; + switch (e->type) { case IVHD_DEV_ALL: - DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags); - - for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i) - set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); + DUMP_printk(" DEV_ALL\t\t\tsetting: %#02x\n", e->flags); + set_dev_entry_from_acpi_range(iommu, 0, pci_seg->last_bdf, e->flags, 0); break; case IVHD_DEV_SELECT: - DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " - "flags: %02x\n", - PCI_BUS_NUM(e->devid), + DUMP_printk(" DEV_SELECT\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x\n", + seg_id, PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags); @@ -1236,9 +1479,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case IVHD_DEV_SELECT_RANGE_START: - DUMP_printk(" DEV_SELECT_RANGE_START\t " - "devid: %02x:%02x.%x flags: %02x\n", - PCI_BUS_NUM(e->devid), + DUMP_printk(" DEV_SELECT_RANGE_START\tdevid: %04x:%02x:%02x.%x flags: %#02x\n", + seg_id, PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags); @@ -1250,9 +1492,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case IVHD_DEV_ALIAS: - DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " - "flags: %02x devid_to: %02x:%02x.%x\n", - PCI_BUS_NUM(e->devid), + DUMP_printk(" DEV_ALIAS\t\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %02x:%02x.%x\n", + seg_id, PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags, @@ -1264,18 +1505,16 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, devid_to = e->ext >> 8; set_dev_entry_from_acpi(iommu, devid , e->flags, 0); set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); - amd_iommu_alias_table[devid] = devid_to; + pci_seg->alias_table[devid] = devid_to; break; case IVHD_DEV_ALIAS_RANGE: - DUMP_printk(" DEV_ALIAS_RANGE\t\t " - "devid: %02x:%02x.%x flags: %02x " - "devid_to: %02x:%02x.%x\n", - PCI_BUS_NUM(e->devid), + DUMP_printk(" DEV_ALIAS_RANGE\t\tdevid: %04x:%02x:%02x.%x flags: %#02x devid_to: %04x:%02x:%02x.%x\n", + seg_id, PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags, - PCI_BUS_NUM(e->ext >> 8), + seg_id, PCI_BUS_NUM(e->ext >> 8), PCI_SLOT(e->ext >> 8), PCI_FUNC(e->ext >> 8)); @@ -1287,9 +1526,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case IVHD_DEV_EXT_SELECT: - DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " - "flags: %02x ext: %08x\n", - 
PCI_BUS_NUM(e->devid), + DUMP_printk(" DEV_EXT_SELECT\t\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n", + seg_id, PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags, e->ext); @@ -1300,9 +1538,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case IVHD_DEV_EXT_SELECT_RANGE: - DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " - "%02x:%02x.%x flags: %02x ext: %08x\n", - PCI_BUS_NUM(e->devid), + DUMP_printk(" DEV_EXT_SELECT_RANGE\tdevid: %04x:%02x:%02x.%x flags: %#02x ext: %08x\n", + seg_id, PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid), e->flags, e->ext); @@ -1314,30 +1551,27 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; case IVHD_DEV_RANGE_END: - DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", - PCI_BUS_NUM(e->devid), + DUMP_printk(" DEV_RANGE_END\t\tdevid: %04x:%02x:%02x.%x\n", + seg_id, PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid)); devid = e->devid; - for (dev_i = devid_start; dev_i <= devid; ++dev_i) { - if (alias) { - amd_iommu_alias_table[dev_i] = devid_to; - set_dev_entry_from_acpi(iommu, - devid_to, flags, ext_flags); - } - set_dev_entry_from_acpi(iommu, dev_i, - flags, ext_flags); + if (alias) { + for (dev_i = devid_start; dev_i <= devid; ++dev_i) + pci_seg->alias_table[dev_i] = devid_to; + set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags); } + set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags); break; case IVHD_DEV_SPECIAL: { u8 handle, type; const char *var; - u16 devid; + u32 devid; int ret; handle = e->ext & 0xff; - devid = (e->ext >> 8) & 0xffff; + devid = PCI_SEG_DEVID_TO_SBDF(seg_id, (e->ext >> 8)); type = (e->ext >> 24) & 0xff; if (type == IVHD_SPECIAL_IOAPIC) @@ -1347,11 +1581,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, else var = "UNKNOWN"; - DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n", + DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n", var, (int)handle, - PCI_BUS_NUM(devid), + seg_id, PCI_BUS_NUM(devid), PCI_SLOT(devid), - PCI_FUNC(devid)); + PCI_FUNC(devid), + e->flags); ret = add_special_device(type, handle, &devid, false); if (ret) @@ -1367,7 +1602,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } case IVHD_DEV_ACPI_HID: { - u16 devid; + u32 devid; u8 hid[ACPIHID_HID_LEN]; u8 uid[ACPIHID_UID_LEN]; int ret; @@ -1378,7 +1613,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } - memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1); + BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1); + memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1); hid[ACPIHID_HID_LEN - 1] = '\0'; if (!(*hid)) { @@ -1409,12 +1645,13 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, break; } - devid = e->devid; - DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n", - hid, uid, + devid = PCI_SEG_DEVID_TO_SBDF(seg_id, e->devid); + DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %04x:%02x:%02x.%x, flags: %#02x\n", + hid, uid, seg_id, PCI_BUS_NUM(devid), PCI_SLOT(devid), - PCI_FUNC(devid)); + PCI_FUNC(devid), + e->flags); flags = e->flags; @@ -1441,14 +1678,99 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu, return 0; } +/* Allocate PCI segment data structure */ +static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id, + struct acpi_table_header *ivrs_base) +{ + struct amd_iommu_pci_seg *pci_seg; + int last_bdf; + + /* + * First parse ACPI tables to find the largest Bus/Dev/Func we need 
to + * handle in this PCI segment. Upon this information the shared data + * structures for the PCI segments in the system will be allocated. + */ + last_bdf = find_last_devid_acpi(ivrs_base, id); + if (last_bdf < 0) + return NULL; + + pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL); + if (pci_seg == NULL) + return NULL; + + pci_seg->last_bdf = last_bdf; + DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf); + pci_seg->dev_table_size = + max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE), + SZ_4K); + + pci_seg->id = id; + init_llist_head(&pci_seg->dev_data_list); + INIT_LIST_HEAD(&pci_seg->unity_map); + list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list); + + if (alloc_dev_table(pci_seg)) + goto err_free_pci_seg; + if (alloc_alias_table(pci_seg)) + goto err_free_dev_table; + if (alloc_rlookup_table(pci_seg)) + goto err_free_alias_table; + + return pci_seg; + +err_free_alias_table: + free_alias_table(pci_seg); +err_free_dev_table: + free_dev_table(pci_seg); +err_free_pci_seg: + list_del(&pci_seg->list); + kfree(pci_seg); + return NULL; +} + +static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id, + struct acpi_table_header *ivrs_base) +{ + struct amd_iommu_pci_seg *pci_seg; + + for_each_pci_segment(pci_seg) { + if (pci_seg->id == id) + return pci_seg; + } + + return alloc_pci_segment(id, ivrs_base); +} + +static void __init free_pci_segments(void) +{ + struct amd_iommu_pci_seg *pci_seg, *next; + + for_each_pci_segment_safe(pci_seg, next) { + list_del(&pci_seg->list); + free_irq_lookup_table(pci_seg); + free_rlookup_table(pci_seg); + free_alias_table(pci_seg); + free_dev_table(pci_seg); + kfree(pci_seg); + } +} + +static void __init free_sysfs(struct amd_iommu *iommu) +{ + if (iommu->iommu.dev) { + iommu_device_unregister(&iommu->iommu); + iommu_device_sysfs_remove(&iommu->iommu); + } +} + static void __init free_iommu_one(struct amd_iommu *iommu) { - free_cwwb_sem(iommu); - free_command_buffer(iommu); - free_event_buffer(iommu); - free_ppr_log(iommu); + free_sysfs(iommu); + free_iommu_buffers(iommu); + amd_iommu_free_ppr_log(iommu); free_ga_log(iommu); iommu_unmap_mmio_space(iommu); + amd_iommu_iopf_uninit(iommu); } static void __init free_iommu_all(void) @@ -1521,16 +1843,22 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) } /* - * This function clues the initialization function for one IOMMU + * This function glues the initialization function for one IOMMU * together and also allocates the command buffer and programs the * hardware. It does NOT enable the IOMMU. This is done afterwards. 
*/ -static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) +static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, + struct acpi_table_header *ivrs_base) { - int ret; + struct amd_iommu_pci_seg *pci_seg; + + pci_seg = get_pci_segment(h->pci_seg, ivrs_base); + if (pci_seg == NULL) + return -ENOMEM; + iommu->pci_seg = pci_seg; raw_spin_lock_init(&iommu->lock); - iommu->cmd_sem_val = 0; + atomic64_set(&iommu->cmd_sem_val, 0); /* Add IOMMU to internal data structures */ list_add_tail(&iommu->list, &amd_iommu_list); @@ -1541,15 +1869,11 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) return -ENOSYS; } - /* Index is fine - add IOMMU to the array */ - amd_iommus[iommu->index] = iommu; - /* * Copy data from ACPI table entry to the iommu struct */ iommu->devid = h->devid; iommu->cap_ptr = h->cap_ptr; - iommu->pci_seg = h->pci_seg; iommu->mmio_phys = h->mmio_phys; switch (h->type) { @@ -1562,13 +1886,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; - /* - * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports. - * GAM also requires GA mode. Therefore, we need to - * check cmpxchg16b support before enabling it. - */ - if (!boot_cpu_has(X86_FEATURE_CX16) || - ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) + /* GAM requires GA mode. */ + if ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0) amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; break; case 0x11: @@ -1578,13 +1897,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; - /* - * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports. - * XT, GAM also requires GA mode. Therefore, we need to - * check cmpxchg16b support before enabling them. - */ - if (!boot_cpu_has(X86_FEATURE_CX16) || - ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) { + /* XT and GAM require GA mode. */ + if ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0) { amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; break; } @@ -1592,6 +1906,11 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; + if (h->efr_attr & BIT(IOMMU_IVHD_ATTR_HATDIS_SHIFT)) { + pr_warn_once("Host Address Translation is not supported.\n"); + amd_iommu_hatdis = true; + } + early_iommu_features_init(iommu, h); break; @@ -1604,14 +1923,16 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) if (!iommu->mmio_base) return -ENOMEM; - if (alloc_cwwb_sem(iommu)) - return -ENOMEM; + return init_iommu_from_acpi(iommu, h); +} - if (alloc_command_buffer(iommu)) - return -ENOMEM; +static int __init init_iommu_one_late(struct amd_iommu *iommu) +{ + int ret; - if (alloc_event_buffer(iommu)) - return -ENOMEM; + ret = alloc_iommu_buffers(iommu); + if (ret) + return ret; iommu->int_enabled = false; @@ -1625,10 +1946,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) if (amd_iommu_pre_enabled) amd_iommu_pre_enabled = translation_pre_enabled(iommu); - ret = init_iommu_from_acpi(iommu, h); - if (ret) - return ret; - if (amd_iommu_irq_remap) { ret = amd_iommu_create_irq_domain(iommu); if (ret) @@ -1639,7 +1956,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) * Make sure IOMMU is not considered to translate itself. 
The IVRS * table tells us so, but this is a lie! */ - amd_iommu_rlookup_table[iommu->devid] = NULL; + iommu->pci_seg->rlookup_table[iommu->devid] = NULL; return 0; } @@ -1684,15 +2001,16 @@ static int __init init_iommu_all(struct acpi_table_header *table) end += table->length; p += IVRS_HEADER_LENGTH; + /* Phase 1: Process all IVHD blocks */ while (p < end) { h = (struct ivhd_header *)p; if (*p == amd_iommu_target_ivhd_type) { - DUMP_printk("device: %02x:%02x.%01x cap: %04x " - "seg: %d flags: %01x info %04x\n", - PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), - PCI_FUNC(h->devid), h->cap_ptr, - h->pci_seg, h->flags, h->info); + DUMP_printk("device: %04x:%02x:%02x.%01x cap: %04x " + "flags: %01x info %04x\n", + h->pci_seg, PCI_BUS_NUM(h->devid), + PCI_SLOT(h->devid), PCI_FUNC(h->devid), + h->cap_ptr, h->flags, h->info); DUMP_printk(" mmio-addr: %016llx\n", h->mmio_phys); @@ -1700,7 +2018,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) if (iommu == NULL) return -ENOMEM; - ret = init_iommu_one(iommu, h); + ret = init_iommu_one(iommu, h, table); if (ret) return ret; } @@ -1709,6 +2027,16 @@ static int __init init_iommu_all(struct acpi_table_header *table) } WARN_ON(p != end); + /* Phase 2 : Early feature support check */ + get_global_efr(); + + /* Phase 3 : Enabling IOMMU features */ + for_each_iommu(iommu) { + ret = init_iommu_one_late(iommu); + if (ret) + return ret; + } + return 0; } @@ -1717,7 +2045,7 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) u64 val; struct pci_dev *pdev = iommu->dev; - if (!iommu_feature(iommu, FEATURE_PC)) + if (!check_feature(FEATURE_PC)) return; amd_iommu_pc_present = true; @@ -1736,7 +2064,7 @@ static ssize_t amd_iommu_show_cap(struct device *dev, char *buf) { struct amd_iommu *iommu = dev_to_amd_iommu(dev); - return sprintf(buf, "%x\n", iommu->cap); + return sysfs_emit(buf, "%x\n", iommu->cap); } static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); @@ -1744,8 +2072,7 @@ static ssize_t amd_iommu_show_features(struct device *dev, struct device_attribute *attr, char *buf) { - struct amd_iommu *iommu = dev_to_amd_iommu(dev); - return sprintf(buf, "%llx\n", iommu->features); + return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2); } static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); @@ -1772,16 +2099,18 @@ static const struct attribute_group *amd_iommu_groups[] = { */ static void __init late_iommu_features_init(struct amd_iommu *iommu) { - u64 features; + u64 features, features2; if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) return; /* read extended feature bits */ features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); + features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2); - if (!iommu->features) { - iommu->features = features; + if (!amd_iommu_efr) { + amd_iommu_efr = features; + amd_iommu_efr2 = features2; return; } @@ -1789,9 +2118,13 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu) * Sanity check and warn if EFR values from * IVHD and MMIO conflict. */ - if (features != iommu->features) - pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n", - features, iommu->features); + if (features != amd_iommu_efr || + features2 != amd_iommu_efr2) { + pr_warn(FW_WARN + "EFR mismatch. 
Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n", + features, amd_iommu_efr, + features2, amd_iommu_efr2); + } } static int __init iommu_init_pci(struct amd_iommu *iommu) @@ -1799,13 +2132,14 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) int cap_ptr = iommu->cap_ptr; int ret; - iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid), + iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, + PCI_BUS_NUM(iommu->devid), iommu->devid & 0xff); if (!iommu->dev) return -ENODEV; - /* Prevent binding other PCI device drivers to IOMMU devices */ - iommu->dev->match_driver = false; + /* ACPI _PRT won't have an IRQ for IOMMU */ + iommu->dev->irq_managed = 1; pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); @@ -1815,43 +2149,33 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) late_iommu_features_init(iommu); - if (iommu_feature(iommu, FEATURE_GT)) { + if (check_feature(FEATURE_GT)) { int glxval; - u32 max_pasid; u64 pasmax; - pasmax = iommu->features & FEATURE_PASID_MASK; - pasmax >>= FEATURE_PASID_SHIFT; - max_pasid = (1 << (pasmax + 1)) - 1; + pasmax = FIELD_GET(FEATURE_PASMAX, amd_iommu_efr); + iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1; - amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid); + BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK); - BUG_ON(amd_iommu_max_pasid & ~PASID_MASK); - - glxval = iommu->features & FEATURE_GLXVAL_MASK; - glxval >>= FEATURE_GLXVAL_SHIFT; + glxval = FIELD_GET(FEATURE_GLX, amd_iommu_efr); if (amd_iommu_max_glx_val == -1) amd_iommu_max_glx_val = glxval; else amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); - } - if (iommu_feature(iommu, FEATURE_GT) && - iommu_feature(iommu, FEATURE_PPR)) { - iommu->is_iommu_v2 = true; - amd_iommu_v2_present = true; + iommu_enable_gt(iommu); } - if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) + if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu)) return -ENOMEM; - ret = iommu_init_ga(iommu); - if (ret) - return ret; - - if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) + if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { + pr_info("Using strict mode due to virtualization\n"); + iommu_set_dma_strict(); amd_iommu_np_cache = true; + } init_iommu_perf_ctr(iommu); @@ -1859,7 +2183,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) int i, j; iommu->root_pdev = - pci_get_domain_bus_and_slot(0, iommu->dev->bus->number, + pci_get_domain_bus_and_slot(iommu->pci_seg->id, + iommu->dev->bus->number, PCI_DEVFN(0, 0)); /* @@ -1886,60 +2211,89 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) amd_iommu_erratum_746_workaround(iommu); amd_iommu_ats_write_check_workaround(iommu); - iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, + ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, amd_iommu_groups, "ivhd%d", iommu->index); - iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); + if (ret) + return ret; + + /* + * Allocate per IOMMU IOPF queue here so that in attach device path, + * PRI capable device can be added to IOPF queue + */ + if (amd_iommu_gt_ppr_supported()) { + ret = amd_iommu_iopf_init(iommu); + if (ret) + return ret; + } + + ret = iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); + if (ret || amd_iommu_pgtable == PD_MODE_NONE) { + /* + * Remove sysfs if DMA translation is not supported by the + * IOMMU. Do not return an error to enable IRQ remapping + * in state_next(), DTE[V, TV] must eventually be set to 0. 
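Worked numbers for the PASID sizing earlier in iommu_init_pci() above, assuming FEATURE_PASMAX is the usual 5-bit EFR field (the macro definitions are not part of this hunk):

u64 pasmax = FIELD_GET(FEATURE_PASMAX, amd_iommu_efr);	/* e.g. 15 */
u32 max_pasids = (1 << (pasmax + 1)) - 1;		/* 2^16 - 1 = 0xffff */

/* The BUG_ON above only fires if firmware reports a PASMAX larger
 * than PASID_MASK allows. */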
+ */ + iommu_device_sysfs_remove(&iommu->iommu); + } return pci_enable_device(iommu->dev); } static void print_iommu_info(void) { + int i; static const char * const feat_str[] = { "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", "IA", "GA", "HE", "PC" }; - struct amd_iommu *iommu; - for_each_iommu(iommu) { - struct pci_dev *pdev = iommu->dev; - int i; + if (amd_iommu_efr) { + pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2); - pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr); + for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { + if (check_feature(1ULL << i)) + pr_cont(" %s", feat_str[i]); + } - if (iommu->cap & (1 << IOMMU_CAP_EFR)) { - pr_info("Extended features (%#llx):", iommu->features); + if (check_feature(FEATURE_GAM_VAPIC)) + pr_cont(" GA_vAPIC"); - for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { - if (iommu_feature(iommu, (1ULL << i))) - pr_cont(" %s", feat_str[i]); - } + if (check_feature(FEATURE_SNP)) + pr_cont(" SNP"); - if (iommu->features & FEATURE_GAM_VAPIC) - pr_cont(" GA_vAPIC"); + if (check_feature2(FEATURE_SEVSNPIO_SUP)) + pr_cont(" SEV-TIO"); - pr_cont("\n"); - } + pr_cont("\n"); } + if (irq_remapping_enabled) { pr_info("Interrupt remapping enabled\n"); - if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) - pr_info("Virtual APIC enabled\n"); if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) pr_info("X2APIC enabled\n"); } + if (amd_iommu_pgtable == PD_MODE_V2) { + pr_info("V2 page table enabled (Paging mode : %d level)\n", + amd_iommu_gpt_level); + } } static int __init amd_iommu_init_pci(void) { struct amd_iommu *iommu; + struct amd_iommu_pci_seg *pci_seg; int ret; + /* Init global identity domain before registering IOMMU */ + amd_iommu_init_identity_domain(); + for_each_iommu(iommu) { ret = iommu_init_pci(iommu); - if (ret) - break; - + if (ret) { + pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", + iommu->index, ret); + goto out; + } /* Need to setup range after PCI init */ iommu_set_cwwb_range(iommu); } @@ -1947,23 +2301,22 @@ static int __init amd_iommu_init_pci(void) /* * Order is important here to make sure any unity map requirements are * fulfilled. The unity mappings are created and written to the device - * table during the amd_iommu_init_api() call. + * table during the iommu_init_pci() call. * * After that we call init_device_table_dma() to make sure any * uninitialized DTE will block DMA, and in the end we flush the caches * of all IOMMUs to make sure the changes to the device table are * active. */ - ret = amd_iommu_init_api(); - - init_device_table_dma(); + for_each_pci_segment(pci_seg) + init_device_table_dma(pci_seg); for_each_iommu(iommu) - iommu_flush_all_caches(iommu); + amd_iommu_flush_all_caches(iommu); - if (!ret) - print_iommu_info(); + print_iommu_info(); +out: return ret; } @@ -2011,48 +2364,18 @@ union intcapxt { }; } __attribute__ ((packed)); -/* - * There isn't really any need to mask/unmask at the irqchip level because - * the 64-bit INTCAPXT registers can be updated atomically without tearing - * when the affinity is being updated. 
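For reference, composing a value of union intcapxt (declared above) for one of the three log interrupts; the APIC ID and vector below are made up, and the destination-ID split mirrors the unmask path added later in this hunk:

union intcapxt xt = { .capxt = 0ULL };

xt.dest_mode_logical = apic->dest_mode_logical;
xt.vector = 0x35;				/* hypothetical vector */
xt.destid_0_23 = 0x112 & GENMASK(23, 0);	/* APIC ID 0x112, low 24 bits */
xt.destid_24_31 = 0x112 >> 24;			/* high byte (0 here) */

writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);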
- */ -static void intcapxt_unmask_irq(struct irq_data *data) -{ -} - -static void intcapxt_mask_irq(struct irq_data *data) -{ -} static struct irq_chip intcapxt_controller; static int intcapxt_irqdomain_activate(struct irq_domain *domain, struct irq_data *irqd, bool reserve) { - struct amd_iommu *iommu = irqd->chip_data; - struct irq_cfg *cfg = irqd_cfg(irqd); - union intcapxt xt; - - xt.capxt = 0ULL; - xt.dest_mode_logical = apic->dest_mode_logical; - xt.vector = cfg->vector; - xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); - xt.destid_24_31 = cfg->dest_apicid >> 24; - - /** - * Current IOMMU implemtation uses the same IRQ for all - * 3 IOMMU interrupts. - */ - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); - writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); return 0; } static void intcapxt_irqdomain_deactivate(struct irq_domain *domain, struct irq_data *irqd) { - intcapxt_mask_irq(irqd); } @@ -2073,6 +2396,7 @@ static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq struct irq_data *irqd = irq_domain_get_irq_data(domain, i); irqd->chip = &intcapxt_controller; + irqd->hwirq = info->hwirq; irqd->chip_data = info->data; __irq_set_handler(i, handle_edge_irq, 0, "edge"); } @@ -2086,6 +2410,30 @@ static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq irq_domain_free_irqs_top(domain, virq, nr_irqs); } + +static void intcapxt_unmask_irq(struct irq_data *irqd) +{ + struct amd_iommu *iommu = irqd->chip_data; + struct irq_cfg *cfg = irqd_cfg(irqd); + union intcapxt xt; + + xt.capxt = 0ULL; + xt.dest_mode_logical = apic->dest_mode_logical; + xt.vector = cfg->vector; + xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0); + xt.destid_24_31 = cfg->dest_apicid >> 24; + + writeq(xt.capxt, iommu->mmio_base + irqd->hwirq); +} + +static void intcapxt_mask_irq(struct irq_data *irqd) +{ + struct amd_iommu *iommu = irqd->chip_data; + + writeq(0, iommu->mmio_base + irqd->hwirq); +} + + static int intcapxt_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force) { @@ -2095,8 +2443,12 @@ static int intcapxt_set_affinity(struct irq_data *irqd, ret = parent->chip->irq_set_affinity(parent, mask, force); if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) return ret; + return 0; +} - return intcapxt_irqdomain_activate(irqd->domain, irqd, false); +static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on) +{ + return on ? 
-EOPNOTSUPP : 0; } static struct irq_chip intcapxt_controller = { @@ -2106,7 +2458,8 @@ static struct irq_chip intcapxt_controller = { .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_affinity = intcapxt_set_affinity, - .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_wake = intcapxt_set_wake, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_MOVE_DEFERRED, }; static const struct irq_domain_ops intcapxt_domain_ops = { @@ -2140,11 +2493,13 @@ static struct irq_domain *iommu_get_irqdomain(void) return iommu_irqdomain; } -static int iommu_setup_intcapxt(struct amd_iommu *iommu) +static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname, + int hwirq, irq_handler_t thread_fn) { struct irq_domain *domain; struct irq_alloc_info info; int irq, ret; + int node = dev_to_node(&iommu->dev->dev); domain = iommu_get_irqdomain(); if (!domain) @@ -2153,25 +2508,56 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu) init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_AMDVI; info.data = iommu; + info.hwirq = hwirq; - irq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); + irq = irq_domain_alloc_irqs(domain, 1, node, &info); if (irq < 0) { irq_domain_remove(domain); return irq; } ret = request_threaded_irq(irq, amd_iommu_int_handler, - amd_iommu_int_thread, 0, "AMD-Vi", iommu); + thread_fn, 0, devname, iommu); if (ret) { irq_domain_free_irqs(irq, 1); irq_domain_remove(domain); return ret; } - iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); return 0; } +static int iommu_setup_intcapxt(struct amd_iommu *iommu) +{ + int ret; + + snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name), + "AMD-Vi%d-Evt", iommu->index); + ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name, + MMIO_INTCAPXT_EVT_OFFSET, + amd_iommu_int_thread_evtlog); + if (ret) + return ret; + + snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name), + "AMD-Vi%d-PPR", iommu->index); + ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name, + MMIO_INTCAPXT_PPR_OFFSET, + amd_iommu_int_thread_pprlog); + if (ret) + return ret; + +#ifdef CONFIG_IRQ_REMAP + snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name), + "AMD-Vi%d-GA", iommu->index); + ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name, + MMIO_INTCAPXT_GALOG_OFFSET, + amd_iommu_int_thread_galog); +#endif + + return ret; +} + static int iommu_init_irq(struct amd_iommu *iommu) { int ret; @@ -2191,12 +2577,11 @@ static int iommu_init_irq(struct amd_iommu *iommu) iommu->int_enabled = true; enable_faults: - iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); - if (iommu->ppr_log != NULL) - iommu_feature_enable(iommu, CONTROL_PPRINT_EN); + if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) + iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); - iommu_ga_log_enable(iommu); + iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); return 0; } @@ -2212,19 +2597,28 @@ enable_faults: static void __init free_unity_maps(void) { struct unity_map_entry *entry, *next; + struct amd_iommu_pci_seg *p, *pci_seg; - list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { - list_del(&entry->list); - kfree(entry); + for_each_pci_segment_safe(pci_seg, p) { + list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) { + list_del(&entry->list); + kfree(entry); + } } } /* called for unity map ACPI definition */ -static int __init init_unity_map_range(struct ivmd_header *m) +static int __init init_unity_map_range(struct ivmd_header *m, + struct acpi_table_header *ivrs_base) { struct unity_map_entry *e = NULL; + struct 
amd_iommu_pci_seg *pci_seg; char *s; + pci_seg = get_pci_segment(m->pci_seg, ivrs_base); + if (pci_seg == NULL) + return -ENOMEM; + e = kzalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) return -ENOMEM; @@ -2240,7 +2634,7 @@ static int __init init_unity_map_range(struct ivmd_header *m) case ACPI_IVMD_TYPE_ALL: s = "IVMD_TYPE_ALL\t\t"; e->devid_start = 0; - e->devid_end = amd_iommu_last_bdf; + e->devid_end = pci_seg->last_bdf; break; case ACPI_IVMD_TYPE_RANGE: s = "IVMD_TYPE_RANGE\t\t"; @@ -2262,14 +2656,16 @@ static int __init init_unity_map_range(struct ivmd_header *m) if (m->flags & IVMD_FLAG_EXCL_RANGE) e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; - DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" - " range_start: %016llx range_end: %016llx flags: %x\n", s, + DUMP_printk("%s devid_start: %04x:%02x:%02x.%x devid_end: " + "%04x:%02x:%02x.%x range_start: %016llx range_end: %016llx" + " flags: %x\n", s, m->pci_seg, PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), - PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end), + PCI_FUNC(e->devid_start), m->pci_seg, + PCI_BUS_NUM(e->devid_end), PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), e->address_start, e->address_end, m->flags); - list_add_tail(&e->list, &amd_iommu_unity_map); + list_add_tail(&e->list, &pci_seg->unity_map); return 0; } @@ -2286,7 +2682,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table) while (p < end) { m = (struct ivmd_header *)p; if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE)) - init_unity_map_range(m); + init_unity_map_range(m, table); p += m->length; } @@ -2297,35 +2693,47 @@ static int __init init_memory_definitions(struct acpi_table_header *table) /* * Init the device table to not allow DMA access for devices */ -static void init_device_table_dma(void) +static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg) { u32 devid; + struct dev_table_entry *dev_table = pci_seg->dev_table; + + if (!dev_table || amd_iommu_pgtable == PD_MODE_NONE) + return; - for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { - set_dev_entry_bit(devid, DEV_ENTRY_VALID); - set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); + for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { + set_dte_bit(&dev_table[devid], DEV_ENTRY_VALID); + if (!amd_iommu_snp_en) + set_dte_bit(&dev_table[devid], DEV_ENTRY_TRANSLATION); } } -static void __init uninit_device_table_dma(void) +static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg) { u32 devid; + struct dev_table_entry *dev_table = pci_seg->dev_table; + + if (dev_table == NULL) + return; - for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { - amd_iommu_dev_table[devid].data[0] = 0ULL; - amd_iommu_dev_table[devid].data[1] = 0ULL; + for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { + dev_table[devid].data[0] = 0ULL; + dev_table[devid].data[1] = 0ULL; } } static void init_device_table(void) { + struct amd_iommu_pci_seg *pci_seg; u32 devid; if (!amd_iommu_irq_remap) return; - for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) - set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); + for_each_pci_segment(pci_seg) { + for (devid = 0; devid <= pci_seg->last_bdf; ++devid) + set_dte_bit(&pci_seg->dev_table[devid], DEV_ENTRY_IRQ_TBL_EN); + } } static void iommu_init_flags(struct amd_iommu *iommu) @@ -2352,7 +2760,11 @@ static void iommu_init_flags(struct amd_iommu *iommu) iommu_feature_enable(iommu, CONTROL_COHERENT_EN); /* Set IOTLB invalidation timeout to 1s */ - iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); + 
iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT); + + /* Enable Enhanced Peripheral Page Request Handling */ + if (check_feature(FEATURE_EPHSUP)) + iommu_feature_enable(iommu, CONTROL_EPH_EN); } static void iommu_apply_resume_quirks(struct amd_iommu *iommu) @@ -2403,8 +2815,6 @@ static void iommu_enable_ga(struct amd_iommu *iommu) #ifdef CONFIG_IRQ_REMAP switch (amd_iommu_guest_ir) { case AMD_IOMMU_GUEST_IR_VAPIC: - iommu_feature_enable(iommu, CONTROL_GAM_EN); - fallthrough; case AMD_IOMMU_GUEST_IR_LEGACY_GA: iommu_feature_enable(iommu, CONTROL_GA_EN); iommu->irte_ops = &irte_128_ops; @@ -2416,6 +2826,44 @@ static void iommu_enable_ga(struct amd_iommu *iommu) #endif } +static void iommu_disable_irtcachedis(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS); +} + +static void iommu_enable_irtcachedis(struct amd_iommu *iommu) +{ + u64 ctrl; + + if (!amd_iommu_irtcachedis) + return; + + /* + * Note: + * The support for IRTCacheDis feature is dertermined by + * checking if the bit is writable. + */ + iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS); + ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl &= (1ULL << CONTROL_IRTCACHEDIS); + if (ctrl) + iommu->irtcachedis_enabled = true; + pr_info("iommu%d (%#06x) : IRT cache is %s\n", + iommu->index, iommu->devid, + iommu->irtcachedis_enabled ? "disabled" : "enabled"); +} + +static void iommu_enable_2k_int(struct amd_iommu *iommu) +{ + if (!FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2)) + return; + + iommu_feature_set(iommu, + CONTROL_NUM_INT_REMAP_MODE_2K, + CONTROL_NUM_INT_REMAP_MODE_MASK, + CONTROL_NUM_INT_REMAP_MODE); +} + static void early_enable_iommu(struct amd_iommu *iommu) { iommu_disable(iommu); @@ -2424,79 +2872,150 @@ static void early_enable_iommu(struct amd_iommu *iommu) iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_set_exclusion_range(iommu); + iommu_enable_gt(iommu); iommu_enable_ga(iommu); iommu_enable_xt(iommu); + iommu_enable_irtcachedis(iommu); + iommu_enable_2k_int(iommu); iommu_enable(iommu); - iommu_flush_all_caches(iommu); + amd_iommu_flush_all_caches(iommu); } /* * This function finally enables all IOMMUs found in the system after * they have been initialized. * - * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy - * the old content of device table entries. Not this case or copy failed, + * Or if in kdump kernel and IOMMUs are all pre-enabled, try to reuse + * the old content of device table entries. Not this case or reuse failed, * just continue as normal kernel does. */ static void early_enable_iommus(void) { struct amd_iommu *iommu; + struct amd_iommu_pci_seg *pci_seg; - - if (!copy_device_table()) { + if (!reuse_device_table()) { /* - * If come here because of failure in copying device table from old + * If come here because of failure in reusing device table from old * kernel with all IOMMUs enabled, print error message and try to * free allocated old_dev_tbl_cpy. */ - if (amd_iommu_pre_enabled) - pr_err("Failed to copy DEV table from previous kernel.\n"); - if (old_dev_tbl_cpy != NULL) - free_pages((unsigned long)old_dev_tbl_cpy, - get_order(dev_table_size)); + if (amd_iommu_pre_enabled) { + pr_err("Failed to reuse DEV table from previous kernel.\n"); + /* + * Bail out early if unable to remap/reuse DEV table from + * previous kernel if SNP enabled as IOMMU commands will + * time out without DEV table and cause kdump boot panic. 
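The IRTCacheDis detection above has no dedicated feature flag; the driver simply sets the control bit and reads it back, since only an implemented (writable) bit will stick. A minimal, self-contained sketch of that write-then-read-back probe (the volatile pointer stands in for the mapped control register; the bit position is not spelled out here):

#include <stdbool.h>
#include <stdint.h>

/* A reserved (read-only-zero) control bit never reads back as 1. */
static bool probe_writable_bit(volatile uint64_t *reg, unsigned int bit)
{
	*reg |= 1ULL << bit;		/* try to set the bit */
	return (*reg >> bit) & 1;	/* sticks only if the feature is implemented */
}

The driver's pr_info() then reports the IRT cache state based on exactly this outcome.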
+ */ + BUG_ON(check_feature(FEATURE_SNP)); + } + + for_each_pci_segment(pci_seg) { + if (pci_seg->old_dev_tbl_cpy != NULL) { + memunmap((void *)pci_seg->old_dev_tbl_cpy); + pci_seg->old_dev_tbl_cpy = NULL; + } + } for_each_iommu(iommu) { clear_translation_pre_enabled(iommu); early_enable_iommu(iommu); } } else { - pr_info("Copied DEV table from previous kernel.\n"); - free_pages((unsigned long)amd_iommu_dev_table, - get_order(dev_table_size)); - amd_iommu_dev_table = old_dev_tbl_cpy; + pr_info("Reused DEV table from previous kernel.\n"); + + for_each_pci_segment(pci_seg) { + iommu_free_pages(pci_seg->dev_table); + pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; + } + for_each_iommu(iommu) { iommu_disable_command_buffer(iommu); iommu_disable_event_buffer(iommu); + iommu_disable_irtcachedis(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_enable_ga(iommu); iommu_enable_xt(iommu); + iommu_enable_irtcachedis(iommu); + iommu_enable_2k_int(iommu); iommu_set_device_table(iommu); - iommu_flush_all_caches(iommu); + amd_iommu_flush_all_caches(iommu); } } +} -#ifdef CONFIG_IRQ_REMAP - if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) - amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); -#endif +static void enable_iommus_ppr(void) +{ + struct amd_iommu *iommu; + + if (!amd_iommu_gt_ppr_supported()) + return; + + for_each_iommu(iommu) + amd_iommu_enable_ppr_log(iommu); } -static void enable_iommus_v2(void) +static void enable_iommus_vapic(void) { +#ifdef CONFIG_IRQ_REMAP + u32 status, i; struct amd_iommu *iommu; for_each_iommu(iommu) { - iommu_enable_ppr_log(iommu); - iommu_enable_gt(iommu); + /* + * Disable GALog if already running. It could have been enabled + * in the previous boot before kdump. + */ + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) + continue; + + iommu_feature_disable(iommu, CONTROL_GALOG_EN); + iommu_feature_disable(iommu, CONTROL_GAINT_EN); + + /* + * Need to set and poll check the GALOGRun bit to zero before + * we can set/ modify GA Log registers safely. 
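The loop that follows is the standard bounded-poll idiom. A generic kernel-style sketch under the same assumptions (readl()/udelay() as used here, iteration budget supplied by the caller):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll until (status & mask) clears, or give up after max_polls * 10us. */
static int wait_for_status_clear(void __iomem *status_reg, u32 mask,
				 unsigned int max_polls)
{
	unsigned int i;

	for (i = 0; i < max_polls; i++) {
		if (!(readl(status_reg) & mask))
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}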
+ */ + for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) { + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (!(status & MMIO_STATUS_GALOG_RUN_MASK)) + break; + udelay(10); + } + + if (WARN_ON(i >= MMIO_STATUS_TIMEOUT)) + return; } -} -static void enable_iommus(void) -{ - early_enable_iommus(); + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && + !check_feature(FEATURE_GAM_VAPIC)) { + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; + return; + } - enable_iommus_v2(); + if (amd_iommu_snp_en && + !FEATURE_SNPAVICSUP_GAM(amd_iommu_efr2)) { + pr_warn("Force to disable Virtual APIC due to SNP\n"); + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; + return; + } + + /* Enabling GAM and SNPAVIC support */ + for_each_iommu(iommu) { + if (iommu_init_ga_log(iommu) || + iommu_ga_log_enable(iommu)) + return; + + iommu_feature_enable(iommu, CONTROL_GAM_EN); + if (amd_iommu_snp_en) + iommu_feature_enable(iommu, CONTROL_SNPAVIC_EN); + } + + amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); + pr_info("Virtual APIC enabled\n"); +#endif } static void disable_iommus(void) @@ -2517,7 +3036,7 @@ static void disable_iommus(void) * disable suspend until real resume implemented */ -static void amd_iommu_resume(void) +static void amd_iommu_resume(void *data) { struct amd_iommu *iommu; @@ -2525,12 +3044,13 @@ static void amd_iommu_resume(void) iommu_apply_resume_quirks(iommu); /* re-load the hardware */ - enable_iommus(); + for_each_iommu(iommu) + early_enable_iommu(iommu); amd_iommu_enable_interrupts(); } -static int amd_iommu_suspend(void) +static int amd_iommu_suspend(void *data) { /* disable IOMMUs to go out of the way for BIOS */ disable_iommus(); @@ -2538,34 +3058,19 @@ static int amd_iommu_suspend(void) return 0; } -static struct syscore_ops amd_iommu_syscore_ops = { +static const struct syscore_ops amd_iommu_syscore_ops = { .suspend = amd_iommu_suspend, .resume = amd_iommu_resume, }; +static struct syscore amd_iommu_syscore = { + .ops = &amd_iommu_syscore_ops, +}; + static void __init free_iommu_resources(void) { - kmemleak_free(irq_lookup_table); - free_pages((unsigned long)irq_lookup_table, - get_order(rlookup_table_size)); - irq_lookup_table = NULL; - - kmem_cache_destroy(amd_iommu_irq_cache); - amd_iommu_irq_cache = NULL; - - free_pages((unsigned long)amd_iommu_rlookup_table, - get_order(rlookup_table_size)); - amd_iommu_rlookup_table = NULL; - - free_pages((unsigned long)amd_iommu_alias_table, - get_order(alias_table_size)); - amd_iommu_alias_table = NULL; - - free_pages((unsigned long)amd_iommu_dev_table, - get_order(dev_table_size)); - amd_iommu_dev_table = NULL; - free_iommu_all(); + free_pci_segments(); } /* SB IOAPIC is always on this device in AMD systems */ @@ -2622,9 +3127,7 @@ static bool __init check_ioapic_information(void) static void __init free_dma_resources(void) { - free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, - get_order(MAX_DOMAIN_ID/8)); - amd_iommu_pd_alloc_bitmap = NULL; + ida_destroy(&pdom_ids); free_unity_maps(); } @@ -2664,8 +3167,9 @@ static void __init ivinfo_init(void *ivrs) static int __init early_amd_iommu_init(void) { struct acpi_table_header *ivrs_base; - int i, remap_cache_sz, ret; + int ret; acpi_status status; + u8 efr_hats; if (!amd_iommu_detected) return -ENODEV; @@ -2679,6 +3183,12 @@ static int __init early_amd_iommu_init(void) return -EINVAL; } + if (!boot_cpu_has(X86_FEATURE_CX16)) { + pr_err("Failed to initialize. 
The CMPXCHG16B feature is required.\n"); + ret = -EINVAL; + goto out; + } + /* * Validate checksum here so we don't need to do it when * we actually parse the table @@ -2693,67 +3203,48 @@ static int __init early_amd_iommu_init(void) DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); /* - * First parse ACPI tables to find the largest Bus/Dev/Func - * we need to handle. Upon this information the shared data - * structures for the IOMMUs in the system will be allocated + * now the data structures are allocated and basically initialized + * start the real acpi table scan */ - ret = find_last_devid_acpi(ivrs_base); + ret = init_iommu_all(ivrs_base); if (ret) goto out; - dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); - alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); - rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); - - /* Device table - directly used by all IOMMUs */ - ret = -ENOMEM; - amd_iommu_dev_table = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO | GFP_DMA32, - get_order(dev_table_size)); - if (amd_iommu_dev_table == NULL) - goto out; - - /* - * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the - * IOMMU see for that device - */ - amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, - get_order(alias_table_size)); - if (amd_iommu_alias_table == NULL) - goto out; - - /* IOMMU rlookup table - find the IOMMU for a specific device */ - amd_iommu_rlookup_table = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(rlookup_table_size)); - if (amd_iommu_rlookup_table == NULL) - goto out; - - amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(MAX_DOMAIN_ID/8)); - if (amd_iommu_pd_alloc_bitmap == NULL) - goto out; + /* 5 level guest page table */ + if (cpu_feature_enabled(X86_FEATURE_LA57) && + FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL) + amd_iommu_gpt_level = PAGE_MODE_5_LEVEL; - /* - * let all alias entries point to itself - */ - for (i = 0; i <= amd_iommu_last_bdf; ++i) - amd_iommu_alias_table[i] = i; + efr_hats = FIELD_GET(FEATURE_HATS, amd_iommu_efr); + if (efr_hats != 0x3) { + /* + * efr[HATS] bits specify the maximum host translation level + * supported, with LEVEL 4 being initial max level. + */ + amd_iommu_hpt_level = efr_hats + PAGE_MODE_4_LEVEL; + } else { + pr_warn_once(FW_BUG "Disable host address translation due to invalid translation level (%#x).\n", + efr_hats); + amd_iommu_hatdis = true; + } - /* - * never allocate domain 0 because its used as the non-allocated and - * error value placeholder - */ - __set_bit(0, amd_iommu_pd_alloc_bitmap); + if (amd_iommu_pgtable == PD_MODE_V2) { + if (!amd_iommu_v2_pgtbl_supported()) { + pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); + amd_iommu_pgtable = PD_MODE_V1; + } + } - /* - * now the data structures are allocated and basically initialized - * start the real acpi table scan - */ - ret = init_iommu_all(ivrs_base); - if (ret) - goto out; + if (amd_iommu_hatdis) { + /* + * Host (v1) page table is not available. Attempt to use + * Guest (v2) page table. + */ + if (amd_iommu_v2_pgtbl_supported()) + amd_iommu_pgtable = PD_MODE_V2; + else + amd_iommu_pgtable = PD_MODE_NONE; + } /* Disable any previously enabled IOMMUs */ if (!is_kdump_kernel() || amd_iommu_disabled) @@ -2763,29 +3254,12 @@ static int __init early_amd_iommu_init(void) amd_iommu_irq_remap = check_ioapic_information(); if (amd_iommu_irq_remap) { - /* - * Interrupt remapping enabled, create kmem_cache for the - * remapping tables. 
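The EFR[HATS] decode above maps a two-bit field onto a maximum host page-table level, with 0x3 reserved. A runnable illustration of that mapping (PAGE_MODE_4_LEVEL assumed to be the numeric value 4, matching the "LEVEL 4 being initial max level" note in the hunk):

#include <stdio.h>

#define PAGE_MODE_4_LEVEL 4	/* assumed numeric value of the constant */

int main(void)
{
	unsigned int hats;

	for (hats = 0; hats <= 3; hats++) {
		if (hats == 0x3)
			printf("HATS=%u: reserved, host translation disabled\n", hats);
		else
			printf("HATS=%u: up to %u-level host page table\n",
			       hats, PAGE_MODE_4_LEVEL + hats);
	}
	return 0;
}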
- */ + struct amd_iommu_pci_seg *pci_seg; ret = -ENOMEM; - if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) - remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); - else - remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); - amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", - remap_cache_sz, - DTE_INTTAB_ALIGNMENT, - 0, NULL); - if (!amd_iommu_irq_cache) - goto out; - - irq_lookup_table = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(rlookup_table_size)); - kmemleak_alloc(irq_lookup_table, rlookup_table_size, - 1, GFP_KERNEL); - if (!irq_lookup_table) - goto out; + for_each_pci_segment(pci_seg) { + if (alloc_irq_lookup_table(pci_seg)) + goto out; + } } ret = init_memory_definitions(ivrs_base); @@ -2813,6 +3287,13 @@ static int amd_iommu_enable_interrupts(void) goto out; } + /* + * Interrupt handler is ready to process interrupts. Enable + * PPR and GA log interrupt for all IOMMUs. + */ + enable_iommus_vapic(); + enable_iommus_ppr(); + out: return ret; } @@ -2855,6 +3336,47 @@ out: return true; } +static __init void iommu_snp_enable(void) +{ +#ifdef CONFIG_KVM_AMD_SEV + if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) + return; + /* + * The SNP support requires that IOMMU must be enabled, and is + * configured with V1 page table (DTE[Mode] = 0 is not supported). + */ + if (no_iommu || iommu_default_passthrough()) { + pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n"); + goto disable_snp; + } + + if (amd_iommu_pgtable != PD_MODE_V1) { + pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n"); + goto disable_snp; + } + + amd_iommu_snp_en = check_feature(FEATURE_SNP); + if (!amd_iommu_snp_en) { + pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n"); + goto disable_snp; + } + + /* + * Enable host SNP support once SNP support is checked on IOMMU. + */ + if (snp_rmptable_init()) { + pr_warn("SNP: RMP initialization failed, SNP cannot be supported.\n"); + goto disable_snp; + } + + pr_info("IOMMU SNP support enabled.\n"); + return; + +disable_snp: + cc_platform_clear(CC_ATTR_HOST_SEV_SNP); +#endif +} + /**************************************************************************** * * AMD IOMMU Initialization State Machine @@ -2889,10 +3411,10 @@ static int __init state_next(void) init_state = IOMMU_ENABLED; break; case IOMMU_ENABLED: - register_syscore_ops(&amd_iommu_syscore_ops); + register_syscore(&amd_iommu_syscore); + iommu_snp_enable(); ret = amd_iommu_init_pci(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; - enable_iommus_v2(); break; case IOMMU_PCI_INIT: ret = amd_iommu_enable_interrupts(); @@ -2922,10 +3444,13 @@ static int __init state_next(void) free_iommu_resources(); } else { struct amd_iommu *iommu; + struct amd_iommu_pci_seg *pci_seg; + + for_each_pci_segment(pci_seg) + uninit_device_table_dma(pci_seg); - uninit_device_table_dma(); for_each_iommu(iommu) - iommu_flush_all_caches(iommu); + amd_iommu_flush_all_caches(iommu); } } return ret; @@ -2943,6 +3468,19 @@ static int __init iommu_go_to_state(enum iommu_init_state state) ret = state_next(); } + /* + * SNP platform initilazation requires IOMMUs to be fully configured. + * If the SNP support on IOMMUs has NOT been checked, simply mark SNP + * as unsupported. If the SNP support on IOMMUs has been checked and + * host SNP support enabled but RMP enforcement has not been enabled + * in IOMMUs, then the system is in a half-baked state, but can limp + * along as all memory should be Hypervisor-Owned in the RMP. 
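The loop just above drives a strictly forward-moving init state machine: each state_next() call performs one phase and either advances init_state or parks it at the error state. A compact stand-alone sketch of that control flow (state names abbreviated, per-phase work elided):

#include <stdio.h>

enum init_state { IVRS_DETECTED, ACPI_FINISHED, ENABLED, PCI_INIT,
		  INTERRUPTS_EN, INITIALIZED, INIT_ERROR };

static enum init_state cur_state = IVRS_DETECTED;

static int state_next(void)
{
	/* The real code does one phase's work here and may set INIT_ERROR. */
	if (cur_state < INITIALIZED)
		cur_state++;
	return 0;
}

static int go_to_state(enum init_state want)
{
	int ret = 0;

	while (cur_state != want && cur_state != INIT_ERROR && ret == 0)
		ret = state_next();
	return ret;
}

int main(void)
{
	go_to_state(ENABLED);		/* early detection stops part-way */
	go_to_state(INITIALIZED);	/* amd_iommu_init() finishes the walk */
	printf("state=%d\n", cur_state);
	return 0;
}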
WARN, + * but leave SNP as "supported" to avoid confusing the kernel. + */ + if (ret && cc_platform_has(CC_ATTR_HOST_SEV_SNP) && + !WARN_ON_ONCE(amd_iommu_snp_en)) + cc_platform_clear(CC_ATTR_HOST_SEV_SNP); + return ret; } @@ -2976,17 +3514,17 @@ int __init amd_iommu_enable(void) void amd_iommu_disable(void) { - amd_iommu_suspend(); + amd_iommu_suspend(NULL); } int amd_iommu_reenable(int mode) { - amd_iommu_resume(); + amd_iommu_resume(NULL); return 0; } -int __init amd_iommu_enable_faulting(void) +int amd_iommu_enable_faulting(unsigned int cpu) { /* We enable MSI later when PCI is initialized */ return 0; @@ -3000,7 +3538,6 @@ int __init amd_iommu_enable_faulting(void) */ static int __init amd_iommu_init(void) { - struct amd_iommu *iommu; int ret; ret = iommu_go_to_state(IOMMU_INITIALIZED); @@ -3014,15 +3551,16 @@ static int __init amd_iommu_init(void) } #endif - for_each_iommu(iommu) - amd_iommu_debugfs_setup(iommu); + if (!ret) + amd_iommu_debugfs_setup(); return ret; } static bool amd_iommu_sme_check(void) { - if (!sme_active() || (boot_cpu_data.x86 != 0x17)) + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) || + (boot_cpu_data.x86 != 0x17)) return true; /* For Fam17h, a specific level of support is required */ @@ -3045,25 +3583,28 @@ static bool amd_iommu_sme_check(void) * IOMMUs * ****************************************************************************/ -int __init amd_iommu_detect(void) +void __init amd_iommu_detect(void) { int ret; if (no_iommu || (iommu_detected && !gart_iommu_aperture)) - return -ENODEV; + goto disable_snp; if (!amd_iommu_sme_check()) - return -ENODEV; + goto disable_snp; ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); if (ret) - return ret; + goto disable_snp; amd_iommu_detected = true; iommu_detected = 1; x86_init.iommu.iommu_init = amd_iommu_init; + return; - return 1; +disable_snp: + if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) + cc_platform_clear(CC_ATTR_HOST_SEV_SNP); } /**************************************************************************** @@ -3097,15 +3638,38 @@ static int __init parse_amd_iommu_intr(char *str) static int __init parse_amd_iommu_options(char *str) { - for (; *str; ++str) { - if (strncmp(str, "fullflush", 9) == 0) - amd_iommu_unmap_flush = true; - if (strncmp(str, "force_enable", 12) == 0) + if (!str) + return -EINVAL; + + while (*str) { + if (strncmp(str, "fullflush", 9) == 0) { + pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n"); + iommu_set_dma_strict(); + } else if (strncmp(str, "force_enable", 12) == 0) { amd_iommu_force_enable = true; - if (strncmp(str, "off", 3) == 0) + } else if (strncmp(str, "off", 3) == 0) { amd_iommu_disabled = true; - if (strncmp(str, "force_isolation", 15) == 0) + } else if (strncmp(str, "force_isolation", 15) == 0) { amd_iommu_force_isolation = true; + } else if (strncmp(str, "pgtbl_v1", 8) == 0) { + amd_iommu_pgtable = PD_MODE_V1; + } else if (strncmp(str, "pgtbl_v2", 8) == 0) { + amd_iommu_pgtable = PD_MODE_V2; + } else if (strncmp(str, "irtcachedis", 11) == 0) { + amd_iommu_irtcachedis = true; + } else if (strncmp(str, "nohugepages", 11) == 0) { + pr_info("Restricting V1 page-sizes to 4KiB"); + amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_4K; + } else if (strncmp(str, "v2_pgsizes_only", 15) == 0) { + pr_info("Restricting V1 page-sizes to 4KiB/2MiB/1GiB"); + amd_iommu_pgsize_bitmap = AMD_IOMMU_PGSIZES_V2; + } else { + pr_notice("Unknown option - '%s'\n", str); + } + + str += strcspn(str, ","); + while (*str == ',') + str++; } return 1; @@ -3113,24 +3677,32 @@ static int __init 
parse_amd_iommu_options(char *str) static int __init parse_ivrs_ioapic(char *str) { - unsigned int bus, dev, fn; - int ret, id, i; - u16 devid; + u32 seg = 0, bus, dev, fn; + int id, i; + u32 devid; - ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); + if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) + goto found; - if (ret != 4) { - pr_err("Invalid command line: ivrs_ioapic%s\n", str); - return 1; + if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { + pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n", + str, id, seg, bus, dev, fn); + goto found; } + pr_err("Invalid command line: ivrs_ioapic%s\n", str); + return 1; + +found: if (early_ioapic_map_size == EARLY_MAP_SIZE) { pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", str); return 1; } - devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); + devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); cmdline_maps = true; i = early_ioapic_map_size++; @@ -3143,24 +3715,32 @@ static int __init parse_ivrs_ioapic(char *str) static int __init parse_ivrs_hpet(char *str) { - unsigned int bus, dev, fn; - int ret, id, i; - u16 devid; + u32 seg = 0, bus, dev, fn; + int id, i; + u32 devid; - ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); + if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) + goto found; - if (ret != 4) { - pr_err("Invalid command line: ivrs_hpet%s\n", str); - return 1; + if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || + sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { + pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n", + str, id, seg, bus, dev, fn); + goto found; } + pr_err("Invalid command line: ivrs_hpet%s\n", str); + return 1; + +found: if (early_hpet_map_size == EARLY_MAP_SIZE) { pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", str); return 1; } - devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); + devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); cmdline_maps = true; i = early_hpet_map_size++; @@ -3171,19 +3751,53 @@ static int __init parse_ivrs_hpet(char *str) return 1; } +#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN) + static int __init parse_ivrs_acpihid(char *str) { - u32 bus, dev, fn; - char *hid, *uid, *p; - char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; - int ret, i; + u32 seg = 0, bus, dev, fn; + char *hid, *uid, *p, *addr; + char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */ + int i; - ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid); - if (ret != 4) { - pr_err("Invalid command line: ivrs_acpihid(%s)\n", str); - return 1; + addr = strchr(str, '@'); + if (!addr) { + addr = strchr(str, '='); + if (!addr) + goto not_found; + + ++addr; + + if (strlen(addr) > ACPIID_LEN) + goto not_found; + + if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 || + sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) { + pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n", + str, acpiid, seg, bus, dev, fn); + goto found; + } + goto not_found; } + /* We have the '@', make it the terminator to get just the acpiid */ + *addr++ = 0; + + if (strlen(str) > ACPIID_LEN) + goto not_found; + + if (sscanf(str, "=%s", acpiid) != 1) + 
goto not_found; + + if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 || + sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4) + goto found; + +not_found: + pr_err("Invalid command line: ivrs_acpihid%s\n", str); + return 1; + +found: p = acpiid; hid = strsep(&p, ":"); uid = p; @@ -3193,11 +3807,25 @@ static int __init parse_ivrs_acpihid(char *str) return 1; } + /* + * Ignore leading zeroes after ':', so e.g., AMDI0095:00 + * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match + */ + while (*uid == '0' && *(uid + 1)) + uid++; + + if (strlen(hid) >= ACPIHID_HID_LEN) { + pr_err("Invalid command line: hid is too long\n"); + return 1; + } else if (strlen(uid) >= ACPIHID_UID_LEN) { + pr_err("Invalid command line: uid is too long\n"); + return 1; + } + i = early_acpihid_map_size++; memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); - early_acpihid_map[i].devid = - ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); + early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn); early_acpihid_map[i].cmd_line = true; return 1; @@ -3210,16 +3838,20 @@ __setup("ivrs_ioapic", parse_ivrs_ioapic); __setup("ivrs_hpet", parse_ivrs_hpet); __setup("ivrs_acpihid", parse_ivrs_acpihid); -IOMMU_INIT_FINISH(amd_iommu_detect, - gart_iommu_hole_init, - NULL, - NULL); - -bool amd_iommu_v2_supported(void) +bool amd_iommu_pasid_supported(void) { - return amd_iommu_v2_present; + /* CPU page table size should match IOMMU guest page table size */ + if (cpu_feature_enabled(X86_FEATURE_LA57) && + amd_iommu_gpt_level != PAGE_MODE_5_LEVEL) + return false; + + /* + * Since DTE[Mode]=0 is prohibited on SNP-enabled system + * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without + * setting up IOMMUv1 page table. 
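All three ivrs_* options above funnel the parsed seg/bus/dev/fn values into IVRS_GET_SBDF_ID(). Its definition is not shown in this hunk, but the old bus/slot/function packing and the %04x:%02x:%02x.%d format hints imply the conventional layout, with the PCI segment stacked above a standard 16-bit BDF. An illustrative stand-alone version under that assumption:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: segment in bits 31:16, bus 15:8, device 7:3, function 2:0. */
static uint32_t ivrs_get_sbdf_id(uint32_t seg, uint32_t bus, uint32_t dev,
				 uint32_t fn)
{
	return (seg << 16) | ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
}

int main(void)
{
	/* e.g. ivrs_ioapic=32@0001:00:14.0 */
	printf("devid=%#x\n", ivrs_get_sbdf_id(0x1, 0x00, 0x14, 0x0));
	return 0;
}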
+ */ + return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en; } -EXPORT_SYMBOL(amd_iommu_v2_supported); struct amd_iommu *get_amd_iommu(unsigned int idx) { @@ -3248,13 +3880,11 @@ u8 amd_iommu_pc_get_max_banks(unsigned int idx) return 0; } -EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); bool amd_iommu_pc_supported(void) { return amd_iommu_pc_present; } -EXPORT_SYMBOL(amd_iommu_pc_supported); u8 amd_iommu_pc_get_max_counters(unsigned int idx) { @@ -3265,7 +3895,6 @@ u8 amd_iommu_pc_get_max_counters(unsigned int idx) return 0; } -EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value, bool is_write) @@ -3320,3 +3949,92 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); } + +#ifdef CONFIG_KVM_AMD_SEV +static int iommu_page_make_shared(void *page) +{ + unsigned long paddr, pfn; + + paddr = iommu_virt_to_phys(page); + /* Cbit maybe set in the paddr */ + pfn = __sme_clr(paddr) >> PAGE_SHIFT; + + if (!(pfn % PTRS_PER_PMD)) { + int ret, level; + bool assigned; + + ret = snp_lookup_rmpentry(pfn, &assigned, &level); + if (ret) { + pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret); + return ret; + } + + if (!assigned) { + pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn); + return -EINVAL; + } + + if (level > PG_LEVEL_4K) { + ret = psmash(pfn); + if (!ret) + goto done; + + pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n", + pfn, ret, level); + return ret; + } + } + +done: + return rmp_make_shared(pfn, PG_LEVEL_4K); +} + +static int iommu_make_shared(void *va, size_t size) +{ + void *page; + int ret; + + if (!va) + return 0; + + for (page = va; page < (va + size); page += PAGE_SIZE) { + ret = iommu_page_make_shared(page); + if (ret) + return ret; + } + + return 0; +} + +int amd_iommu_snp_disable(void) +{ + struct amd_iommu *iommu; + int ret; + + if (!amd_iommu_snp_en) + return 0; + + for_each_iommu(iommu) { + ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE); + if (ret) + return ret; + + ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE); + if (ret) + return ret; + + ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(amd_iommu_snp_disable); + +bool amd_iommu_sev_tio_supported(void) +{ + return check_feature2(FEATURE_SEVSNPIO_SUP); +} +EXPORT_SYMBOL_GPL(amd_iommu_sev_tio_supported); +#endif diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c deleted file mode 100644 index bb0ee5c9fde7..000000000000 --- a/drivers/iommu/amd/io_pgtable.c +++ /dev/null @@ -1,560 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * CPU-agnostic AMD IO page table allocator. - * - * Copyright (C) 2020 Advanced Micro Devices, Inc. 
- * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> - */ - -#define pr_fmt(fmt) "AMD-Vi: " fmt -#define dev_fmt(fmt) pr_fmt(fmt) - -#include <linux/atomic.h> -#include <linux/bitops.h> -#include <linux/io-pgtable.h> -#include <linux/kernel.h> -#include <linux/sizes.h> -#include <linux/slab.h> -#include <linux/types.h> -#include <linux/dma-mapping.h> - -#include <asm/barrier.h> - -#include "amd_iommu_types.h" -#include "amd_iommu.h" - -static void v1_tlb_flush_all(void *cookie) -{ -} - -static void v1_tlb_flush_walk(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ -} - -static void v1_tlb_add_page(struct iommu_iotlb_gather *gather, - unsigned long iova, size_t granule, - void *cookie) -{ -} - -static const struct iommu_flush_ops v1_flush_ops = { - .tlb_flush_all = v1_tlb_flush_all, - .tlb_flush_walk = v1_tlb_flush_walk, - .tlb_add_page = v1_tlb_add_page, -}; - -/* - * Helper function to get the first pte of a large mapping - */ -static u64 *first_pte_l7(u64 *pte, unsigned long *page_size, - unsigned long *count) -{ - unsigned long pte_mask, pg_size, cnt; - u64 *fpte; - - pg_size = PTE_PAGE_SIZE(*pte); - cnt = PAGE_SIZE_PTE_COUNT(pg_size); - pte_mask = ~((cnt << 3) - 1); - fpte = (u64 *)(((unsigned long)pte) & pte_mask); - - if (page_size) - *page_size = pg_size; - - if (count) - *count = cnt; - - return fpte; -} - -/**************************************************************************** - * - * The functions below are used the create the page table mappings for - * unity mapped regions. - * - ****************************************************************************/ - -static void free_page_list(struct page *freelist) -{ - while (freelist != NULL) { - unsigned long p = (unsigned long)page_address(freelist); - - freelist = freelist->freelist; - free_page(p); - } -} - -static struct page *free_pt_page(unsigned long pt, struct page *freelist) -{ - struct page *p = virt_to_page((void *)pt); - - p->freelist = freelist; - - return p; -} - -#define DEFINE_FREE_PT_FN(LVL, FN) \ -static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \ -{ \ - unsigned long p; \ - u64 *pt; \ - int i; \ - \ - pt = (u64 *)__pt; \ - \ - for (i = 0; i < 512; ++i) { \ - /* PTE present? */ \ - if (!IOMMU_PTE_PRESENT(pt[i])) \ - continue; \ - \ - /* Large PTE? 
*/ \ - if (PM_PTE_LEVEL(pt[i]) == 0 || \ - PM_PTE_LEVEL(pt[i]) == 7) \ - continue; \ - \ - p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \ - freelist = FN(p, freelist); \ - } \ - \ - return free_pt_page((unsigned long)pt, freelist); \ -} - -DEFINE_FREE_PT_FN(l2, free_pt_page) -DEFINE_FREE_PT_FN(l3, free_pt_l2) -DEFINE_FREE_PT_FN(l4, free_pt_l3) -DEFINE_FREE_PT_FN(l5, free_pt_l4) -DEFINE_FREE_PT_FN(l6, free_pt_l5) - -static struct page *free_sub_pt(unsigned long root, int mode, - struct page *freelist) -{ - switch (mode) { - case PAGE_MODE_NONE: - case PAGE_MODE_7_LEVEL: - break; - case PAGE_MODE_1_LEVEL: - freelist = free_pt_page(root, freelist); - break; - case PAGE_MODE_2_LEVEL: - freelist = free_pt_l2(root, freelist); - break; - case PAGE_MODE_3_LEVEL: - freelist = free_pt_l3(root, freelist); - break; - case PAGE_MODE_4_LEVEL: - freelist = free_pt_l4(root, freelist); - break; - case PAGE_MODE_5_LEVEL: - freelist = free_pt_l5(root, freelist); - break; - case PAGE_MODE_6_LEVEL: - freelist = free_pt_l6(root, freelist); - break; - default: - BUG(); - } - - return freelist; -} - -void amd_iommu_domain_set_pgtable(struct protection_domain *domain, - u64 *root, int mode) -{ - u64 pt_root; - - /* lowest 3 bits encode pgtable mode */ - pt_root = mode & 7; - pt_root |= (u64)root; - - amd_iommu_domain_set_pt_root(domain, pt_root); -} - -/* - * This function is used to add another level to an IO page table. Adding - * another level increases the size of the address space by 9 bits to a size up - * to 64 bits. - */ -static bool increase_address_space(struct protection_domain *domain, - unsigned long address, - gfp_t gfp) -{ - unsigned long flags; - bool ret = true; - u64 *pte; - - pte = (void *)get_zeroed_page(gfp); - if (!pte) - return false; - - spin_lock_irqsave(&domain->lock, flags); - - if (address <= PM_LEVEL_SIZE(domain->iop.mode)) - goto out; - - ret = false; - if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL)) - goto out; - - *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root)); - - domain->iop.root = pte; - domain->iop.mode += 1; - amd_iommu_update_and_flush_device_table(domain); - amd_iommu_domain_flush_complete(domain); - - /* - * Device Table needs to be updated and flushed before the new root can - * be published. - */ - amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode); - - pte = NULL; - ret = true; - -out: - spin_unlock_irqrestore(&domain->lock, flags); - free_page((unsigned long)pte); - - return ret; -} - -static u64 *alloc_pte(struct protection_domain *domain, - unsigned long address, - unsigned long page_size, - u64 **pte_page, - gfp_t gfp, - bool *updated) -{ - int level, end_lvl; - u64 *pte, *page; - - BUG_ON(!is_power_of_2(page_size)); - - while (address > PM_LEVEL_SIZE(domain->iop.mode)) { - /* - * Return an error if there is no memory to update the - * page-table. - */ - if (!increase_address_space(domain, address, gfp)) - return NULL; - } - - - level = domain->iop.mode - 1; - pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)]; - address = PAGE_SIZE_ALIGN(address, page_size); - end_lvl = PAGE_SIZE_LEVEL(page_size); - - while (level > end_lvl) { - u64 __pte, __npte; - int pte_level; - - __pte = *pte; - pte_level = PM_PTE_LEVEL(__pte); - - /* - * If we replace a series of large PTEs, we need - * to tear down all of them. 
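Both the teardown of replicated large-PTE entries described here and the installation of a freshly allocated table page a little further down go through cmpxchg64(), so a racing mapper can never observe a half-published entry. The install step, sketched with C11 atomics in place of the kernel primitive:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Publish new_pte only if *slot still holds the value read earlier; on
 * failure the caller frees its new table page and re-walks the slot. */
static bool install_pte(_Atomic uint64_t *slot, uint64_t seen, uint64_t new_pte)
{
	return atomic_compare_exchange_strong(slot, &seen, new_pte);
}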
- */ - if (IOMMU_PTE_PRESENT(__pte) && - pte_level == PAGE_MODE_7_LEVEL) { - unsigned long count, i; - u64 *lpte; - - lpte = first_pte_l7(pte, NULL, &count); - - /* - * Unmap the replicated PTEs that still match the - * original large mapping - */ - for (i = 0; i < count; ++i) - cmpxchg64(&lpte[i], __pte, 0ULL); - - *updated = true; - continue; - } - - if (!IOMMU_PTE_PRESENT(__pte) || - pte_level == PAGE_MODE_NONE) { - page = (u64 *)get_zeroed_page(gfp); - - if (!page) - return NULL; - - __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page)); - - /* pte could have been changed somewhere. */ - if (cmpxchg64(pte, __pte, __npte) != __pte) - free_page((unsigned long)page); - else if (IOMMU_PTE_PRESENT(__pte)) - *updated = true; - - continue; - } - - /* No level skipping support yet */ - if (pte_level != level) - return NULL; - - level -= 1; - - pte = IOMMU_PTE_PAGE(__pte); - - if (pte_page && level == end_lvl) - *pte_page = pte; - - pte = &pte[PM_LEVEL_INDEX(level, address)]; - } - - return pte; -} - -/* - * This function checks if there is a PTE for a given dma address. If - * there is one, it returns the pointer to it. - */ -static u64 *fetch_pte(struct amd_io_pgtable *pgtable, - unsigned long address, - unsigned long *page_size) -{ - int level; - u64 *pte; - - *page_size = 0; - - if (address > PM_LEVEL_SIZE(pgtable->mode)) - return NULL; - - level = pgtable->mode - 1; - pte = &pgtable->root[PM_LEVEL_INDEX(level, address)]; - *page_size = PTE_LEVEL_PAGE_SIZE(level); - - while (level > 0) { - - /* Not Present */ - if (!IOMMU_PTE_PRESENT(*pte)) - return NULL; - - /* Large PTE */ - if (PM_PTE_LEVEL(*pte) == 7 || - PM_PTE_LEVEL(*pte) == 0) - break; - - /* No level skipping support yet */ - if (PM_PTE_LEVEL(*pte) != level) - return NULL; - - level -= 1; - - /* Walk to the next level */ - pte = IOMMU_PTE_PAGE(*pte); - pte = &pte[PM_LEVEL_INDEX(level, address)]; - *page_size = PTE_LEVEL_PAGE_SIZE(level); - } - - /* - * If we have a series of large PTEs, make - * sure to return a pointer to the first one. - */ - if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL) - pte = first_pte_l7(pte, page_size, NULL); - - return pte; -} - -static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist) -{ - unsigned long pt; - int mode; - - while (cmpxchg64(pte, pteval, 0) != pteval) { - pr_warn("AMD-Vi: IOMMU pte changed since we read it\n"); - pteval = *pte; - } - - if (!IOMMU_PTE_PRESENT(pteval)) - return freelist; - - pt = (unsigned long)IOMMU_PTE_PAGE(pteval); - mode = IOMMU_PTE_MODE(pteval); - - return free_sub_pt(pt, mode, freelist); -} - -/* - * Generic mapping functions. It maps a physical address into a DMA - * address space. It allocates the page table pages if necessary. - * In the future it can be extended to a generic mapping function - * supporting all features of AMD IOMMU page tables like level skipping - * and full 64 bit address spaces. 
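The alloc_pte()/fetch_pte() walks above peel 9 index bits per level off the IOVA, above a 12-bit page offset (the PM_LEVEL_INDEX() arithmetic). A runnable illustration of that indexing:

#include <stdint.h>
#include <stdio.h>

/* 9 translation bits per level above the 12-bit page offset (v1 layout). */
static unsigned int pm_level_index(unsigned int level, uint64_t iova)
{
	return (iova >> (12 + 9 * level)) & 0x1ff;
}

int main(void)
{
	uint64_t iova = 0x7f00deadb000ULL;
	int level;

	for (level = 3; level >= 0; level--)	/* a 4-level walk, for example */
		printf("level %d index: %u\n", level, pm_level_index(level, iova));
	return 0;
}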
- */ -static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) -{ - struct protection_domain *dom = io_pgtable_ops_to_domain(ops); - struct page *freelist = NULL; - bool updated = false; - u64 __pte, *pte; - int ret, i, count; - - BUG_ON(!IS_ALIGNED(iova, size)); - BUG_ON(!IS_ALIGNED(paddr, size)); - - ret = -EINVAL; - if (!(prot & IOMMU_PROT_MASK)) - goto out; - - count = PAGE_SIZE_PTE_COUNT(size); - pte = alloc_pte(dom, iova, size, NULL, gfp, &updated); - - ret = -ENOMEM; - if (!pte) - goto out; - - for (i = 0; i < count; ++i) - freelist = free_clear_pte(&pte[i], pte[i], freelist); - - if (freelist != NULL) - updated = true; - - if (count > 1) { - __pte = PAGE_SIZE_PTE(__sme_set(paddr), size); - __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC; - } else - __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC; - - if (prot & IOMMU_PROT_IR) - __pte |= IOMMU_PTE_IR; - if (prot & IOMMU_PROT_IW) - __pte |= IOMMU_PTE_IW; - - for (i = 0; i < count; ++i) - pte[i] = __pte; - - ret = 0; - -out: - if (updated) { - unsigned long flags; - - spin_lock_irqsave(&dom->lock, flags); - /* - * Flush domain TLB(s) and wait for completion. Any Device-Table - * Updates and flushing already happened in - * increase_address_space(). - */ - amd_iommu_domain_flush_tlb_pde(dom); - amd_iommu_domain_flush_complete(dom); - spin_unlock_irqrestore(&dom->lock, flags); - } - - /* Everything flushed out, free pages now */ - free_page_list(freelist); - - return ret; -} - -static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops, - unsigned long iova, - size_t size, - struct iommu_iotlb_gather *gather) -{ - struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); - unsigned long long unmapped; - unsigned long unmap_size; - u64 *pte; - - BUG_ON(!is_power_of_2(size)); - - unmapped = 0; - - while (unmapped < size) { - pte = fetch_pte(pgtable, iova, &unmap_size); - if (pte) { - int i, count; - - count = PAGE_SIZE_PTE_COUNT(unmap_size); - for (i = 0; i < count; i++) - pte[i] = 0ULL; - } - - iova = (iova & ~(unmap_size - 1)) + unmap_size; - unmapped += unmap_size; - } - - BUG_ON(unmapped && !is_power_of_2(unmapped)); - - return unmapped; -} - -static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova) -{ - struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); - unsigned long offset_mask, pte_pgsize; - u64 *pte, __pte; - - if (pgtable->mode == PAGE_MODE_NONE) - return iova; - - pte = fetch_pte(pgtable, iova, &pte_pgsize); - - if (!pte || !IOMMU_PTE_PRESENT(*pte)) - return 0; - - offset_mask = pte_pgsize - 1; - __pte = __sme_clr(*pte & PM_ADDR_MASK); - - return (__pte & ~offset_mask) | (iova & offset_mask); -} - -/* - * ---------------------------------------------------- - */ -static void v1_free_pgtable(struct io_pgtable *iop) -{ - struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop); - struct protection_domain *dom; - struct page *freelist = NULL; - unsigned long root; - - if (pgtable->mode == PAGE_MODE_NONE) - return; - - dom = container_of(pgtable, struct protection_domain, iop); - - /* Update data structure */ - amd_iommu_domain_clr_pt_root(dom); - - /* Make changes visible to IOMMUs */ - amd_iommu_domain_update(dom); - - /* Page-table is not visible to IOMMU anymore, so free it */ - BUG_ON(pgtable->mode < PAGE_MODE_NONE || - pgtable->mode > PAGE_MODE_6_LEVEL); - - root = (unsigned long)pgtable->root; - freelist = free_sub_pt(root, pgtable->mode, freelist); - - 
free_page_list(freelist); -} - -static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) -{ - struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg); - - cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES, - cfg->ias = IOMMU_IN_ADDR_BIT_SIZE, - cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE, - cfg->tlb = &v1_flush_ops; - - pgtable->iop.ops.map = iommu_v1_map_page; - pgtable->iop.ops.unmap = iommu_v1_unmap_page; - pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys; - - return &pgtable->iop; -} - -struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = { - .alloc = v1_alloc_pgtable, - .free = v1_free_pgtable, -}; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 811a49a95d04..9f1d56a5e145 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -11,26 +11,26 @@ #include <linux/ratelimit.h> #include <linux/pci.h> #include <linux/acpi.h> -#include <linux/amba/bus.h> -#include <linux/platform_device.h> #include <linux/pci-ats.h> #include <linux/bitmap.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/debugfs.h> #include <linux/scatterlist.h> #include <linux/dma-map-ops.h> #include <linux/dma-direct.h> -#include <linux/dma-iommu.h> +#include <linux/idr.h> #include <linux/iommu-helper.h> #include <linux/delay.h> #include <linux/amd-iommu.h> #include <linux/notifier.h> #include <linux/export.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/msi.h> #include <linux/irqdomain.h> #include <linux/percpu.h> -#include <linux/io-pgtable.h> +#include <linux/cc_platform.h> #include <asm/irq_remapping.h> #include <asm/io_apic.h> #include <asm/apic.h> @@ -39,54 +39,51 @@ #include <asm/iommu.h> #include <asm/gart.h> #include <asm/dma.h> +#include <uapi/linux/iommufd.h> +#include <linux/generic_pt/iommu.h> #include "amd_iommu.h" #include "../irq_remapping.h" +#include "../iommu-pages.h" #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) -#define LOOP_TIMEOUT 100000 - -/* IO virtual address start page frame number */ -#define IOVA_START_PFN (1) -#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) - /* Reserved IOVA ranges */ #define MSI_RANGE_START (0xfee00000) #define MSI_RANGE_END (0xfeefffff) #define HT_RANGE_START (0xfd00000000ULL) #define HT_RANGE_END (0xffffffffffULL) -#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL - -static DEFINE_SPINLOCK(pd_bitmap_lock); - -/* List of all available dev_data structures */ -static LLIST_HEAD(dev_data_list); - LIST_HEAD(ioapic_map); LIST_HEAD(hpet_map); LIST_HEAD(acpihid_map); -/* - * Domain for untranslated devices - only allocated - * if iommu=pt passed on kernel cmd line. - */ const struct iommu_ops amd_iommu_ops; -static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; /* - * general struct to manage commands send to an IOMMU + * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap + * to know which ones are already in use. 
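With DEFINE_IDA(pdom_ids) just below, the old global allocation bitmap and its spinlock collapse into one self-locking allocator, and the "never allocate domain 0" rule becomes the range's lower bound. A sketch of the likely call pattern (the exact bounds and GFP flags are assumptions, not shown in this hunk):

#include <linux/idr.h>

#define MAX_DOMAIN_ID (1 << 16)	/* assumed: 2^16 protection domains */

/* ID 0 stays reserved as the non-allocated/error placeholder. */
static int pdom_id_alloc(void)
{
	return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
}

static void pdom_id_free(int id)
{
	ida_free(&pdom_ids, id);
}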
*/ -struct iommu_cmd { - u32 data[4]; -}; +DEFINE_IDA(pdom_ids); + +static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev, + struct iommu_domain *old); + +static void set_dte_entry(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data, + phys_addr_t top_paddr, unsigned int top_level); + +static void amd_iommu_change_top(struct pt_iommu *iommu_table, + phys_addr_t top_paddr, unsigned int top_level); -struct kmem_cache *amd_iommu_irq_cache; +static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid); -static void detach_device(struct device *dev); +static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid); +static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain); +static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, + bool enable); /**************************************************************************** * @@ -94,18 +91,159 @@ static void detach_device(struct device *dev); * ****************************************************************************/ -static inline u16 get_pci_device_id(struct device *dev) +static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val) { - struct pci_dev *pdev = to_pci_dev(dev); + /* + * Note: + * We use arch_cmpxchg128_local() because: + * - Need cmpxchg16b instruction mainly for 128-bit store to DTE + * (not necessary for cmpxchg since this function is already + * protected by a spin_lock for this DTE). + * - Neither need LOCK_PREFIX nor try loop because of the spin_lock. + */ + arch_cmpxchg128_local(ptr, *ptr, val); +} + +static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new) +{ + struct dev_table_entry old; + + old.data128[1] = ptr->data128[1]; + /* + * Preserve DTE_DATA2_INTR_MASK. This needs to be + * done here since it requires to be inside + * spin_lock(&dev_data->dte_lock) context. + */ + new->data[2] &= ~DTE_DATA2_INTR_MASK; + new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK; + + amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]); +} - return pci_dev_id(pdev); +static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new) +{ + amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]); +} + +/* + * Note: + * IOMMU reads the entire Device Table entry in a single 256-bit transaction + * but the driver is programming DTE using 2 128-bit cmpxchg. So, the driver + * need to ensure the following: + * - DTE[V|GV] bit is being written last when setting. + * - DTE[V|GV] bit is being written first when clearing. + * + * This function is used only by code, which updates DMA translation part of the DTE. + * So, only consider control bits related to DMA when updating the entry. + */ +static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data, + struct dev_table_entry *new) +{ + unsigned long flags; + struct dev_table_entry *dev_table = get_dev_table(iommu); + struct dev_table_entry *ptr = &dev_table[dev_data->devid]; + + spin_lock_irqsave(&dev_data->dte_lock, flags); + + if (!(ptr->data[0] & DTE_FLAG_V)) { + /* Existing DTE is not valid. */ + write_dte_upper128(ptr, new); + write_dte_lower128(ptr, new); + iommu_flush_dte_sync(iommu, dev_data->devid); + } else if (!(new->data[0] & DTE_FLAG_V)) { + /* Existing DTE is valid. New DTE is not valid. */ + write_dte_lower128(ptr, new); + write_dte_upper128(ptr, new); + iommu_flush_dte_sync(iommu, dev_data->devid); + } else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) { + /* + * Both DTEs are valid. 
+ * Existing DTE has no guest page table. + */ + write_dte_upper128(ptr, new); + write_dte_lower128(ptr, new); + iommu_flush_dte_sync(iommu, dev_data->devid); + } else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) { + /* + * Both DTEs are valid. + * Existing DTE has guest page table, + * new DTE has no guest page table, + */ + write_dte_lower128(ptr, new); + write_dte_upper128(ptr, new); + iommu_flush_dte_sync(iommu, dev_data->devid); + } else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) != + FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) { + /* + * Both DTEs are valid and have guest page table, + * but have different number of levels. So, we need + * to upadte both upper and lower 128-bit value, which + * require disabling and flushing. + */ + struct dev_table_entry clear = {}; + + /* First disable DTE */ + write_dte_lower128(ptr, &clear); + iommu_flush_dte_sync(iommu, dev_data->devid); + + /* Then update DTE */ + write_dte_upper128(ptr, new); + write_dte_lower128(ptr, new); + iommu_flush_dte_sync(iommu, dev_data->devid); + } else { + /* + * Both DTEs are valid and have guest page table, + * and same number of levels. We just need to only + * update the lower 128-bit. So no need to disable DTE. + */ + write_dte_lower128(ptr, new); + } + + spin_unlock_irqrestore(&dev_data->dte_lock, flags); +} + +static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data, + struct dev_table_entry *dte) +{ + unsigned long flags; + struct dev_table_entry *ptr; + struct dev_table_entry *dev_table = get_dev_table(iommu); + + ptr = &dev_table[dev_data->devid]; + + spin_lock_irqsave(&dev_data->dte_lock, flags); + dte->data128[0] = ptr->data128[0]; + dte->data128[1] = ptr->data128[1]; + spin_unlock_irqrestore(&dev_data->dte_lock, flags); +} + +static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom) +{ + return (pdom && (pdom->pd_mode == PD_MODE_V2)); +} + +static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom) +{ + return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY); +} + +/* + * We cannot support PASID w/ existing v1 page table in the same domain + * since it will be nested. However, existing domain w/ v2 page table + * or passthrough mode can be used for PASID. + */ +static inline bool pdom_is_sva_capable(struct protection_domain *pdom) +{ + return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom); } static inline int get_acpihid_device_id(struct device *dev, struct acpihid_map_entry **entry) { struct acpi_device *adev = ACPI_COMPANION(dev); - struct acpihid_map_entry *p; + struct acpihid_map_entry *p, *p1 = NULL; + int hid_count = 0; + bool fw_bug; if (!adev) return -ENODEV; @@ -113,56 +251,133 @@ static inline int get_acpihid_device_id(struct device *dev, list_for_each_entry(p, &acpihid_map, list) { if (acpi_dev_hid_uid_match(adev, p->hid, p->uid[0] ? 
p->uid : NULL)) { - if (entry) - *entry = p; - return p->devid; + p1 = p; + fw_bug = false; + hid_count = 1; + break; + } + + /* + * Count HID matches w/o UID, raise FW_BUG but allow exactly one match + */ + if (acpi_dev_hid_match(adev, p->hid)) { + p1 = p; + hid_count++; + fw_bug = true; } } - return -EINVAL; + + if (!p1) + return -EINVAL; + if (fw_bug) + dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n", + hid_count, str_plural(hid_count)); + if (hid_count > 1) + return -EINVAL; + if (entry) + *entry = p1; + + return p1->devid; } -static inline int get_device_id(struct device *dev) +static inline int get_device_sbdf_id(struct device *dev) { - int devid; + int sbdf; if (dev_is_pci(dev)) - devid = get_pci_device_id(dev); + sbdf = get_pci_sbdf_id(to_pci_dev(dev)); else - devid = get_acpihid_device_id(dev, NULL); + sbdf = get_acpihid_device_id(dev, NULL); + + return sbdf; +} + +struct dev_table_entry *get_dev_table(struct amd_iommu *iommu) +{ + struct dev_table_entry *dev_table; + struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; + + BUG_ON(pci_seg == NULL); + dev_table = pci_seg->dev_table; + BUG_ON(dev_table == NULL); - return devid; + return dev_table; } -static struct protection_domain *to_pdomain(struct iommu_domain *dom) +static inline u16 get_device_segment(struct device *dev) { - return container_of(dom, struct protection_domain, domain); + u16 seg; + + if (dev_is_pci(dev)) { + struct pci_dev *pdev = to_pci_dev(dev); + + seg = pci_domain_nr(pdev->bus); + } else { + u32 devid = get_acpihid_device_id(dev, NULL); + + seg = PCI_SBDF_TO_SEGID(devid); + } + + return seg; } -static struct iommu_dev_data *alloc_dev_data(u16 devid) +/* Writes the specific IOMMU for a device into the PCI segment rlookup table */ +void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid) +{ + struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; + + pci_seg->rlookup_table[devid] = iommu; +} + +static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid) +{ + struct amd_iommu_pci_seg *pci_seg; + + for_each_pci_segment(pci_seg) { + if (pci_seg->id == seg) + return pci_seg->rlookup_table[devid]; + } + return NULL; +} + +static struct amd_iommu *rlookup_amd_iommu(struct device *dev) +{ + u16 seg = get_device_segment(dev); + int devid = get_device_sbdf_id(dev); + + if (devid < 0) + return NULL; + return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid)); +} + +static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) { struct iommu_dev_data *dev_data; + struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); if (!dev_data) return NULL; - spin_lock_init(&dev_data->lock); + mutex_init(&dev_data->mutex); + spin_lock_init(&dev_data->dte_lock); dev_data->devid = devid; ratelimit_default_init(&dev_data->rs); - llist_add(&dev_data->dev_data_list, &dev_data_list); + llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list); return dev_data; } -static struct iommu_dev_data *search_dev_data(u16 devid) +struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid) { struct iommu_dev_data *dev_data; struct llist_node *node; + struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; - if (llist_empty(&dev_data_list)) + if (llist_empty(&pci_seg->dev_data_list)) return NULL; - node = dev_data_list.first; + node = pci_seg->dev_data_list.first; llist_for_each_entry(dev_data, node, dev_data_list) { if (dev_data->devid == devid) return dev_data; @@ -173,67 +388,90 @@ static struct iommu_dev_data 
*search_dev_data(u16 devid) static int clone_alias(struct pci_dev *pdev, u16 alias, void *data) { + struct dev_table_entry new; + struct amd_iommu *iommu; + struct iommu_dev_data *dev_data, *alias_data; u16 devid = pci_dev_id(pdev); + int ret = 0; if (devid == alias) return 0; - amd_iommu_rlookup_table[alias] = - amd_iommu_rlookup_table[devid]; - memcpy(amd_iommu_dev_table[alias].data, - amd_iommu_dev_table[devid].data, - sizeof(amd_iommu_dev_table[alias].data)); + iommu = rlookup_amd_iommu(&pdev->dev); + if (!iommu) + return 0; - return 0; + /* Copy the data from pdev */ + dev_data = dev_iommu_priv_get(&pdev->dev); + if (!dev_data) { + pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid); + ret = -EINVAL; + goto out; + } + get_dte256(iommu, dev_data, &new); + + /* Setup alias */ + alias_data = find_dev_data(iommu, alias); + if (!alias_data) { + pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias); + ret = -EINVAL; + goto out; + } + update_dte256(iommu, alias_data, &new); + + amd_iommu_set_rlookup_table(iommu, alias); +out: + return ret; } -static void clone_aliases(struct pci_dev *pdev) +static void clone_aliases(struct amd_iommu *iommu, struct device *dev) { - if (!pdev) + struct pci_dev *pdev; + + if (!dev_is_pci(dev)) return; + pdev = to_pci_dev(dev); /* * The IVRS alias stored in the alias table may not be * part of the PCI DMA aliases if it's bus differs * from the original device. */ - clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL); + clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL); pci_for_each_dma_alias(pdev, clone_alias, NULL); } -static struct pci_dev *setup_aliases(struct device *dev) +static void setup_aliases(struct amd_iommu *iommu, struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; u16 ivrs_alias; /* For ACPI HID devices, there are no aliases */ if (!dev_is_pci(dev)) - return NULL; + return; /* * Add the IVRS alias to the pci aliases if it is on the same * bus. The IVRS table may know about a quirk that we don't. 
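The same-bus test below compares PCI_BUS_NUM(ivrs_alias) against pdev->bus->number, which relies on the standard 16-bit devid encoding (bus in bits 15:8, slot 7:3, function 2:0). Decoded stand-alone:

#include <stdint.h>
#include <stdio.h>

#define PCI_BUS_NUM(x)	(((x) >> 8) & 0xff)
#define PCI_SLOT(x)	(((x) >> 3) & 0x1f)
#define PCI_FUNC(x)	((x) & 0x07)

int main(void)
{
	uint16_t ivrs_alias = 0x05a1;	/* hypothetical devid */

	printf("%02x:%02x.%x\n", PCI_BUS_NUM(ivrs_alias),
	       PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
	return 0;
}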
*/ - ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)]; + ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)]; if (ivrs_alias != pci_dev_id(pdev) && PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1); - clone_aliases(pdev); - - return pdev; + clone_aliases(iommu, dev); } -static struct iommu_dev_data *find_dev_data(u16 devid) +static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid) { struct iommu_dev_data *dev_data; - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; - dev_data = search_dev_data(devid); + dev_data = search_dev_data(iommu, devid); if (dev_data == NULL) { - dev_data = alloc_dev_data(devid); + dev_data = alloc_dev_data(iommu, devid); if (!dev_data) return NULL; @@ -269,24 +507,143 @@ static struct iommu_group *acpihid_device_group(struct device *dev) return entry->group; } -static bool pci_iommuv2_capable(struct pci_dev *pdev) +static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data) { - static const int caps[] = { - PCI_EXT_CAP_ID_PRI, - PCI_EXT_CAP_ID_PASID, - }; - int i, pos; + return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP); +} - if (!pci_ats_supported(pdev)) - return false; +static u32 pdev_get_caps(struct pci_dev *pdev) +{ + int features; + u32 flags = 0; + + if (pci_ats_supported(pdev)) + flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP; + + if (pci_pri_supported(pdev)) + flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP; + + features = pci_pasid_features(pdev); + if (features >= 0) { + flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP; - for (i = 0; i < 2; ++i) { - pos = pci_find_ext_capability(pdev, caps[i]); - if (pos == 0) - return false; + if (features & PCI_PASID_CAP_EXEC) + flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP; + + if (features & PCI_PASID_CAP_PRIV) + flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP; } - return true; + return flags; +} + +static inline int pdev_enable_cap_ats(struct pci_dev *pdev) +{ + struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); + int ret = -EINVAL; + + if (dev_data->ats_enabled) + return 0; + + if (amd_iommu_iotlb_sup && + (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) { + ret = pci_enable_ats(pdev, PAGE_SHIFT); + if (!ret) { + dev_data->ats_enabled = 1; + dev_data->ats_qdep = pci_ats_queue_depth(pdev); + } + } + + return ret; +} + +static inline void pdev_disable_cap_ats(struct pci_dev *pdev) +{ + struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); + + if (dev_data->ats_enabled) { + pci_disable_ats(pdev); + dev_data->ats_enabled = 0; + } +} + +static inline int pdev_enable_cap_pri(struct pci_dev *pdev) +{ + struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); + int ret = -EINVAL; + + if (dev_data->pri_enabled) + return 0; + + if (!dev_data->ats_enabled) + return 0; + + if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) { + /* + * First reset the PRI state of the device. 
+ * FIXME: Hardcode number of outstanding requests for now + */ + if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) { + dev_data->pri_enabled = 1; + dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); + + ret = 0; + } + } + + return ret; +} + +static inline void pdev_disable_cap_pri(struct pci_dev *pdev) +{ + struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); + + if (dev_data->pri_enabled) { + pci_disable_pri(pdev); + dev_data->pri_enabled = 0; + } +} + +static inline int pdev_enable_cap_pasid(struct pci_dev *pdev) +{ + struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); + int ret = -EINVAL; + + if (dev_data->pasid_enabled) + return 0; + + if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) { + /* Only allow access to user-accessible pages */ + ret = pci_enable_pasid(pdev, 0); + if (!ret) + dev_data->pasid_enabled = 1; + } + + return ret; +} + +static inline void pdev_disable_cap_pasid(struct pci_dev *pdev) +{ + struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); + + if (dev_data->pasid_enabled) { + pci_disable_pasid(pdev); + dev_data->pasid_enabled = 0; + } +} + +static void pdev_enable_caps(struct pci_dev *pdev) +{ + pdev_enable_cap_pasid(pdev); + pdev_enable_cap_ats(pdev); + pdev_enable_cap_pri(pdev); +} + +static void pdev_disable_caps(struct pci_dev *pdev) +{ + pdev_disable_cap_ats(pdev); + pdev_disable_cap_pasid(pdev); + pdev_disable_cap_pri(pdev); } /* @@ -295,42 +652,55 @@ static bool pci_iommuv2_capable(struct pci_dev *pdev) */ static bool check_device(struct device *dev) { - int devid; + struct amd_iommu_pci_seg *pci_seg; + struct amd_iommu *iommu; + int devid, sbdf; if (!dev) return false; - devid = get_device_id(dev); - if (devid < 0) + sbdf = get_device_sbdf_id(dev); + if (sbdf < 0) return false; + devid = PCI_SBDF_TO_DEVID(sbdf); - /* Out of our scope? */ - if (devid > amd_iommu_last_bdf) + iommu = rlookup_amd_iommu(dev); + if (!iommu) return false; - if (amd_iommu_rlookup_table[devid] == NULL) + /* Out of our scope? */ + pci_seg = iommu->pci_seg; + if (devid > pci_seg->last_bdf) return false; return true; } -static int iommu_init_device(struct device *dev) +static int iommu_init_device(struct amd_iommu *iommu, struct device *dev) { struct iommu_dev_data *dev_data; - int devid; + int devid, sbdf; if (dev_iommu_priv_get(dev)) return 0; - devid = get_device_id(dev); - if (devid < 0) - return devid; + sbdf = get_device_sbdf_id(dev); + if (sbdf < 0) + return sbdf; - dev_data = find_dev_data(devid); + devid = PCI_SBDF_TO_DEVID(sbdf); + dev_data = find_dev_data(iommu, devid); if (!dev_data) return -ENOMEM; - dev_data->pdev = setup_aliases(dev); + dev_data->dev = dev; + + /* + * The dev_iommu_priv_set() needs to be called before setup_aliases. + * Otherwise, subsequent call to dev_iommu_priv_get() will fail. + */ + dev_iommu_priv_set(dev, dev_data); + setup_aliases(iommu, dev); /* * By default we use passthrough mode for IOMMUv2 capable devices. @@ -339,50 +709,30 @@ static int iommu_init_device(struct device *dev) * it'll be forced to go into translation mode.
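pdev_enable_caps() and pdev_disable_caps() above encode an ordering: PASID is enabled first and PRI only once ATS is on, while disable tears ATS down first. A toy model of that sequencing (the pci_* calls are stubbed out; only the ordering and the early-exit idempotence are the point):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_caps { bool pasid, ats, pri; };

    /* Mirrors pdev_enable_caps(): PASID, then ATS, then PRI on top of ATS */
    static void enable_caps(struct dev_caps *c, bool has_pasid, bool has_ats,
    			bool has_pri)
    {
    	if (!c->pasid && has_pasid)
    		c->pasid = true;
    	if (!c->ats && has_ats)
    		c->ats = true;
    	if (!c->pri && has_pri && c->ats)	/* PRI depends on ATS */
    		c->pri = true;
    }

    /* Mirrors pdev_disable_caps(): stop translated traffic (ATS) first */
    static void disable_caps(struct dev_caps *c)
    {
    	c->ats = false;
    	c->pasid = false;
    	c->pri = false;
    }

    int main(void)
    {
    	struct dev_caps c = { false, false, false };

    	enable_caps(&c, true, true, true);
    	printf("pasid=%d ats=%d pri=%d\n", c.pasid, c.ats, c.pri);
    	disable_caps(&c);
    	printf("pasid=%d ats=%d pri=%d\n", c.pasid, c.ats, c.pri);
    	return 0;
    }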
*/ if ((iommu_default_passthrough() || !amd_iommu_force_isolation) && - dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) { - struct amd_iommu *iommu; - - iommu = amd_iommu_rlookup_table[dev_data->devid]; - dev_data->iommu_v2 = iommu->is_iommu_v2; + dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) { + dev_data->flags = pdev_get_caps(to_pci_dev(dev)); } - dev_iommu_priv_set(dev, dev_data); - return 0; } -static void iommu_ignore_device(struct device *dev) +static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev) { - int devid; + struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; + struct dev_table_entry *dev_table = get_dev_table(iommu); + int devid, sbdf; - devid = get_device_id(dev); - if (devid < 0) + sbdf = get_device_sbdf_id(dev); + if (sbdf < 0) return; - amd_iommu_rlookup_table[devid] = NULL; - memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); + devid = PCI_SBDF_TO_DEVID(sbdf); + pci_seg->rlookup_table[devid] = NULL; + memset(&dev_table[devid], 0, sizeof(struct dev_table_entry)); - setup_aliases(dev); + setup_aliases(iommu, dev); } -static void amd_iommu_uninit_device(struct device *dev) -{ - struct iommu_dev_data *dev_data; - - dev_data = dev_iommu_priv_get(dev); - if (!dev_data) - return; - - if (dev_data->domain) - detach_device(dev); - - dev_iommu_priv_set(dev, NULL); - - /* - * We keep dev_data around for unplugged devices and reuse it when the - * device is re-plugged - not doing so would introduce a ton of races. - */ -} /**************************************************************************** * @@ -390,13 +740,16 @@ static void amd_iommu_uninit_device(struct device *dev) * ****************************************************************************/ -static void dump_dte_entry(u16 devid) +static void dump_dte_entry(struct amd_iommu *iommu, u16 devid) { int i; + struct dev_table_entry dte; + struct iommu_dev_data *dev_data = find_dev_data(iommu, devid); + + get_dte256(iommu, dev_data, &dte); for (i = 0; i < 4; ++i) - pr_err("DTE[%d]: %016llx\n", i, - amd_iommu_dev_table[devid].data[i]); + pr_err("DTE[%d]: %016llx\n", i, dte.data[i]); } static void dump_command(unsigned long phys_addr) @@ -408,7 +761,7 @@ static void dump_command(unsigned long phys_addr) pr_err("CMD[%d]: %08x\n", i, cmd->data[i]); } -static void amd_iommu_report_rmp_hw_error(volatile u32 *event) +static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event) { struct iommu_dev_data *dev_data = NULL; int devid, vmg_tag, flags; @@ -420,17 +773,19 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event) flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8); - pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid), + pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), devid & 0xff); if (pdev) dev_data = dev_iommu_priv_get(&pdev->dev); - if (dev_data && __ratelimit(&dev_data->rs)) { - pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n", - vmg_tag, spa, flags); + if (dev_data) { + if (__ratelimit(&dev_data->rs)) { + pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n", + vmg_tag, spa, flags); + } } else { - pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, 
flags=0x%04x]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), vmg_tag, spa, flags); } @@ -438,7 +793,7 @@ static void amd_iommu_report_rmp_hw_error(volatile u32 *event) pci_dev_put(pdev); } -static void amd_iommu_report_rmp_fault(volatile u32 *event) +static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event) { struct iommu_dev_data *dev_data = NULL; int devid, flags_rmp, vmg_tag, flags; @@ -451,17 +806,19 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event) flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; gpa = ((u64)event[3] << 32) | event[2]; - pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid), + pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), devid & 0xff); if (pdev) dev_data = dev_iommu_priv_get(&pdev->dev); - if (dev_data && __ratelimit(&dev_data->rs)) { - pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n", - vmg_tag, gpa, flags_rmp, flags); + if (dev_data) { + if (__ratelimit(&dev_data->rs)) { + pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n", + vmg_tag, gpa, flags_rmp, flags); + } } else { - pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), vmg_tag, gpa, flags_rmp, flags); } @@ -469,26 +826,59 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event) pci_dev_put(pdev); } -static void amd_iommu_report_page_fault(u16 devid, u16 domain_id, +#define IS_IOMMU_MEM_TRANSACTION(flags) \ + (((flags) & EVENT_FLAG_I) == 0) + +#define IS_WRITE_REQUEST(flags) \ + ((flags) & EVENT_FLAG_RW) + +static void amd_iommu_report_page_fault(struct amd_iommu *iommu, + u16 devid, u16 domain_id, u64 address, int flags) { struct iommu_dev_data *dev_data = NULL; struct pci_dev *pdev; - pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid), + pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid), devid & 0xff); if (pdev) dev_data = dev_iommu_priv_get(&pdev->dev); - if (dev_data && __ratelimit(&dev_data->rs)) { - pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n", - domain_id, address, flags); - } else if (printk_ratelimit()) { - pr_err("Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + if (dev_data) { + /* + * If this is a DMA fault (for which the I(nterrupt) + * bit will be unset), allow report_iommu_fault() to + * prevent logging it. + */ + if (IS_IOMMU_MEM_TRANSACTION(flags)) { + /* Device not attached to domain properly */ + if (dev_data->domain == NULL) { + pr_err_ratelimited("Event logged [Device not attached to domain properly]\n"); + pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), + PCI_FUNC(devid), domain_id); + goto out; + } + + if (!report_iommu_fault(&dev_data->domain->domain, + &pdev->dev, address, + IS_WRITE_REQUEST(flags) ? 
+ IOMMU_FAULT_WRITE : + IOMMU_FAULT_READ)) + goto out; + } + + if (__ratelimit(&dev_data->rs)) { + pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n", + domain_id, address, flags); + } + } else { + pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), domain_id, address, flags); } +out: if (pdev) pci_dev_put(pdev); } @@ -499,7 +889,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) int type, devid, flags, tag; volatile u32 *event = __evt; int count = 0; - u64 address; + u64 address, ctrl; u32 pasid; retry: @@ -509,6 +899,7 @@ retry: (event[1] & EVENT_DOMID_MASK_LO); flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; address = (u64)(((u64)event[3]) << 32) | event[2]; + ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); if (type == 0) { /* Did we hit the erratum? */ @@ -521,26 +912,27 @@ retry: } if (type == EVENT_TYPE_IO_FAULT) { - amd_iommu_report_page_fault(devid, pasid, address, flags); + amd_iommu_report_page_fault(iommu, devid, pasid, address, flags); return; } switch (type) { case EVENT_TYPE_ILL_DEV: - dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); - dump_dte_entry(devid); + dev_err(dev, "Control Reg : 0x%llx\n", ctrl); + dump_dte_entry(iommu, devid); break; case EVENT_TYPE_DEV_TAB_ERR: - dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x " + dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x " "address=0x%llx flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), address, flags); break; case EVENT_TYPE_PAGE_TAB_ERR: - dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); break; case EVENT_TYPE_ILL_CMD: @@ -552,26 +944,26 @@ retry: address, flags); break; case EVENT_TYPE_IOTLB_INV_TO: - dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%02x:%02x.%x address=0x%llx]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), address); break; case EVENT_TYPE_INV_DEV_REQ: - dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags); break; case EVENT_TYPE_RMP_FAULT: - amd_iommu_report_rmp_fault(event); + amd_iommu_report_rmp_fault(iommu, event); break; case 
EVENT_TYPE_RMP_HW_ERR: - amd_iommu_report_rmp_hw_error(event); + amd_iommu_report_rmp_hw_error(iommu, event); break; case EVENT_TYPE_INV_PPR_REQ: pasid = PPR_PASID(*((u64 *)__evt)); tag = event[1] & 0x03FF; - dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), pasid, address, flags, tag); break; default: @@ -579,7 +971,14 @@ retry: event[0], event[1], event[2], event[3]); } - memset(__evt, 0, 4 * sizeof(u32)); + /* + * To detect hardware erratum 732 we need to clear the + * entry back to zero. This issue does not exist on an + * SNP-enabled system. Also this buffer is not writeable + * on an SNP-enabled system. + */ + if (!amd_iommu_snp_en) + memset(__evt, 0, 4 * sizeof(u32)); } static void iommu_poll_events(struct amd_iommu *iommu) @@ -591,79 +990,12 @@ static void iommu_poll_events(struct amd_iommu *iommu) while (head != tail) { iommu_print_event(iommu, iommu->evt_buf + head); - head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE; - } - - writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); -} - -static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) -{ - struct amd_iommu_fault fault; - - if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { - pr_err_ratelimited("Unknown PPR request received\n"); - return; - } - - fault.address = raw[1]; - fault.pasid = PPR_PASID(raw[0]); - fault.device_id = PPR_DEVID(raw[0]); - fault.tag = PPR_TAG(raw[0]); - fault.flags = PPR_FLAGS(raw[0]); - - atomic_notifier_call_chain(&ppr_notifier, 0, &fault); -} - -static void iommu_poll_ppr_log(struct amd_iommu *iommu) -{ - u32 head, tail; - - if (iommu->ppr_log == NULL) - return; - - head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); - - while (head != tail) { - volatile u64 *raw; - u64 entry[2]; - int i; - - raw = (u64 *)(iommu->ppr_log + head); - - /* - * Hardware bug: Interrupt may arrive before the entry is - * written to memory. If this happens we need to wait for the - * entry to arrive. - */ - for (i = 0; i < LOOP_TIMEOUT; ++i) { - if (PPR_REQ_TYPE(raw[0]) != 0) - break; - udelay(1); - } - - /* Avoid memcpy function-call overhead */ - entry[0] = raw[0]; - entry[1] = raw[1]; - - /* - * To detect the hardware bug we need to clear the entry - * back to zero. - */ - raw[0] = raw[1] = 0UL; /* Update head pointer of hardware ring-buffer */ - head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; - writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - - /* Handle PPR entry */ - iommu_handle_ppr_entry(iommu, entry); - - /* Refresh ring-buffer information */ - head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); + head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE; + writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); } + } #ifdef CONFIG_IRQ_REMAP @@ -673,13 +1005,21 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) { iommu_ga_log_notifier = notifier; + /* + * Ensure all in-flight IRQ handlers run to completion before returning + * to the caller, e.g. to ensure module code isn't unloaded while it's + * being executed in the IRQ handler.
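The log interrupts below are all serviced with the same loop shape: acknowledge the status bits, drain the ring, then re-read the status register in case the hardware raised the bit again in the meantime (the ERBT1312 workaround mentioned in the code). A self-contained rendering of that pattern with a mock status word:

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t status;	/* stands in for MMIO_STATUS_OFFSET */

    static void drain_event_log(void)
    {
    	printf("draining event log\n");
    }

    static void handle_log_irq(uint32_t int_mask, void (*handler)(void))
    {
    	uint32_t s = status;

    	while (s & int_mask) {
    		/* ack: the real code writes the mask back to the register */
    		status &= ~int_mask;

    		handler();

    		/*
    		 * The bit may have been raised again while the handler ran;
    		 * re-read so no interrupt is lost.
    		 */
    		s = status;
    	}
    }

    int main(void)
    {
    	status = 1u << 1;	/* pretend the event-log interrupt fired */
    	handle_log_irq(1u << 1, drain_event_log);
    	return 0;
    }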
+ */ + if (!notifier) + synchronize_rcu(); + return 0; } EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier); static void iommu_poll_ga_log(struct amd_iommu *iommu) { - u32 head, tail, cnt = 0; + u32 head, tail; if (iommu->ga_log == NULL) return; @@ -692,7 +1032,6 @@ static void iommu_poll_ga_log(struct amd_iommu *iommu) u64 log_entry; raw = (u64 *)(iommu->ga_log + head); - cnt++; /* Avoid memcpy function-call overhead */ log_entry = *raw; @@ -724,10 +1063,10 @@ static void amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { if (!irq_remapping_enabled || !dev_is_pci(dev) || - pci_dev_has_special_msi_domain(to_pci_dev(dev))) + !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev))) return; - dev_set_msi_domain(dev, iommu->msi_domain); + dev_set_msi_domain(dev, iommu->ir_domain); } #else /* CONFIG_IRQ_REMAP */ @@ -735,37 +1074,27 @@ static inline void amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } #endif /* !CONFIG_IRQ_REMAP */ -#define AMD_IOMMU_INT_MASK \ - (MMIO_STATUS_EVT_INT_MASK | \ - MMIO_STATUS_PPR_INT_MASK | \ - MMIO_STATUS_GALOG_INT_MASK) - -irqreturn_t amd_iommu_int_thread(int irq, void *data) +static void amd_iommu_handle_irq(void *data, const char *evt_type, + u32 int_mask, u32 overflow_mask, + void (*int_handler)(struct amd_iommu *), + void (*overflow_handler)(struct amd_iommu *)) { struct amd_iommu *iommu = (struct amd_iommu *) data; u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + u32 mask = int_mask | overflow_mask; - while (status & AMD_IOMMU_INT_MASK) { - /* Enable EVT and PPR and GA interrupts again */ - writel(AMD_IOMMU_INT_MASK, - iommu->mmio_base + MMIO_STATUS_OFFSET); + while (status & mask) { + /* Enable interrupt sources again */ + writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET); - if (status & MMIO_STATUS_EVT_INT_MASK) { - pr_devel("Processing IOMMU Event Log\n"); - iommu_poll_events(iommu); + if (int_handler) { + pr_devel("Processing IOMMU (ivhd%d) %s Log\n", + iommu->index, evt_type); + int_handler(iommu); } - if (status & MMIO_STATUS_PPR_INT_MASK) { - pr_devel("Processing IOMMU PPR Log\n"); - iommu_poll_ppr_log(iommu); - } - -#ifdef CONFIG_IRQ_REMAP - if (status & MMIO_STATUS_GALOG_INT_MASK) { - pr_devel("Processing IOMMU GA Log\n"); - iommu_poll_ga_log(iommu); - } -#endif + if ((status & overflow_mask) && overflow_handler) + overflow_handler(iommu); /* * Hardware bug: ERBT1312 @@ -782,6 +1111,43 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) */ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); } +} + +irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data) +{ + amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK, + MMIO_STATUS_EVT_OVERFLOW_MASK, + iommu_poll_events, amd_iommu_restart_event_logging); + + return IRQ_HANDLED; +} + +irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data) +{ + amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK, + MMIO_STATUS_PPR_OVERFLOW_MASK, + amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log); + + return IRQ_HANDLED; +} + +irqreturn_t amd_iommu_int_thread_galog(int irq, void *data) +{ +#ifdef CONFIG_IRQ_REMAP + amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK, + MMIO_STATUS_GALOG_OVERFLOW_MASK, + iommu_poll_ga_log, amd_iommu_restart_ga_log); +#endif + + return IRQ_HANDLED; +} + +irqreturn_t amd_iommu_int_thread(int irq, void *data) +{ + amd_iommu_int_thread_evtlog(irq, data); + amd_iommu_int_thread_pprlog(irq, data); + amd_iommu_int_thread_galog(irq, data); + return IRQ_HANDLED; } @@ -796,6 +1162,25 @@ 
irqreturn_t amd_iommu_int_handler(int irq, void *data) * ****************************************************************************/ +static void dump_command_buffer(struct amd_iommu *iommu) +{ + struct iommu_cmd *cmd; + u32 head, tail; + int i; + + head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); + tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + + pr_err("CMD Buffer head=%llu tail=%llu\n", MMIO_CMD_BUFFER_HEAD(head), + MMIO_CMD_BUFFER_TAIL(tail)); + + for (i = 0; i < CMD_BUFFER_ENTRIES; i++) { + cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd)); + pr_err("%3d: %08x %08x %08x %08x\n", i, cmd->data[0], cmd->data[1], cmd->data[2], + cmd->data[3]); + } +} + static int wait_on_sem(struct amd_iommu *iommu, u64 data) { int i = 0; @@ -806,7 +1191,14 @@ static int wait_on_sem(struct amd_iommu *iommu, u64 data) } if (i == LOOP_TIMEOUT) { - pr_alert("Completion-Wait loop timed out\n"); + + pr_alert("IOMMU %04x:%02x:%02x.%01x: Completion-Wait loop timed out\n", + iommu->pci_seg->id, PCI_BUS_NUM(iommu->devid), + PCI_SLOT(iommu->devid), PCI_FUNC(iommu->devid)); + + if (amd_iommu_dump) + DO_ONCE_LITE(dump_command_buffer, iommu); + return -EIO; } @@ -835,12 +1227,13 @@ static void build_completion_wait(struct iommu_cmd *cmd, struct amd_iommu *iommu, u64 data) { - u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem); + u64 paddr = iommu->cmd_sem_paddr; memset(cmd, 0, sizeof(*cmd)); cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; cmd->data[1] = upper_32_bits(paddr); - cmd->data[2] = data; + cmd->data[2] = lower_32_bits(data); + cmd->data[3] = upper_32_bits(data); CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); } @@ -894,73 +1287,49 @@ static inline u64 build_inv_address(u64 address, size_t size) } static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, - size_t size, u16 domid, int pde) + size_t size, u16 domid, + ioasid_t pasid, bool gn) { u64 inv_address = build_inv_address(address, size); memset(cmd, 0, sizeof(*cmd)); + cmd->data[1] |= domid; cmd->data[2] = lower_32_bits(inv_address); cmd->data[3] = upper_32_bits(inv_address); + /* PDE bit - we want to flush everything, not only the PTEs */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; + if (gn) { + cmd->data[0] |= pasid; + cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; + } CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); - if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; } static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, - u64 address, size_t size) + u64 address, size_t size, + ioasid_t pasid, bool gn) { u64 inv_address = build_inv_address(address, size); memset(cmd, 0, sizeof(*cmd)); + cmd->data[0] = devid; cmd->data[0] |= (qdep & 0xff) << 24; cmd->data[1] = devid; cmd->data[2] = lower_32_bits(inv_address); cmd->data[3] = upper_32_bits(inv_address); - CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); -} - -static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid, - u64 address, bool size) -{ - memset(cmd, 0, sizeof(*cmd)); - - address &= ~(0xfffULL); - - cmd->data[0] = pasid; - cmd->data[1] = domid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; - cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; - if (size) - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; - CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); -} - -static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid, - int qdep, u64 address, bool size) -{ - 
memset(cmd, 0, sizeof(*cmd)); - - address &= ~(0xfffULL); + if (gn) { + cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; + cmd->data[1] |= (pasid & 0xff) << 16; + cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; + } - cmd->data[0] = devid; - cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; - cmd->data[0] |= (qdep & 0xff) << 24; - cmd->data[1] = devid; - cmd->data[1] |= (pasid & 0xff) << 16; - cmd->data[2] = lower_32_bits(address); - cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; - cmd->data[3] = upper_32_bits(address); - if (size) - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); } static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid, - int status, int tag, bool gn) + int status, int tag, u8 gn) { memset(cmd, 0, sizeof(*cmd)); @@ -1062,11 +1431,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu) if (!iommu->need_sync) return 0; - raw_spin_lock_irqsave(&iommu->lock, flags); - - data = ++iommu->cmd_sem_val; + data = atomic64_inc_return(&iommu->cmd_sem_val); build_completion_wait(&cmd, iommu, data); + raw_spin_lock_irqsave(&iommu->lock, flags); + ret = __iommu_queue_command_sync(iommu, &cmd, false); if (ret) goto out_unlock; @@ -1079,6 +1448,21 @@ out_unlock: return ret; } +static void domain_flush_complete(struct protection_domain *domain) +{ + struct pdom_iommu_info *pdom_iommu_info; + unsigned long i; + + lockdep_assert_held(&domain->lock); + + /* + * Devices of this domain are behind this IOMMU + * We need to wait for completion of all commands. + */ + xa_for_each(&domain->iommu_array, i, pdom_iommu_info) + iommu_completion_wait(pdom_iommu_info->iommu); +} + static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) { struct iommu_cmd cmd; @@ -1088,11 +1472,21 @@ static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) return iommu_queue_command(iommu, &cmd); } +static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid) +{ + int ret; + + ret = iommu_flush_dte(iommu, devid); + if (!ret) + iommu_completion_wait(iommu); +} + static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) { u32 devid; + u16 last_bdf = iommu->pci_seg->last_bdf; - for (devid = 0; devid <= 0xffff; ++devid) + for (devid = 0; devid <= last_bdf; ++devid) iommu_flush_dte(iommu, devid); iommu_completion_wait(iommu); @@ -1105,11 +1499,12 @@ static void amd_iommu_flush_dte_all(struct amd_iommu *iommu) static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) { u32 dom_id; + u16 last_bdf = iommu->pci_seg->last_bdf; - for (dom_id = 0; dom_id <= 0xffff; ++dom_id) { + for (dom_id = 0; dom_id <= last_bdf; ++dom_id) { struct iommu_cmd cmd; build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, - dom_id, 1); + dom_id, IOMMU_NO_PASID, false); iommu_queue_command(iommu, &cmd); } @@ -1121,7 +1516,7 @@ static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) struct iommu_cmd cmd; build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, - dom_id, 1); + dom_id, IOMMU_NO_PASID, false); iommu_queue_command(iommu, &cmd); iommu_completion_wait(iommu); @@ -1149,16 +1544,20 @@ static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) static void amd_iommu_flush_irt_all(struct amd_iommu *iommu) { u32 devid; + u16 last_bdf = iommu->pci_seg->last_bdf; - for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++) + if (iommu->irtcachedis_enabled) + return; + + for (devid = 0; devid <= last_bdf; devid++) iommu_flush_irt(iommu, devid); iommu_completion_wait(iommu); } -void iommu_flush_all_caches(struct amd_iommu *iommu) +void 
amd_iommu_flush_all_caches(struct amd_iommu *iommu) { - if (iommu_feature(iommu, FEATURE_IA)) { + if (check_feature(FEATURE_IA)) { amd_iommu_flush_all(iommu); } else { amd_iommu_flush_dte_all(iommu); @@ -1170,17 +1569,15 @@ void iommu_flush_all_caches(struct amd_iommu *iommu) /* * Command send function for flushing on-device TLB */ -static int device_flush_iotlb(struct iommu_dev_data *dev_data, - u64 address, size_t size) +static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address, + size_t size, ioasid_t pasid, bool gn) { - struct amd_iommu *iommu; + struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); struct iommu_cmd cmd; - int qdep; + int qdep = dev_data->ats_qdep; - qdep = dev_data->ats.qdep; - iommu = amd_iommu_rlookup_table[dev_data->devid]; - - build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); + build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, + size, pasid, gn); return iommu_queue_command(iommu, &cmd); } @@ -1197,121 +1594,212 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data) */ static int device_flush_dte(struct iommu_dev_data *dev_data) { - struct amd_iommu *iommu; + struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + struct pci_dev *pdev = NULL; + struct amd_iommu_pci_seg *pci_seg; u16 alias; int ret; - iommu = amd_iommu_rlookup_table[dev_data->devid]; + if (dev_is_pci(dev_data->dev)) + pdev = to_pci_dev(dev_data->dev); - if (dev_data->pdev) - ret = pci_for_each_dma_alias(dev_data->pdev, + if (pdev) + ret = pci_for_each_dma_alias(pdev, device_flush_dte_alias, iommu); else ret = iommu_flush_dte(iommu, dev_data->devid); if (ret) return ret; - alias = amd_iommu_alias_table[dev_data->devid]; + pci_seg = iommu->pci_seg; + alias = pci_seg->alias_table[dev_data->devid]; if (alias != dev_data->devid) { ret = iommu_flush_dte(iommu, alias); if (ret) return ret; } - if (dev_data->ats.enabled) - ret = device_flush_iotlb(dev_data, 0, ~0UL); + if (dev_data->ats_enabled) { + /* Invalidate the entire contents of an IOTLB */ + ret = device_flush_iotlb(dev_data, 0, ~0UL, + IOMMU_NO_PASID, false); + } return ret; } -/* - * TLB invalidation function which is called from the mapping functions. - * It invalidates a single PTE if the range to flush is within a single - * page. Otherwise it flushes the whole TLB of the IOMMU. 
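amd_iommu_domain_flush_pages() further below splits a flush range into naturally aligned power-of-two chunks when running behind a vIOMMU with NP-cache, so the hypervisor never sees an unaligned invalidation. A userspace rendering of that chunking, with compiler builtins standing in for the kernel's __ffs()/__fls() (assumes 64-bit unsigned long):

    #include <stdio.h>

    static int lowest_set(unsigned long v)  { return __builtin_ctzl(v); }
    static int highest_set(unsigned long v) { return 63 - __builtin_clzl(v); }

    static void flush_in_aligned_chunks(unsigned long address, unsigned long size)
    {
    	while (size != 0) {
    		/* address may be zero, so only use its alignment when non-zero */
    		int min_align = (address != 0)
    			? (lowest_set(address) < highest_set(size)
    			   ? lowest_set(address) : highest_set(size))
    			: highest_set(size);
    		unsigned long flush_size = 1ul << min_align;

    		printf("flush addr=%#lx size=%#lx\n", address, flush_size);
    		address += flush_size;
    		size -= flush_size;
    	}
    }

    int main(void)
    {
    	/* 0x3000..0x8000 becomes two flushes: 0x1000@0x3000, 0x4000@0x4000 */
    	flush_in_aligned_chunks(0x3000, 0x5000);
    	return 0;
    }

For this example the loop issues exactly two aligned invalidations instead of five single-page ones.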
- */ -static void __domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size, int pde) +static int domain_flush_pages_v2(struct protection_domain *pdom, + u64 address, size_t size) { struct iommu_dev_data *dev_data; struct iommu_cmd cmd; - int ret = 0, i; + int ret = 0; - build_inv_iommu_pages(&cmd, address, size, domain->id, pde); + lockdep_assert_held(&pdom->lock); + list_for_each_entry(dev_data, &pdom->dev_list, list) { + struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); + u16 domid = dev_data->gcr3_info.domid; - for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { - if (!domain->dev_iommu[i]) - continue; + build_inv_iommu_pages(&cmd, address, size, + domid, IOMMU_NO_PASID, true); + + ret |= iommu_queue_command(iommu, &cmd); + } + return ret; +} + +static int domain_flush_pages_v1(struct protection_domain *pdom, + u64 address, size_t size) +{ + struct pdom_iommu_info *pdom_iommu_info; + struct iommu_cmd cmd; + int ret = 0; + unsigned long i; + + lockdep_assert_held(&pdom->lock); + + build_inv_iommu_pages(&cmd, address, size, + pdom->id, IOMMU_NO_PASID, false); + + xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) { /* * Devices of this domain are behind this IOMMU * We need a TLB flush */ - ret |= iommu_queue_command(amd_iommus[i], &cmd); + ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd); + } + + return ret; +} + +/* + * TLB invalidation function which is called from the mapping functions. + * It flushes range of PTEs of the domain. + */ +static void __domain_flush_pages(struct protection_domain *domain, + u64 address, size_t size) +{ + struct iommu_dev_data *dev_data; + int ret = 0; + ioasid_t pasid = IOMMU_NO_PASID; + bool gn = false; + + lockdep_assert_held(&domain->lock); + + if (pdom_is_v2_pgtbl_mode(domain)) { + gn = true; + ret = domain_flush_pages_v2(domain, address, size); + } else { + ret = domain_flush_pages_v1(domain, address, size); } list_for_each_entry(dev_data, &domain->dev_list, list) { - if (!dev_data->ats.enabled) + if (!dev_data->ats_enabled) continue; - ret |= device_flush_iotlb(dev_data, address, size); + ret |= device_flush_iotlb(dev_data, address, size, pasid, gn); } WARN_ON(ret); } -static void domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size) +void amd_iommu_domain_flush_pages(struct protection_domain *domain, + u64 address, size_t size) { - __domain_flush_pages(domain, address, size, 0); -} + lockdep_assert_held(&domain->lock); -/* Flush the whole IO/TLB for a given protection domain - including PDE */ -void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain) -{ - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); -} + if (likely(!amd_iommu_np_cache)) { + __domain_flush_pages(domain, address, size); -void amd_iommu_domain_flush_complete(struct protection_domain *domain) -{ - int i; + /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ + domain_flush_complete(domain); - for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { - if (domain && !domain->dev_iommu[i]) - continue; + return; + } + + /* + * When NpCache is on, we infer that we run in a VM and use a vIOMMU. + * In such setups it is best to avoid flushes of ranges which are not + * naturally aligned, since it would lead to flushes of unmodified + * PTEs. Such flushes would require the hypervisor to do more work than + * necessary. Therefore, perform repeated flushes of aligned ranges + * until you cover the range. 
Each iteration flushes the smaller + * between the natural alignment of the address that we flush and the + * greatest naturally aligned region that fits in the range. + */ + while (size != 0) { + int addr_alignment = __ffs(address); + int size_alignment = __fls(size); + int min_alignment; + size_t flush_size; /* - * Devices of this domain are behind this IOMMU - * We need to wait for completion of all commands. + * size is always non-zero, but address might be zero, causing + * addr_alignment to be negative. As the casting of the + * argument in __ffs(address) to long might trim the high bits + * of the address on x86-32, cast to long when doing the check. */ - iommu_completion_wait(amd_iommus[i]); + if (likely((unsigned long)address != 0)) + min_alignment = min(addr_alignment, size_alignment); + else + min_alignment = size_alignment; + + flush_size = 1ul << min_alignment; + + __domain_flush_pages(domain, address, flush_size); + address += flush_size; + size -= flush_size; } + + /* Wait until IOMMU TLB and all device IOTLB flushes are complete */ + domain_flush_complete(domain); } -/* Flush the not present cache if it exists */ -static void domain_flush_np_cache(struct protection_domain *domain, - dma_addr_t iova, size_t size) +/* Flush the whole IO/TLB for a given protection domain - including PDE */ +static void amd_iommu_domain_flush_all(struct protection_domain *domain) { - if (unlikely(amd_iommu_np_cache)) { - unsigned long flags; + amd_iommu_domain_flush_pages(domain, 0, + CMD_INV_IOMMU_ALL_PAGES_ADDRESS); +} - spin_lock_irqsave(&domain->lock, flags); - domain_flush_pages(domain, iova, size); - amd_iommu_domain_flush_complete(domain); - spin_unlock_irqrestore(&domain->lock, flags); - } +void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data, + ioasid_t pasid, u64 address, size_t size) +{ + struct iommu_cmd cmd; + struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); + + build_inv_iommu_pages(&cmd, address, size, + dev_data->gcr3_info.domid, pasid, true); + iommu_queue_command(iommu, &cmd); + + if (dev_data->ats_enabled) + device_flush_iotlb(dev_data, address, size, pasid, true); + + iommu_completion_wait(iommu); } +static void dev_flush_pasid_all(struct iommu_dev_data *dev_data, + ioasid_t pasid) +{ + amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0, + CMD_INV_IOMMU_ALL_PAGES_ADDRESS); +} -/* - * This function flushes the DTEs for all devices in domain - */ -static void domain_flush_devices(struct protection_domain *domain) +int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag) { struct iommu_dev_data *dev_data; + struct amd_iommu *iommu; + struct iommu_cmd cmd; - list_for_each_entry(dev_data, &domain->dev_list, list) - device_flush_dte(dev_data); + dev_data = dev_iommu_priv_get(dev); + iommu = get_amd_iommu_from_dev(dev); + + build_complete_ppr(&cmd, dev_data->devid, pasid, status, + tag, dev_data->pri_tlp); + + return iommu_queue_command(iommu, &cmd); } /**************************************************************************** @@ -1324,28 +1812,14 @@ static void domain_flush_devices(struct protection_domain *domain) * ****************************************************************************/ -static u16 domain_id_alloc(void) +static int pdom_id_alloc(void) { - int id; - - spin_lock(&pd_bitmap_lock); - id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID); - BUG_ON(id == 0); - if (id > 0 && id < MAX_DOMAIN_ID) - __set_bit(id, amd_iommu_pd_alloc_bitmap); - else - id = 0; - spin_unlock(&pd_bitmap_lock); - - return 
id; + return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC); } -static void domain_id_free(int id) +static void pdom_id_free(int id) { - spin_lock(&pd_bitmap_lock); - if (id > 0 && id < MAX_DOMAIN_ID) - __clear_bit(id, amd_iommu_pd_alloc_bitmap); - spin_unlock(&pd_bitmap_lock); + ida_free(&pdom_ids, id); } static void free_gcr3_tbl_level1(u64 *tbl) @@ -1359,7 +1833,7 @@ static void free_gcr3_tbl_level1(u64 *tbl) ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK); - free_page((unsigned long)ptr); + iommu_free_pages(ptr); } } @@ -1378,190 +1852,428 @@ static void free_gcr3_tbl_level2(u64 *tbl) } } -static void free_gcr3_table(struct protection_domain *domain) +static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info) { - if (domain->glx == 2) - free_gcr3_tbl_level2(domain->gcr3_tbl); - else if (domain->glx == 1) - free_gcr3_tbl_level1(domain->gcr3_tbl); + if (gcr3_info->glx == 2) + free_gcr3_tbl_level2(gcr3_info->gcr3_tbl); + else if (gcr3_info->glx == 1) + free_gcr3_tbl_level1(gcr3_info->gcr3_tbl); else - BUG_ON(domain->glx != 0); + WARN_ON_ONCE(gcr3_info->glx != 0); - free_page((unsigned long)domain->gcr3_tbl); + gcr3_info->glx = 0; + + /* Free per device domain ID */ + pdom_id_free(gcr3_info->domid); + + iommu_free_pages(gcr3_info->gcr3_tbl); + gcr3_info->gcr3_tbl = NULL; } -static void set_dte_entry(u16 devid, struct protection_domain *domain, - bool ats, bool ppr) +/* + * Number of GCR3 table levels required. Level must be 4-Kbyte + * page and can contain up to 512 entries. + */ +static int get_gcr3_levels(int pasids) { - u64 pte_root = 0; - u64 flags = 0; - u32 old_domid; + int levels; - if (domain->iop.mode != PAGE_MODE_NONE) - pte_root = iommu_virt_to_phys(domain->iop.root); + if (pasids == -1) + return amd_iommu_max_glx_val; - pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK) - << DEV_ENTRY_MODE_SHIFT; - pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV; + levels = get_count_order(pasids); - flags = amd_iommu_dev_table[devid].data[1]; + return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels; +} + +static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info, + struct amd_iommu *iommu, int pasids) +{ + int levels = get_gcr3_levels(pasids); + int nid = iommu ? 
dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; + int domid; + + if (levels > amd_iommu_max_glx_val) + return -EINVAL; - if (ats) - flags |= DTE_FLAG_IOTLB; + if (gcr3_info->gcr3_tbl) + return -EBUSY; - if (ppr) { - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + /* Allocate per device domain ID */ + domid = pdom_id_alloc(); + if (domid <= 0) + return -ENOSPC; + gcr3_info->domid = domid; - if (iommu_feature(iommu, FEATURE_EPHSUP)) - pte_root |= 1ULL << DEV_ENTRY_PPR; + gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K); + if (gcr3_info->gcr3_tbl == NULL) { + pdom_id_free(domid); + return -ENOMEM; } - if (domain->flags & PD_IOMMUV2_MASK) { - u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); - u64 glx = domain->glx; - u64 tmp; + gcr3_info->glx = levels; - pte_root |= DTE_FLAG_GV; - pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT; + return 0; +} - /* First mask out possible old values for GCR3 table */ - tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; - flags &= ~tmp; +static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info, + ioasid_t pasid, bool alloc) +{ + int index; + u64 *pte; + u64 *root = gcr3_info->gcr3_tbl; + int level = gcr3_info->glx; - tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C; - flags &= ~tmp; + while (true) { - /* Encode GCR3 table into DTE */ - tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A; - pte_root |= tmp; + index = (pasid >> (9 * level)) & 0x1ff; + pte = &root[index]; - tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B; - flags |= tmp; + if (level == 0) + break; - tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C; - flags |= tmp; - } + if (!(*pte & GCR3_VALID)) { + if (!alloc) + return NULL; - flags &= ~DEV_DOMID_MASK; - flags |= domain->id; + root = (void *)get_zeroed_page(GFP_ATOMIC); + if (root == NULL) + return NULL; - old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK; - amd_iommu_dev_table[devid].data[1] = flags; - amd_iommu_dev_table[devid].data[0] = pte_root; + *pte = iommu_virt_to_phys(root) | GCR3_VALID; + } - /* - * A kdump kernel might be replacing a domain ID that was copied from - * the previous kernel--if so, it needs to flush the translation cache - * entries for the old domain ID that is being overwritten - */ - if (old_domid) { - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + root = iommu_phys_to_virt(*pte & PAGE_MASK); - amd_iommu_flush_tlb_domid(iommu, old_domid); + level -= 1; } + + return pte; +} + +static int update_gcr3(struct iommu_dev_data *dev_data, + ioasid_t pasid, unsigned long gcr3, bool set) +{ + struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; + u64 *pte; + + pte = __get_gcr3_pte(gcr3_info, pasid, true); + if (pte == NULL) + return -ENOMEM; + + if (set) + *pte = (gcr3 & PAGE_MASK) | GCR3_VALID; + else + *pte = 0; + + dev_flush_pasid_all(dev_data, pasid); + return 0; } -static void clear_dte_entry(u16 devid) +int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid, + unsigned long gcr3) { - /* remove entry from the device table seen by the hardware */ - amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV; - amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK; + struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; + int ret; + + iommu_group_mutex_assert(dev_data->dev); - amd_iommu_apply_erratum_63(devid); + ret = update_gcr3(dev_data, pasid, gcr3, true); + if (ret) + return ret; + + gcr3_info->pasid_cnt++; + return ret; } -static void do_attach(struct iommu_dev_data *dev_data, - struct protection_domain *domain) +int amd_iommu_clear_gcr3(struct 
iommu_dev_data *dev_data, ioasid_t pasid) { - struct amd_iommu *iommu; - bool ats; + struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; + int ret; - iommu = amd_iommu_rlookup_table[dev_data->devid]; - ats = dev_data->ats.enabled; + iommu_group_mutex_assert(dev_data->dev); - /* Update data structures */ - dev_data->domain = domain; - list_add(&dev_data->list, &domain->dev_list); + ret = update_gcr3(dev_data, pasid, 0, false); + if (ret) + return ret; - /* Do reference counting */ - domain->dev_iommu[iommu->index] += 1; - domain->dev_cnt += 1; + gcr3_info->pasid_cnt--; + return ret; +} - /* Update device table */ - set_dte_entry(dev_data->devid, domain, - ats, dev_data->iommu_v2); - clone_aliases(dev_data->pdev); +static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr, + struct dev_table_entry *new) +{ + /* All existing DTE must have V bit set */ + new->data128[0] = DTE_FLAG_V; + new->data128[1] = 0; +} - device_flush_dte(dev_data); +/* + * Note: + * The old values for the GCR3 table and GPT have been cleared by the caller. + */ +static void set_dte_gcr3_table(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data, + struct dev_table_entry *target) +{ + struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; + u64 gcr3; + + if (!gcr3_info->gcr3_tbl) + return; + + pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n", + __func__, dev_data->devid, gcr3_info->glx, + (unsigned long long)gcr3_info->gcr3_tbl); + + gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl); + + target->data[0] |= DTE_FLAG_GV | + FIELD_PREP(DTE_GLX, gcr3_info->glx) | + FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12); + if (pdom_is_v2_pgtbl_mode(dev_data->domain)) + target->data[0] |= DTE_FLAG_GIOV; + + target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) | + FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31); + + /* Guest page table can only support 4 and 5 levels */ + if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) + target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL); + else + target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL); } -static void do_detach(struct iommu_dev_data *dev_data) +static void set_dte_entry(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data, + phys_addr_t top_paddr, unsigned int top_level) { + u16 domid; + u32 old_domid; + struct dev_table_entry *initial_dte; + struct dev_table_entry new = {}; struct protection_domain *domain = dev_data->domain; - struct amd_iommu *iommu; + struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; + struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; + struct pt_iommu_amdv1_hw_info pt_info; + + make_clear_dte(dev_data, dte, &new); + + if (gcr3_info && gcr3_info->gcr3_tbl) + domid = dev_data->gcr3_info.domid; + else { + domid = domain->id; + + if (domain->domain.type & __IOMMU_DOMAIN_PAGING) { + /* + * When updating the IO pagetable, the new top and level + * are provided as parameters. For other operations, e.g. + * device attach, retrieve the current pagetable info + * via the IOMMU PT API.
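set_dte_gcr3_table() above scatters the GCR3 root pointer across three DTE bitfields. A round-trip check of that split; the bit ranges are inferred from the DTE_GCR3_14_12/_30_15/_51_31 names, so treat them as an assumption:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	/* 4K-aligned GCR3 root somewhere in a 52-bit physical space */
    	uint64_t gcr3 = 0xA5A5A5A5F000ULL;

    	uint64_t f14_12 = (gcr3 >> 12) & 0x7;       /* -> DTE data[0] */
    	uint64_t f30_15 = (gcr3 >> 15) & 0xffff;    /* -> DTE data[1] */
    	uint64_t f51_31 = (gcr3 >> 31) & 0x1fffff;  /* -> DTE data[1] */

    	/* Hardware reassembles the pointer from the three fields */
    	uint64_t rebuilt = (f14_12 << 12) | (f30_15 << 15) | (f51_31 << 31);

    	assert(rebuilt == gcr3);
    	return 0;
    }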
+ */ + if (top_paddr) { + pt_info.host_pt_root = top_paddr; + pt_info.mode = top_level + 1; + } else { + WARN_ON(top_paddr || top_level); + pt_iommu_amdv1_hw_info(&domain->amdv1, + &pt_info); + } - iommu = amd_iommu_rlookup_table[dev_data->devid]; + new.data[0] |= __sme_set(pt_info.host_pt_root) | + (pt_info.mode & DEV_ENTRY_MODE_MASK) + << DEV_ENTRY_MODE_SHIFT; + } + } - /* Update data structures */ - dev_data->domain = NULL; - list_del(&dev_data->list); - clear_dte_entry(dev_data->devid); - clone_aliases(dev_data->pdev); + new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW; - /* Flush the DTE entry */ - device_flush_dte(dev_data); + /* + * When SNP is enabled, we can only support TV=1 with non-zero domain ID. + * This is prevented by the SNP-enable and IOMMU_DOMAIN_IDENTITY check in + * do_iommu_domain_alloc(). + */ + WARN_ON(amd_iommu_snp_en && (domid == 0)); + new.data[0] |= DTE_FLAG_TV; - /* Flush IOTLB */ - amd_iommu_domain_flush_tlb_pde(domain); + if (dev_data->ppr) + new.data[0] |= 1ULL << DEV_ENTRY_PPR; - /* Wait for the flushes to finish */ - amd_iommu_domain_flush_complete(domain); + if (domain->dirty_tracking) + new.data[0] |= DTE_FLAG_HAD; - /* decrease reference counters - needs to happen after the flushes */ - domain->dev_iommu[iommu->index] -= 1; - domain->dev_cnt -= 1; + if (dev_data->ats_enabled) + new.data[1] |= DTE_FLAG_IOTLB; + + old_domid = READ_ONCE(dte->data[1]) & DEV_DOMID_MASK; + new.data[1] |= domid; + + /* + * Restore cached persistent DTE bits, which can be set by information + * in IVRS table. See set_dev_entry_from_acpi(). + */ + initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid); + if (initial_dte) { + new.data128[0] |= initial_dte->data128[0]; + new.data128[1] |= initial_dte->data128[1]; + } + + set_dte_gcr3_table(iommu, dev_data, &new); + + update_dte256(iommu, dev_data, &new); + + /* + * A kdump kernel might be replacing a domain ID that was copied from + * the previous kernel--if so, it needs to flush the translation cache + * entries for the old domain ID that is being overwritten + */ + if (old_domid) { + amd_iommu_flush_tlb_domid(iommu, old_domid); + } } -static void pdev_iommuv2_disable(struct pci_dev *pdev) +/* + * Clear DMA-remap related flags to block all DMA (blocked domain) + */ +static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) { - pci_disable_ats(pdev); - pci_disable_pri(pdev); - pci_disable_pasid(pdev); + struct dev_table_entry new = {}; + struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid]; + + make_clear_dte(dev_data, dte, &new); + update_dte256(iommu, dev_data, &new); } -static int pdev_iommuv2_enable(struct pci_dev *pdev) +/* Update and flush DTE for the given device */ +static void dev_update_dte(struct iommu_dev_data *dev_data, bool set) { - int ret; + struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); - /* Only allow access to user-accessible pages */ - ret = pci_enable_pasid(pdev, 0); - if (ret) - goto out_err; + if (set) + set_dte_entry(iommu, dev_data, 0, 0); + else + clear_dte_entry(iommu, dev_data); - /* First reset the PRI state of the device */ - ret = pci_reset_pri(pdev); - if (ret) - goto out_err; + clone_aliases(iommu, dev_data->dev); + device_flush_dte(dev_data); + iommu_completion_wait(iommu); +} + +/* + * If domain is SVA capable then initialize GCR3 table. Also if domain is + * in v2 page table mode then update GCR3[0].
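The GCR3 table that init_gcr3_table() sets up is walked by __get_gcr3_pte() as a radix tree consuming 9 PASID bits per level (512 u64 entries per 4-Kbyte page). A quick check of the per-level index extraction:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int pasid = 0x12345;	/* some 20-bit PASID */
    	int level;

    	/* glx == 2 -> three-level walk; prints 0x000, 0x091, 0x145 */
    	for (level = 2; level >= 0; level--)
    		printf("level %d index: 0x%03x\n", level,
    		       (pasid >> (9 * level)) & 0x1ff);
    	return 0;
    }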
+ */ +static int init_gcr3_table(struct iommu_dev_data *dev_data, + struct protection_domain *pdom) +{ + struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + int max_pasids = dev_data->max_pasids; + struct pt_iommu_x86_64_hw_info pt_info; + int ret = 0; + + /* + * If domain is in pt mode then setup GCR3 table only if device + * is PASID capable + */ + if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data)) + return ret; - /* Enable PRI */ - /* FIXME: Hardcode number of outstanding requests for now */ - ret = pci_enable_pri(pdev, 32); + /* + * By default, setup GCR3 table to support MAX PASIDs + * supported by the device/IOMMU. + */ + ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, + max_pasids > 0 ? max_pasids : 1); if (ret) - goto out_err; + return ret; + + /* Setup GCR3[0] only if domain is setup with v2 page table mode */ + if (!pdom_is_v2_pgtbl_mode(pdom)) + return ret; - ret = pci_enable_ats(pdev, PAGE_SHIFT); + pt_iommu_x86_64_hw_info(&pdom->amdv2, &pt_info); + ret = update_gcr3(dev_data, 0, __sme_set(pt_info.gcr3_pt), true); if (ret) - goto out_err; + free_gcr3_table(&dev_data->gcr3_info); - return 0; + return ret; +} -out_err: - pci_disable_pri(pdev); - pci_disable_pasid(pdev); +static void destroy_gcr3_table(struct iommu_dev_data *dev_data, + struct protection_domain *pdom) +{ + struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; + + if (pdom_is_v2_pgtbl_mode(pdom)) + update_gcr3(dev_data, 0, 0, false); + + if (gcr3_info->gcr3_tbl == NULL) + return; + + free_gcr3_table(gcr3_info); +} + +static int pdom_attach_iommu(struct amd_iommu *iommu, + struct protection_domain *pdom) +{ + struct pdom_iommu_info *pdom_iommu_info, *curr; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&pdom->lock, flags); + pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); + if (pdom_iommu_info) { + pdom_iommu_info->refcnt++; + goto out_unlock; + } + + pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC); + if (!pdom_iommu_info) { + ret = -ENOMEM; + goto out_unlock; + } + + pdom_iommu_info->iommu = iommu; + pdom_iommu_info->refcnt = 1; + + curr = xa_cmpxchg(&pdom->iommu_array, iommu->index, + NULL, pdom_iommu_info, GFP_ATOMIC); + if (curr) { + kfree(pdom_iommu_info); + ret = -ENOSPC; + goto out_unlock; + } + +out_unlock: + spin_unlock_irqrestore(&pdom->lock, flags); return ret; } +static void pdom_detach_iommu(struct amd_iommu *iommu, + struct protection_domain *pdom) +{ + struct pdom_iommu_info *pdom_iommu_info; + unsigned long flags; + + spin_lock_irqsave(&pdom->lock, flags); + + pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index); + if (!pdom_iommu_info) { + spin_unlock_irqrestore(&pdom->lock, flags); + return; + } + + pdom_iommu_info->refcnt--; + if (pdom_iommu_info->refcnt == 0) { + xa_erase(&pdom->iommu_array, iommu->index); + kfree(pdom_iommu_info); + } + + spin_unlock_irqrestore(&pdom->lock, flags); +} + /* * If a device is not yet associated with a domain, this function makes the * device visible in the domain @@ -1569,64 +2281,59 @@ out_err: static int attach_device(struct device *dev, struct protection_domain *domain) { - struct iommu_dev_data *dev_data; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); struct pci_dev *pdev; unsigned long flags; - int ret; + int ret = 0; - spin_lock_irqsave(&domain->lock, flags); - - dev_data = dev_iommu_priv_get(dev); + mutex_lock(&dev_data->mutex); - spin_lock(&dev_data->lock); - - ret = -EBUSY; - if 
(dev_data->domain != NULL) + if (dev_data->domain != NULL) { + ret = -EBUSY; goto out; + } - if (!dev_is_pci(dev)) - goto skip_ats_check; - - pdev = to_pci_dev(dev); - if (domain->flags & PD_IOMMUV2_MASK) { - struct iommu_domain *def_domain = iommu_get_dma_domain(dev); + /* Do reference counting */ + ret = pdom_attach_iommu(iommu, domain); + if (ret) + goto out; - ret = -EINVAL; - if (def_domain->type != IOMMU_DOMAIN_IDENTITY) + /* Setup GCR3 table */ + if (pdom_is_sva_capable(domain)) { + ret = init_gcr3_table(dev_data, domain); + if (ret) { + pdom_detach_iommu(iommu, domain); goto out; - - if (dev_data->iommu_v2) { - if (pdev_iommuv2_enable(pdev) != 0) - goto out; - - dev_data->ats.enabled = true; - dev_data->ats.qdep = pci_ats_queue_depth(pdev); - dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); } - } else if (amd_iommu_iotlb_sup && - pci_enable_ats(pdev, PAGE_SHIFT) == 0) { - dev_data->ats.enabled = true; - dev_data->ats.qdep = pci_ats_queue_depth(pdev); } -skip_ats_check: - ret = 0; + pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; + if (pdev && pdom_is_sva_capable(domain)) { + pdev_enable_caps(pdev); - do_attach(dev_data, domain); + /* + * Device can continue to function even if IOPF + * enablement failed. Hence in error path just + * disable device PRI support. + */ + if (amd_iommu_iopf_add_device(iommu, dev_data)) + pdev_disable_cap_pri(pdev); + } else if (pdev) { + pdev_enable_cap_ats(pdev); + } - /* - * We might boot into a crash-kernel here. The crashed kernel - * left the caches in the IOMMU dirty. So we have to flush - * here to evict all dirty stuff. - */ - amd_iommu_domain_flush_tlb_pde(domain); + /* Update data structures */ + dev_data->domain = domain; + spin_lock_irqsave(&domain->lock, flags); + list_add(&dev_data->list, &domain->dev_list); + spin_unlock_irqrestore(&domain->lock, flags); - amd_iommu_domain_flush_complete(domain); + /* Update device table */ + dev_update_dte(dev_data, true); out: - spin_unlock(&dev_data->lock); - - spin_unlock_irqrestore(&domain->lock, flags); + mutex_unlock(&dev_data->mutex); return ret; } @@ -1636,16 +2343,12 @@ out: */ static void detach_device(struct device *dev) { - struct protection_domain *domain; - struct iommu_dev_data *dev_data; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + struct protection_domain *domain = dev_data->domain; unsigned long flags; - dev_data = dev_iommu_priv_get(dev); - domain = dev_data->domain; - - spin_lock_irqsave(&domain->lock, flags); - - spin_lock(&dev_data->lock); + mutex_lock(&dev_data->mutex); /* * First check if the device is still attached. 
@@ -1636,16 +2343,12 @@ out:
  */
 static void detach_device(struct device *dev)
 {
-	struct protection_domain *domain;
-	struct iommu_dev_data *dev_data;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+	struct protection_domain *domain = dev_data->domain;
 	unsigned long flags;
 
-	dev_data = dev_iommu_priv_get(dev);
-	domain   = dev_data->domain;
-
-	spin_lock_irqsave(&domain->lock, flags);
-
-	spin_lock(&dev_data->lock);
+	mutex_lock(&dev_data->mutex);
 
 	/*
 	 * First check if the device is still attached. It might already
@@ -1656,79 +2359,113 @@ static void detach_device(struct device *dev)
 	if (WARN_ON(!dev_data->domain))
 		goto out;
 
-	do_detach(dev_data);
+	/* Remove IOPF handler */
+	if (dev_data->ppr) {
+		iopf_queue_flush_dev(dev);
+		amd_iommu_iopf_remove_device(iommu, dev_data);
+	}
 
-	if (!dev_is_pci(dev))
-		goto out;
+	if (dev_is_pci(dev))
+		pdev_disable_caps(to_pci_dev(dev));
 
-	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
-		pdev_iommuv2_disable(to_pci_dev(dev));
-	else if (dev_data->ats.enabled)
-		pci_disable_ats(to_pci_dev(dev));
+	/* Clear DTE and flush the entry */
+	dev_update_dte(dev_data, false);
 
-	dev_data->ats.enabled = false;
+	/* Flush IOTLB and wait for the flushes to finish */
+	spin_lock_irqsave(&domain->lock, flags);
+	amd_iommu_domain_flush_all(domain);
+	list_del(&dev_data->list);
+	spin_unlock_irqrestore(&domain->lock, flags);
 
-out:
-	spin_unlock(&dev_data->lock);
+	/* Clear GCR3 table */
+	if (pdom_is_sva_capable(domain))
+		destroy_gcr3_table(dev_data, domain);
 
-	spin_unlock_irqrestore(&domain->lock, flags);
+	/* Update data structures */
+	dev_data->domain = NULL;
+
+	/* decrease reference counters - needs to happen after the flushes */
+	pdom_detach_iommu(iommu, domain);
+
+out:
+	mutex_unlock(&dev_data->mutex);
 }
 
 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
 {
 	struct iommu_device *iommu_dev;
 	struct amd_iommu *iommu;
-	int ret, devid;
+	struct iommu_dev_data *dev_data;
+	int ret;
 
 	if (!check_device(dev))
 		return ERR_PTR(-ENODEV);
 
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
+	iommu = rlookup_amd_iommu(dev);
+	if (!iommu)
+		return ERR_PTR(-ENODEV);
+
+	/* Not registered yet? */
+	if (!iommu->iommu.ops)
+		return ERR_PTR(-ENODEV);
 
 	if (dev_iommu_priv_get(dev))
 		return &iommu->iommu;
 
-	ret = iommu_init_device(dev);
+	ret = iommu_init_device(iommu, dev);
 	if (ret) {
-		if (ret != -ENOTSUPP)
-			dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
+		dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
 		iommu_dev = ERR_PTR(ret);
-		iommu_ignore_device(dev);
-	} else {
-		amd_iommu_set_pci_msi_domain(dev, iommu);
-		iommu_dev = &iommu->iommu;
+		iommu_ignore_device(iommu, dev);
+		goto out_err;
 	}
 
-	iommu_completion_wait(iommu);
+	amd_iommu_set_pci_msi_domain(dev, iommu);
+	iommu_dev = &iommu->iommu;
 
-	return iommu_dev;
-}
+	/*
+	 * If IOMMU and device supports PASID then it will contain max
+	 * supported PASIDs, else it will be zero.
+	 */
+	dev_data = dev_iommu_priv_get(dev);
+	if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
+	    pdev_pasid_supported(dev_data)) {
+		dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
+					     pci_max_pasids(to_pci_dev(dev)));
+	}
 
-static void amd_iommu_probe_finalize(struct device *dev)
-{
-	struct iommu_domain *domain;
+	if (amd_iommu_pgtable == PD_MODE_NONE) {
+		pr_warn_once("%s: DMA translation not supported by iommu.\n",
+			     __func__);
+		iommu_dev = ERR_PTR(-ENODEV);
+		goto out_err;
+	}
+
+out_err:
+
+	iommu_completion_wait(iommu);
 
-	/* Domains are initialized for this device - have a look what we ended up with */
-	domain = iommu_get_domain_for_dev(dev);
-	if (domain->type == IOMMU_DOMAIN_DMA)
-		iommu_setup_dma_ops(dev, 0, U64_MAX);
+	if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
+		dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K;
 	else
-		set_dma_ops(dev, NULL);
+		dev_data->max_irqs = MAX_IRQS_PER_TABLE_512;
+
+	if (dev_is_pci(dev))
+		pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);
+
+	return iommu_dev;
 }
 
 static void amd_iommu_release_device(struct device *dev)
 {
-	int devid = get_device_id(dev);
-	struct amd_iommu *iommu;
-
-	if (!check_device(dev))
-		return;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
 
-	iommu = amd_iommu_rlookup_table[devid];
+	WARN_ON(dev_data->domain);
 
-	amd_iommu_uninit_device(dev);
-	iommu_completion_wait(iommu);
+	/*
+	 * We keep dev_data around for unplugged devices and reuse it when the
	 * device is re-plugged - not doing so would introduce a ton of races.
+	 */
 }
 
 static struct iommu_group *amd_iommu_device_group(struct device *dev)
@@ -1741,266 +2478,408 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
 
 /*****************************************************************************
  *
- * The next functions belong to the dma_ops mapping/unmapping code.
+ * The following functions belong to the exported interface of AMD IOMMU
+ *
+ * This interface allows access to lower level functions of the IOMMU
+ * like protection domain handling and assignement of devices to domains
+ * which is not possible with the dma_ops interface.
  *
  *****************************************************************************/
 
-static void update_device_table(struct protection_domain *domain)
+static void protection_domain_init(struct protection_domain *domain)
 {
-	struct iommu_dev_data *dev_data;
-
-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		set_dte_entry(dev_data->devid, domain,
-			      dev_data->ats.enabled, dev_data->iommu_v2);
-		clone_aliases(dev_data->pdev);
-	}
+	spin_lock_init(&domain->lock);
+	INIT_LIST_HEAD(&domain->dev_list);
+	INIT_LIST_HEAD(&domain->dev_data_list);
+	xa_init(&domain->iommu_array);
 }
 
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
+struct protection_domain *protection_domain_alloc(void)
 {
-	update_device_table(domain);
-	domain_flush_devices(domain);
+	struct protection_domain *domain;
+	int domid;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		return NULL;
+
+	domid = pdom_id_alloc();
+	if (domid <= 0) {
+		kfree(domain);
+		return NULL;
+	}
+	domain->id = domid;
+
+	protection_domain_init(domain);
+
+	return domain;
 }
 
-void amd_iommu_domain_update(struct protection_domain *domain)
+static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 {
-	/* Update device table */
-	amd_iommu_update_and_flush_device_table(domain);
+	if (amd_iommu_hatdis)
+		return false;
 
-	/* Flush domain TLB(s) and wait for completion */
-	amd_iommu_domain_flush_tlb_pde(domain);
-	amd_iommu_domain_flush_complete(domain);
+	return iommu && (iommu->features & FEATURE_HDSUP);
 }
 
-static void __init amd_iommu_init_dma_ops(void)
+static spinlock_t *amd_iommu_get_top_lock(struct pt_iommu *iommupt)
 {
-	swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
+	struct protection_domain *pdom =
+		container_of(iommupt, struct protection_domain, iommu);
 
-	if (amd_iommu_unmap_flush)
-		pr_info("IO/TLB flush on unmap enabled\n");
-	else
-		pr_info("Lazy IO/TLB flushing enabled\n");
-	iommu_set_dma_strict(amd_iommu_unmap_flush);
+	return &pdom->lock;
 }
 
-int __init amd_iommu_init_api(void)
+/*
+ * Update all HW references to the domain with a new pgtable configuration.
+ */
+static void amd_iommu_change_top(struct pt_iommu *iommu_table,
+				 phys_addr_t top_paddr, unsigned int top_level)
 {
-	int err;
+	struct protection_domain *pdom =
+		container_of(iommu_table, struct protection_domain, iommu);
+	struct iommu_dev_data *dev_data;
 
-	amd_iommu_init_dma_ops();
+	lockdep_assert_held(&pdom->lock);
 
-	err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
-	if (err)
-		return err;
-#ifdef CONFIG_ARM_AMBA
-	err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
-	if (err)
-		return err;
-#endif
-	err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
-	if (err)
-		return err;
+	/* Update the DTE for all devices attached to this domain */
+	list_for_each_entry(dev_data, &pdom->dev_list, list) {
+		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
 
-	return 0;
-}
+		/* Update the HW references with the new level and top ptr */
+		set_dte_entry(iommu, dev_data, top_paddr, top_level);
+		clone_aliases(iommu, dev_data->dev);
+	}
 
-/*****************************************************************************
- *
- * The following functions belong to the exported interface of AMD IOMMU
- *
- * This interface allows access to lower level functions of the IOMMU
- * like protection domain handling and assignement of devices to domains
- * which is not possible with the dma_ops interface.
- *
- *****************************************************************************/
+	list_for_each_entry(dev_data, &pdom->dev_list, list)
+		device_flush_dte(dev_data);
 
-static void cleanup_domain(struct protection_domain *domain)
+	domain_flush_complete(pdom);
+}
+
+/*
+ * amd_iommu_iotlb_sync_map() is used to generate flushes for non-present to
+ * present (ie mapping) operations. It is a NOP if the IOMMU doesn't have non
+ * present caching (like hypervisor shadowing).
+ */
+static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+				    unsigned long iova, size_t size)
 {
-	struct iommu_dev_data *entry;
+	struct protection_domain *domain = to_pdomain(dom);
 	unsigned long flags;
 
-	spin_lock_irqsave(&domain->lock, flags);
-
-	while (!list_empty(&domain->dev_list)) {
-		entry = list_first_entry(&domain->dev_list,
-					 struct iommu_dev_data, list);
-		BUG_ON(!entry->domain);
-		do_detach(entry);
-	}
+	if (likely(!amd_iommu_np_cache))
+		return 0;
 
+	spin_lock_irqsave(&domain->lock, flags);
+	amd_iommu_domain_flush_pages(domain, iova, size);
 	spin_unlock_irqrestore(&domain->lock, flags);
+	return 0;
 }
 
-static void protection_domain_free(struct protection_domain *domain)
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
-	if (!domain)
-		return;
-
-	if (domain->id)
-		domain_id_free(domain->id);
-
-	if (domain->iop.pgtbl_cfg.tlb)
-		free_io_pgtable_ops(&domain->iop.iop.ops);
+	struct protection_domain *dom = to_pdomain(domain);
+	unsigned long flags;
 
-	kfree(domain);
+	spin_lock_irqsave(&dom->lock, flags);
+	amd_iommu_domain_flush_all(dom);
+	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
-static int protection_domain_init_v1(struct protection_domain *domain, int mode)
+static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+				 struct iommu_iotlb_gather *gather)
 {
-	u64 *pt_root = NULL;
-
-	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
+	struct protection_domain *dom = to_pdomain(domain);
+	unsigned long flags;
 
-	spin_lock_init(&domain->lock);
-	domain->id = domain_id_alloc();
-	if (!domain->id)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&domain->dev_list);
+	spin_lock_irqsave(&dom->lock, flags);
+	amd_iommu_domain_flush_pages(dom, gather->start,
+				     gather->end - gather->start + 1);
+	spin_unlock_irqrestore(&dom->lock, flags);
+	iommu_put_pages_list(&gather->freelist);
+}
 
-	if (mode != PAGE_MODE_NONE) {
-		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-		if (!pt_root)
-			return -ENOMEM;
-	}
+static const struct pt_iommu_driver_ops amd_hw_driver_ops_v1 = {
	.get_top_lock = amd_iommu_get_top_lock,
	.change_top = amd_iommu_change_top,
+};
 
-	amd_iommu_domain_set_pgtable(domain, pt_root, mode);
+static const struct iommu_domain_ops amdv1_ops = {
+	IOMMU_PT_DOMAIN_OPS(amdv1),
+	.iotlb_sync_map = amd_iommu_iotlb_sync_map,
+	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+	.iotlb_sync = amd_iommu_iotlb_sync,
+	.attach_dev = amd_iommu_attach_device,
+	.free = amd_iommu_domain_free,
+	.enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
+};
 
-	return 0;
-}
+static const struct iommu_dirty_ops amdv1_dirty_ops = {
+	IOMMU_PT_DIRTY_OPS(amdv1),
+	.set_dirty_tracking = amd_iommu_set_dirty_tracking,
+};
 
-static struct protection_domain *protection_domain_alloc(unsigned int type)
+static struct iommu_domain *amd_iommu_domain_alloc_paging_v1(struct device *dev,
+							     u32 flags)
 {
-	struct io_pgtable_ops *pgtbl_ops;
+	struct pt_iommu_amdv1_cfg cfg = {};
 	struct protection_domain *domain;
-	int pgtable = amd_iommu_pgtable;
-	int mode = DEFAULT_PGTABLE_LEVEL;
 	int ret;
 
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (amd_iommu_hatdis)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	domain = protection_domain_alloc();
 	if (!domain)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
+
+	domain->pd_mode = PD_MODE_V1;
+	domain->iommu.driver_ops = &amd_hw_driver_ops_v1;
+	domain->iommu.nid = dev_to_node(dev);
+	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+		domain->domain.dirty_ops = &amdv1_dirty_ops;
 
 	/*
-	 * Force IOMMU v1 page table when iommu=pt and
-	 * when allocating domain for pass-through devices.
+	 * Someday FORCE_COHERENCE should be set by
+	 * amd_iommu_enforce_cache_coherency() like VT-d does.
 	 */
-	if (type == IOMMU_DOMAIN_IDENTITY) {
-		pgtable = AMD_IOMMU_V1;
-		mode = PAGE_MODE_NONE;
-	} else if (type == IOMMU_DOMAIN_UNMANAGED) {
-		pgtable = AMD_IOMMU_V1;
-	}
+	cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
+			      BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
			      BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
 
-	switch (pgtable) {
-	case AMD_IOMMU_V1:
-		ret = protection_domain_init_v1(domain, mode);
-		break;
-	default:
-		ret = -EINVAL;
-	}
+	/*
+	 * AMD's IOMMU can flush as many pages as necessary in a single flush.
+	 * Unless we run in a virtual machine, which can be inferred according
+	 * to whether "non-present cache" is on, it is probably best to prefer
+	 * (potentially) too extensive TLB flushing (i.e., more misses) over
+	 * multiple TLB flushes (i.e., more flushes). For virtual machines the
+	 * hypervisor needs to synchronize the host IOMMU PTEs with those of
+	 * the guest, and the trade-off is different: unnecessary TLB flushes
+	 * should be avoided.
+	 */
+	if (amd_iommu_np_cache)
+		cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
+	else
+		cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
 
-	if (ret)
-		goto out_err;
+	cfg.common.hw_max_vasz_lg2 =
+		min(64, (amd_iommu_hpt_level - 1) * 9 + 21);
+	cfg.common.hw_max_oasz_lg2 = 52;
+	cfg.starting_level = 2;
+	domain->domain.ops = &amdv1_ops;
 
-	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
-	if (!pgtbl_ops)
-		goto out_err;
+	ret = pt_iommu_amdv1_init(&domain->amdv1, &cfg, GFP_KERNEL);
+	if (ret) {
+		amd_iommu_domain_free(&domain->domain);
+		return ERR_PTR(ret);
+	}
 
-	return domain;
-out_err:
-	kfree(domain);
-	return NULL;
+	/*
+	 * Narrow the supported page sizes to those selected by the kernel
+	 * command line.
+	 */
+	domain->domain.pgsize_bitmap &= amd_iommu_pgsize_bitmap;
+	return &domain->domain;
 }
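Two details of the v1 configuration above are worth spelling out. The feature-bit choice encodes a TLB flush policy: on bare metal one over-wide range flush is cheaper than many precise flushes, while under a hypervisor (detected via amd_iommu_np_cache) each flush is trapped and expensive, so unmapped gaps should not be flushed. And the hw_max_vasz_lg2 formula counts 9 translation bits per page-table level on top of the 21 bits covered by a level-1 table (12 page-offset bits plus 9 index bits), e.g. a 4-level table gives (4 - 1) * 9 + 21 = 48 bits. A hedged sketch of the policy decision as a standalone predicate (the FEAT_* values are illustrative stand-ins, not the GENERIC_PT definitions):

#include <stdbool.h>
#include <stdio.h>

#define FEAT_FLUSH_RANGE         (1u << 0) /* one flush spanning the whole range */
#define FEAT_FLUSH_RANGE_NO_GAPS (1u << 1) /* flush only what was actually mapped */

static unsigned int pick_flush_features(bool np_cache)
{
	/*
	 * Virtualized (non-present caching on): flushes are trapped and
	 * costly, so avoid flushing unmapped gaps. Bare metal: prefer one
	 * possibly too-wide flush over many small ones.
	 */
	return np_cache ? FEAT_FLUSH_RANGE_NO_GAPS : FEAT_FLUSH_RANGE;
}

static unsigned int max_va_bits(unsigned int levels)
{
	unsigned int bits = (levels - 1) * 9 + 21;	/* 9 bits per level */

	return bits > 64 ? 64 : bits;			/* cf. min(64, ...) */
}

int main(void)
{
	printf("bare metal features: %#x\n", pick_flush_features(false));
	printf("virtualized features: %#x\n", pick_flush_features(true));
	printf("4-level VA bits: %u\n", max_va_bits(4));	/* 48 */
	return 0;
}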
 
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+static const struct iommu_domain_ops amdv2_ops = {
+	IOMMU_PT_DOMAIN_OPS(x86_64),
+	.iotlb_sync_map = amd_iommu_iotlb_sync_map,
+	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+	.iotlb_sync = amd_iommu_iotlb_sync,
+	.attach_dev = amd_iommu_attach_device,
+	.free = amd_iommu_domain_free,
+	/*
+	 * Note the AMDv2 page table format does not support a Force Coherency
+	 * bit, so enforce_cache_coherency should not be set. However VFIO is
+	 * not prepared to handle a case where some domains will support
+	 * enforcement and others do not. VFIO and iommufd will have to be fixed
+	 * before it can fully use the V2 page table. See the comment in
+	 * iommufd_hwpt_paging_alloc(). For now leave things as they have
+	 * historically been and lie about enforce_cache_coherencey.
+	 */
+	.enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
+};
+
+static struct iommu_domain *amd_iommu_domain_alloc_paging_v2(struct device *dev,
+							     u32 flags)
 {
+	struct pt_iommu_x86_64_cfg cfg = {};
 	struct protection_domain *domain;
+	int ret;
 
-	domain = protection_domain_alloc(type);
-	if (!domain)
-		return NULL;
+	if (!amd_iommu_v2_pgtbl_supported())
+		return ERR_PTR(-EOPNOTSUPP);
 
-	domain->domain.geometry.aperture_start = 0;
-	domain->domain.geometry.aperture_end   = ~0ULL;
-	domain->domain.geometry.force_aperture = true;
+	domain = protection_domain_alloc();
+	if (!domain)
+		return ERR_PTR(-ENOMEM);
 
-	if (type == IOMMU_DOMAIN_DMA &&
-	    iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
-		goto free_domain;
+	domain->pd_mode = PD_MODE_V2;
+	domain->iommu.nid = dev_to_node(dev);
 
-	return &domain->domain;
+	cfg.common.features = BIT(PT_FEAT_X86_64_AMD_ENCRYPT_TABLES);
+	if (amd_iommu_np_cache)
+		cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
+	else
+		cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
 
-free_domain:
-	protection_domain_free(domain);
+	/*
+	 * The v2 table behaves differently if it is attached to PASID 0 vs a
+	 * non-zero PASID. On PASID 0 it has no sign extension and the full
+	 * 57/48 bits decode the lower addresses. Otherwise it behaves like a
+	 * normal sign extended x86 page table. Since we want the domain to work
+	 * in both modes the top bit is removed and PT_FEAT_SIGN_EXTEND is not
+	 * set which creates a table that is compatible in both modes.
+	 */
+	if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
+		cfg.common.hw_max_vasz_lg2 = 56;
+		cfg.top_level = 4;
+	} else {
+		cfg.common.hw_max_vasz_lg2 = 47;
+		cfg.top_level = 3;
+	}
+	cfg.common.hw_max_oasz_lg2 = 52;
+	domain->domain.ops = &amdv2_ops;
 
-	return NULL;
+	ret = pt_iommu_x86_64_init(&domain->amdv2, &cfg, GFP_KERNEL);
+	if (ret) {
+		amd_iommu_domain_free(&domain->domain);
+		return ERR_PTR(ret);
+	}
+	return &domain->domain;
 }
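The 47/56-bit limits above are the usual x86 48/57-bit address widths minus the top bit, which is the sign-extension bit in normal x86 paging; dropping it yields one table layout that decodes identically whether or not the hardware sign-extends. A small demonstration of that arithmetic and the resulting usable-IOVA check (userspace sketch, illustrative only):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Max VA bits usable without relying on sign extension. */
static unsigned int usable_va_bits(unsigned int levels)
{
	unsigned int x86_bits = (levels == 5) ? 57 : 48;

	return x86_bits - 1;	/* drop the sign/top bit: 47 or 56 */
}

static bool iova_in_range(uint64_t iova, unsigned int levels)
{
	return iova < (1ull << usable_va_bits(levels));
}

int main(void)
{
	printf("4-level usable bits: %u\n", usable_va_bits(4));	/* 47 */
	printf("5-level usable bits: %u\n", usable_va_bits(5));	/* 56 */
	printf("0x7fffffffffff on 4-level? %d\n",
	       iova_in_range(0x7fffffffffffULL, 4));		/* 1 */
	printf("0x800000000000 on 4-level? %d\n",
	       iova_in_range(0x800000000000ULL, 4));		/* 0 */
	return 0;
}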
 
-static void amd_iommu_domain_free(struct iommu_domain *dom)
+static struct iommu_domain *
+amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+				    const struct iommu_user_data *user_data)
+
 {
-	struct protection_domain *domain;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+	const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+				    IOMMU_HWPT_ALLOC_PASID;
 
-	domain = to_pdomain(dom);
+	if ((flags & ~supported_flags) || user_data)
+		return ERR_PTR(-EOPNOTSUPP);
 
-	if (domain->dev_cnt > 0)
-		cleanup_domain(domain);
+	switch (flags & supported_flags) {
+	case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
+		/* Allocate domain with v1 page table for dirty tracking */
+		if (!amd_iommu_hd_support(iommu))
+			break;
+		return amd_iommu_domain_alloc_paging_v1(dev, flags);
+	case IOMMU_HWPT_ALLOC_PASID:
+		/* Allocate domain with v2 page table if IOMMU supports PASID. */
+		if (!amd_iommu_pasid_supported())
+			break;
+		return amd_iommu_domain_alloc_paging_v2(dev, flags);
+	case 0: {
+		struct iommu_domain *ret;
+
+		/* If nothing specific is required use the kernel commandline default */
+		if (amd_iommu_pgtable == PD_MODE_V1) {
+			ret = amd_iommu_domain_alloc_paging_v1(dev, flags);
+			if (ret != ERR_PTR(-EOPNOTSUPP))
+				return ret;
+			return amd_iommu_domain_alloc_paging_v2(dev, flags);
+		}
+		ret = amd_iommu_domain_alloc_paging_v2(dev, flags);
+		if (ret != ERR_PTR(-EOPNOTSUPP))
+			return ret;
+		return amd_iommu_domain_alloc_paging_v1(dev, flags);
+	}
+	default:
+		break;
+	}
+	return ERR_PTR(-EOPNOTSUPP);
+}
 
-	BUG_ON(domain->dev_cnt != 0);
+void amd_iommu_domain_free(struct iommu_domain *dom)
+{
+	struct protection_domain *domain = to_pdomain(dom);
 
-	if (!dom)
-		return;
+	WARN_ON(!list_empty(&domain->dev_list));
+	pt_iommu_deinit(&domain->iommu);
+	pdom_id_free(domain->id);
+	kfree(domain);
+}
+
+static int blocked_domain_attach_device(struct iommu_domain *domain,
+					struct device *dev,
+					struct iommu_domain *old)
+{
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
 
-	if (dom->type == IOMMU_DOMAIN_DMA)
-		iommu_put_dma_cookie(&domain->domain);
+	if (dev_data->domain)
+		detach_device(dev);
 
-	if (domain->flags & PD_IOMMUV2_MASK)
-		free_gcr3_table(domain);
+	/* Clear DTE and flush the entry */
+	mutex_lock(&dev_data->mutex);
+	dev_update_dte(dev_data, false);
+	mutex_unlock(&dev_data->mutex);
 
-	protection_domain_free(domain);
+	return 0;
 }
 
-static void amd_iommu_detach_device(struct iommu_domain *dom,
-				    struct device *dev)
+static int blocked_domain_set_dev_pasid(struct iommu_domain *domain,
+					struct device *dev, ioasid_t pasid,
+					struct iommu_domain *old)
 {
-	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
-	int devid = get_device_id(dev);
-	struct amd_iommu *iommu;
+	amd_iommu_remove_dev_pasid(dev, pasid, old);
+	return 0;
+}
 
-	if (!check_device(dev))
-		return;
+static struct iommu_domain blocked_domain = {
+	.type = IOMMU_DOMAIN_BLOCKED,
+	.ops = &(const struct iommu_domain_ops) {
+		.attach_dev     = blocked_domain_attach_device,
+		.set_dev_pasid  = blocked_domain_set_dev_pasid,
+	}
+};
 
-	if (dev_data->domain != NULL)
-		detach_device(dev);
+static struct protection_domain identity_domain;
 
-	iommu = amd_iommu_rlookup_table[devid];
-	if (!iommu)
-		return;
+static const struct iommu_domain_ops identity_domain_ops = {
+	.attach_dev = amd_iommu_attach_device,
+};
 
-#ifdef CONFIG_IRQ_REMAP
-	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-	    (dom->type == IOMMU_DOMAIN_UNMANAGED))
-		dev_data->use_vapic = 0;
-#endif
+void amd_iommu_init_identity_domain(void)
+{
+	struct iommu_domain *domain = &identity_domain.domain;
 
-	iommu_completion_wait(iommu);
+	domain->type = IOMMU_DOMAIN_IDENTITY;
+	domain->ops = &identity_domain_ops;
+	domain->owner = &amd_iommu_ops;
+
+	identity_domain.id = pdom_id_alloc();
+
+	protection_domain_init(&identity_domain);
 }
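amd_iommu_domain_alloc_paging_flags() dispatches on the allocation flags: dirty tracking forces the v1 format, PASID forces v2, and the no-flags case tries the command-line default first and falls back to the other format only when the allocator reports -EOPNOTSUPP. Real failures such as -ENOMEM are returned as-is rather than masked by a retry. The fallback idiom, reduced to its skeleton with hypothetical names:

#include <errno.h>
#include <stdio.h>

static int alloc_v1(void) { return -EOPNOTSUPP; }	/* pretend v1 unavailable */
static int alloc_v2(void) { return 0; }

/* Try the preferred format; fall back only on "not supported". */
static int alloc_default(int prefer_v1)
{
	int ret = prefer_v1 ? alloc_v1() : alloc_v2();

	if (ret != -EOPNOTSUPP)
		return ret;	/* success, or a real error: don't mask it */
	return prefer_v1 ? alloc_v2() : alloc_v1();
}

int main(void) { printf("alloc: %d\n", alloc_default(1)); return 0; }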
 
-static int amd_iommu_attach_device(struct iommu_domain *dom,
-				   struct device *dev)
+static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
+				   struct iommu_domain *old)
 {
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
 	struct protection_domain *domain = to_pdomain(dom);
-	struct iommu_dev_data *dev_data;
-	struct amd_iommu *iommu;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
 	int ret;
 
-	if (!check_device(dev))
-		return -EINVAL;
+	/*
+	 * Skip attach device to domain if new domain is same as
+	 * devices current domain
+	 */
+	if (dev_data->domain == domain)
+		return 0;
 
-	dev_data = dev_iommu_priv_get(dev);
 	dev_data->defer_attach = false;
 
-	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	if (!iommu)
+	/*
+	 * Restrict to devices with compatible IOMMU hardware support
+	 * when enforcement of dirty tracking is enabled.
+	 */
+	if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
 		return -EINVAL;
 
 	if (dev_data->domain)
@@ -2017,74 +2896,73 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	}
 #endif
 
-	iommu_completion_wait(iommu);
-
 	return ret;
 }
 
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, size_t page_size, int iommu_prot,
-			 gfp_t gfp)
+static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
 {
-	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
-	int prot = 0;
-	int ret = -EINVAL;
-
-	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
-	    (domain->iop.mode == PAGE_MODE_NONE))
-		return -EINVAL;
-
-	if (iommu_prot & IOMMU_READ)
-		prot |= IOMMU_PROT_IR;
-	if (iommu_prot & IOMMU_WRITE)
-		prot |= IOMMU_PROT_IW;
+	switch (cap) {
+	case IOMMU_CAP_CACHE_COHERENCY:
+		return true;
+	case IOMMU_CAP_NOEXEC:
+		return false;
+	case IOMMU_CAP_PRE_BOOT_PROTECTION:
+		return amdr_ivrs_remap_support;
+	case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
+		return true;
+	case IOMMU_CAP_DEFERRED_FLUSH:
+		return true;
+	case IOMMU_CAP_DIRTY_TRACKING: {
+		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
 
-	if (ops->map) {
-		ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
-		domain_flush_np_cache(domain, iova, page_size);
+		return amd_iommu_hd_support(iommu);
+	}
+	default:
+		break;
 	}
 
-	return ret;
+	return false;
 }
 
-static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			      size_t page_size,
-			      struct iommu_iotlb_gather *gather)
+static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
+					bool enable)
 {
-	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct protection_domain *pdomain = to_pdomain(domain);
+	struct dev_table_entry *dte;
+	struct iommu_dev_data *dev_data;
+	bool domain_flush = false;
+	struct amd_iommu *iommu;
+	unsigned long flags;
+	u64 new;
 
-	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
-	    (domain->iop.mode == PAGE_MODE_NONE))
+	spin_lock_irqsave(&pdomain->lock, flags);
+	if (!(pdomain->dirty_tracking ^ enable)) {
+		spin_unlock_irqrestore(&pdomain->lock, flags);
 		return 0;
+	}
 
-	return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
-}
+	list_for_each_entry(dev_data, &pdomain->dev_list, list) {
+		spin_lock(&dev_data->dte_lock);
+		iommu = get_amd_iommu_from_dev_data(dev_data);
+		dte = &get_dev_table(iommu)[dev_data->devid];
+		new = dte->data[0];
+		new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
+		dte->data[0] = new;
+		spin_unlock(&dev_data->dte_lock);
 
-static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
-					  dma_addr_t iova)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+		/* Flush device DTE */
+		device_flush_dte(dev_data);
+		domain_flush = true;
+	}
 
-	return ops->iova_to_phys(ops, iova);
-}
+	/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
+	if (domain_flush)
+		amd_iommu_domain_flush_all(pdomain);
 
-static bool amd_iommu_capable(enum iommu_cap cap)
-{
-	switch (cap) {
-	case IOMMU_CAP_CACHE_COHERENCY:
-		return true;
-	case IOMMU_CAP_INTR_REMAP:
-		return (irq_remapping_enabled == 1);
-	case IOMMU_CAP_NOEXEC:
-		return false;
-	default:
-		break;
-	}
+	pdomain->dirty_tracking = enable;
+	spin_unlock_irqrestore(&pdomain->lock, flags);
 
-	return false;
+	return 0;
 }
 
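amd_iommu_set_dirty_tracking() toggles the HAD (hardware access/dirty) field in DTE word 0 for every device in the domain under the per-device dte_lock, flushes each DTE, and only then invalidates the IOTLB so subsequent translations start marking IOPTEs dirty. The read-modify-write on the 64-bit DTE word, in isolation (the bit position below mirrors the driver's definition but is shown here only for illustration):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DTE_FLAG_HAD (3ULL << 7)	/* illustrative field position */

static uint64_t set_dirty_tracking(uint64_t dte0, bool enable)
{
	/* cf. "new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD)" */
	return enable ? (dte0 | DTE_FLAG_HAD) : (dte0 & ~DTE_FLAG_HAD);
}

int main(void)
{
	uint64_t dte0 = 0x600000000000001ULL;	/* made-up DTE word 0 */

	dte0 = set_dirty_tracking(dte0, true);
	printf("enabled:  %#llx\n", (unsigned long long)dte0);
	dte0 = set_dirty_tracking(dte0, false);
	printf("disabled: %#llx\n", (unsigned long long)dte0);
	return 0;
}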
 static void amd_iommu_get_resv_regions(struct device *dev,
@@ -2092,13 +2970,19 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 {
 	struct iommu_resv_region *region;
 	struct unity_map_entry *entry;
-	int devid;
+	struct amd_iommu *iommu;
+	struct amd_iommu_pci_seg *pci_seg;
+	int devid, sbdf;
 
-	devid = get_device_id(dev);
-	if (devid < 0)
+	sbdf = get_device_sbdf_id(dev);
+	if (sbdf < 0)
 		return;
 
-	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+	devid = PCI_SBDF_TO_DEVID(sbdf);
+	iommu = get_amd_iommu_from_dev(dev);
+	pci_seg = iommu->pci_seg;
+
+	list_for_each_entry(entry, &pci_seg->unity_map, list) {
 		int type, prot = 0;
 		size_t length;
 
@@ -2116,7 +3000,8 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 			type = IOMMU_RESV_RESERVED;
 
 		region = iommu_alloc_resv_region(entry->address_start,
-						 length, prot, type);
+						 length, prot, type,
+						 GFP_KERNEL);
 		if (!region) {
 			dev_err(dev, "Out of memory allocating dm-regions\n");
 			return;
@@ -2126,44 +3011,28 @@ static void amd_iommu_get_resv_regions(struct device *dev,
 
 	region = iommu_alloc_resv_region(MSI_RANGE_START,
 					 MSI_RANGE_END - MSI_RANGE_START + 1,
-					 0, IOMMU_RESV_MSI);
+					 0, IOMMU_RESV_MSI, GFP_KERNEL);
 	if (!region)
 		return;
 	list_add_tail(&region->list, head);
 
+	if (amd_iommu_ht_range_ignore())
+		return;
+
 	region = iommu_alloc_resv_region(HT_RANGE_START,
 					 HT_RANGE_END - HT_RANGE_START + 1,
-					 0, IOMMU_RESV_RESERVED);
+					 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
 	if (!region)
 		return;
 	list_add_tail(&region->list, head);
 }
 
-bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-				  struct device *dev)
+static bool amd_iommu_is_attach_deferred(struct device *dev)
 {
 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
 
 	return dev_data->defer_attach;
 }
-EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
-
-static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
-{
-	struct protection_domain *dom = to_pdomain(domain);
-	unsigned long flags;
-
-	spin_lock_irqsave(&dom->lock, flags);
-	amd_iommu_domain_flush_tlb_pde(dom);
-	amd_iommu_domain_flush_complete(dom);
-	spin_unlock_irqrestore(&dom->lock, flags);
-}
-
-static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
-				 struct iommu_iotlb_gather *gather)
-{
-	amd_iommu_flush_iotlb_all(domain);
-}
 
 static int amd_iommu_def_domain_type(struct device *dev)
 {
@@ -2173,415 +3042,133 @@ static int amd_iommu_def_domain_type(struct device *dev)
 	if (!dev_data)
 		return 0;
 
+	/* Always use DMA domain for untrusted device */
+	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
+		return IOMMU_DOMAIN_DMA;
+
 	/*
-	 * Do not identity map IOMMUv2 capable devices when memory encryption is
-	 * active, because some of those devices (AMD GPUs) don't have the
-	 * encryption bit in their DMA-mask and require remapping.
+	 * Do not identity map IOMMUv2 capable devices when:
+	 *   - memory encryption is active, because some of those devices
+	 *     (AMD GPUs) don't have the encryption bit in their DMA-mask
+	 *     and require remapping.
+	 *   - SNP is enabled, because it prohibits DTE[Mode]=0.
 	 */
-	if (!mem_encrypt_active() && dev_data->iommu_v2)
+	if (pdev_pasid_supported(dev_data) &&
+	    !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+	    !amd_iommu_snp_en) {
 		return IOMMU_DOMAIN_IDENTITY;
+	}
 
 	return 0;
 }
 
+static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
+{
+	/* IOMMU_PTE_FC is always set */
+	return true;
+}
+
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
-	.domain_alloc = amd_iommu_domain_alloc,
-	.domain_free  = amd_iommu_domain_free,
-	.attach_dev = amd_iommu_attach_device,
-	.detach_dev = amd_iommu_detach_device,
-	.map = amd_iommu_map,
-	.unmap = amd_iommu_unmap,
-	.iova_to_phys = amd_iommu_iova_to_phys,
+	.blocked_domain = &blocked_domain,
+	.release_domain = &blocked_domain,
+	.identity_domain = &identity_domain.domain,
+	.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
+	.domain_alloc_sva = amd_iommu_domain_alloc_sva,
 	.probe_device = amd_iommu_probe_device,
 	.release_device = amd_iommu_release_device,
-	.probe_finalize = amd_iommu_probe_finalize,
 	.device_group = amd_iommu_device_group,
 	.get_resv_regions = amd_iommu_get_resv_regions,
-	.put_resv_regions = generic_iommu_put_resv_regions,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
-	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
-	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
-	.iotlb_sync = amd_iommu_iotlb_sync,
 	.def_domain_type = amd_iommu_def_domain_type,
+	.page_response = amd_iommu_page_response,
 };
 
+#ifdef CONFIG_IRQ_REMAP
+
 /*****************************************************************************
  *
- * The next functions do a basic initialization of IOMMU for pass through
- * mode
- *
- * In passthrough mode the IOMMU is initialized and enabled but not used for
- * DMA-API translation.
+ * Interrupt Remapping Implementation
  *
  *****************************************************************************/
 
-/* IOMMUv2 specific functions */
-int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
-
-int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
-
-void amd_iommu_domain_direct_map(struct iommu_domain *dom)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-
-	spin_lock_irqsave(&domain->lock, flags);
-
-	if (domain->iop.pgtbl_cfg.tlb)
-		free_io_pgtable_ops(&domain->iop.iop.ops);
-
-	spin_unlock_irqrestore(&domain->lock, flags);
-}
-EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+static struct irq_chip amd_ir_chip;
+static DEFINE_SPINLOCK(iommu_table_lock);
 
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
 {
-	struct protection_domain *domain = to_pdomain(dom);
+	int ret;
+	u64 data;
 	unsigned long flags;
-	int levels, ret;
+	struct iommu_cmd cmd, cmd2;
 
-	/* Number of GCR3 table levels required */
-	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
-		levels += 1;
-
-	if (levels > amd_iommu_max_glx_val)
-		return -EINVAL;
+	if (iommu->irtcachedis_enabled)
+		return;
 
-	spin_lock_irqsave(&domain->lock, flags);
+	build_inv_irt(&cmd, devid);
+	data = atomic64_inc_return(&iommu->cmd_sem_val);
+	build_completion_wait(&cmd2, iommu, data);
 
-	/*
-	 * Save us all sanity checks whether devices already in the
-	 * domain support IOMMUv2. Just force that the domain has no
-	 * devices attached when it is switched into IOMMUv2 mode.
-	 */
-	ret = -EBUSY;
-	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+	raw_spin_lock_irqsave(&iommu->lock, flags);
+	ret = __iommu_queue_command_sync(iommu, &cmd, true);
+	if (ret)
 		goto out;
-
-	ret = -ENOMEM;
-	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
-	if (domain->gcr3_tbl == NULL)
+	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
+	if (ret)
 		goto out;
-
-	domain->glx      = levels;
-	domain->flags   |= PD_IOMMUV2_MASK;
-
-	amd_iommu_domain_update(domain);
-
-	ret = 0;
-
-out:
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
-
-static int __flush_pasid(struct protection_domain *domain, u32 pasid,
-			 u64 address, bool size)
-{
-	struct iommu_dev_data *dev_data;
-	struct iommu_cmd cmd;
-	int i, ret;
-
-	if (!(domain->flags & PD_IOMMUV2_MASK))
-		return -EINVAL;
-
-	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
-
-	/*
-	 * IOMMU TLB needs to be flushed before Device TLB to
-	 * prevent device TLB refill from IOMMU TLB
-	 */
-	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-		if (domain->dev_iommu[i] == 0)
-			continue;
-
-		ret = iommu_queue_command(amd_iommus[i], &cmd);
-		if (ret != 0)
-			goto out;
-	}
-
-	/* Wait until IOMMU TLB flushes are complete */
-	amd_iommu_domain_flush_complete(domain);
-
-	/* Now flush device TLBs */
-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct amd_iommu *iommu;
-		int qdep;
-
-		/*
		   There might be non-IOMMUv2 capable devices in an IOMMUv2
-		 * domain.
-		 */
-		if (!dev_data->ats.enabled)
-			continue;
-
-		qdep  = dev_data->ats.qdep;
-		iommu = amd_iommu_rlookup_table[dev_data->devid];
-
-		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
-				      qdep, address, size);
-
-		ret = iommu_queue_command(iommu, &cmd);
-		if (ret != 0)
-			goto out;
-	}
-
-	/* Wait until all device TLBs are flushed */
-	amd_iommu_domain_flush_complete(domain);
-
-	ret = 0;
-
+	wait_on_sem(iommu, data);
 out:
-
-	return ret;
-}
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+}
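iommu_flush_irt_and_complete() queues an interrupt-table invalidation and a COMPLETION_WAIT back to back under one acquisition of the command-buffer lock and then polls the semaphore the hardware writes, so one IRTE update costs a single lock round trip instead of two. The submit-then-wait-on-sequence-number pattern, sketched in userspace (all names are placeholders, not the driver's API; a busy-wait stands in for wait_on_sem()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long cmd_sem;	/* stand-in for the HW-written semaphore */
static atomic_long sem_val;	/* next sequence number, cf. cmd_sem_val */

static void queue_cmd(const char *what) { printf("queued: %s\n", what); }

/* Pretend hardware: executing the completion-wait stores the sequence. */
static void hw_execute(long data) { atomic_store(&cmd_sem, data); }

int main(void)
{
	long data = atomic_fetch_add(&sem_val, 1) + 1;

	/* both commands are submitted before anyone waits */
	queue_cmd("inv_irt");
	queue_cmd("completion_wait");
	hw_execute(data);

	while (atomic_load(&cmd_sem) != data)
		;	/* spin until hardware stores our value */
	puts("flush complete");
	return 0;
}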
 
-static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
-				  u64 address)
-{
-	return __flush_pasid(domain, pasid, address, false);
-}
-
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
-			 u64 address)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __amd_iommu_flush_page(domain, pasid, address);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(amd_iommu_flush_page);
-
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
-{
-	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-			     true);
-}
-
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __amd_iommu_flush_tlb(domain, pasid);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(amd_iommu_flush_tlb);
-
-static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
-{
-	int index;
-	u64 *pte;
-
-	while (true) {
-
-		index = (pasid >> (9 * level)) & 0x1ff;
-		pte   = &root[index];
-
-		if (level == 0)
-			break;
-
-		if (!(*pte & GCR3_VALID)) {
-			if (!alloc)
-				return NULL;
-
-			root = (void *)get_zeroed_page(GFP_ATOMIC);
-			if (root == NULL)
-				return NULL;
-
-			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
-		}
-
-		root = iommu_phys_to_virt(*pte & PAGE_MASK);
-
-		level -= 1;
-	}
-
-	return pte;
-}
-
-static int __set_gcr3(struct protection_domain *domain, u32 pasid,
-		      unsigned long cr3)
-{
-	u64 *pte;
-
-	if (domain->iop.mode != PAGE_MODE_NONE)
-		return -EINVAL;
-
-	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
-	if (pte == NULL)
-		return -ENOMEM;
-
-	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
-
-	return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
-{
-	u64 *pte;
-
-	if (domain->iop.mode != PAGE_MODE_NONE)
-		return -EINVAL;
-
-	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
-	if (pte == NULL)
-		return 0;
-
-	*pte = 0;
-
-	return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
-			      unsigned long cr3)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __set_gcr3(domain, pasid, cr3);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
-
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __clear_gcr3(domain, pasid);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
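The deleted __get_gcr3_pte() walked the GCR3 table exactly like a radix page table: 9 index bits per level consumed from the top, with on-demand allocation of missing intermediate levels, and the old amd_iommu_domain_enable_v2() sized the table by counting how many 9-bit chunks a given PASID count needs. The index arithmetic on its own, as a runnable sketch (not the kernel walker):

#include <stdio.h>

/* Index of @pasid at @level, 9 bits per level as in __get_gcr3_pte(). */
static unsigned int gcr3_index(unsigned int pasid, int level)
{
	return (pasid >> (9 * level)) & 0x1ff;
}

/* Levels needed beyond the first, cf. the loop in the removed enable_v2(). */
static int gcr3_levels(unsigned int pasids)
{
	int levels = 0;

	for (; (pasids - 1) & ~0x1ffu; pasids >>= 9)
		levels += 1;
	return levels;
}

int main(void)
{
	unsigned int pasid = 0x12345;	/* a 20-bit PASID fits in 3 levels */
	int level;

	printf("levels for 1M PASIDs: %d\n", gcr3_levels(1 << 20));	/* 2 */
	for (level = 2; level >= 0; level--)
		printf("level %d index: %#x\n", level, gcr3_index(pasid, level));
	return 0;
}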
 
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
-			   int status, int tag)
+static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
 {
-	struct iommu_dev_data *dev_data;
-	struct amd_iommu *iommu;
-	struct iommu_cmd cmd;
-
-	dev_data = dev_iommu_priv_get(&pdev->dev);
-	iommu    = amd_iommu_rlookup_table[dev_data->devid];
-
-	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
-			   tag, dev_data->pri_tlp);
-
-	return iommu_queue_command(iommu, &cmd);
+	if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K)
+		return DTE_INTTABLEN_2K;
+	return DTE_INTTABLEN_512;
 }
-EXPORT_SYMBOL(amd_iommu_complete_ppr);
 
-int amd_iommu_device_info(struct pci_dev *pdev,
-			  struct amd_iommu_device_info *info)
+static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
+			      struct irq_remap_table *table)
 {
-	int max_pasids;
-	int pos;
-
-	if (pdev == NULL || info == NULL)
-		return -EINVAL;
-
-	if (!amd_iommu_v2_supported())
-		return -EINVAL;
-
-	memset(info, 0, sizeof(*info));
-
-	if (pci_ats_supported(pdev))
-		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
-
-	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-	if (pos)
-		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
-
-	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-	if (pos) {
-		int features;
+	u64 new;
+	struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
+	struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
 
-		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
-		max_pasids = min(max_pasids, (1 << 20));
+	if (dev_data)
+		spin_lock(&dev_data->dte_lock);
 
-		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
-		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
+	new = READ_ONCE(dte->data[2]);
+	new &= ~DTE_IRQ_PHYS_ADDR_MASK;
+	new |= iommu_virt_to_phys(table->table);
+	new |= DTE_IRQ_REMAP_INTCTL;
+	new |= iommu_get_int_tablen(dev_data);
+	new |= DTE_IRQ_REMAP_ENABLE;
+	WRITE_ONCE(dte->data[2], new);
 
-		features = pci_pasid_features(pdev);
-		if (features & PCI_PASID_CAP_EXEC)
-			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
-		if (features & PCI_PASID_CAP_PRIV)
-			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
-	}
-
-	return 0;
+	if (dev_data)
+		spin_unlock(&dev_data->dte_lock);
 }
-EXPORT_SYMBOL(amd_iommu_device_info);
-
-#ifdef CONFIG_IRQ_REMAP
-
-/*****************************************************************************
- *
- * Interrupt Remapping Implementation
- *
- *****************************************************************************/
-
-static struct irq_chip amd_ir_chip;
-static DEFINE_SPINLOCK(iommu_table_lock);
 
-static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
-{
-	u64 dte;
-
-	dte	= amd_iommu_dev_table[devid].data[2];
-	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
-	dte	|= iommu_virt_to_phys(table->table);
-	dte	|= DTE_IRQ_REMAP_INTCTL;
-	dte	|= DTE_INTTABLEN;
-	dte	|= DTE_IRQ_REMAP_ENABLE;
-
-	amd_iommu_dev_table[devid].data[2] = dte;
-}
-
-static struct irq_remap_table *get_irq_table(u16 devid)
+static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
 {
 	struct irq_remap_table *table;
+	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
 
-	if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
-		      "%s: no iommu for devid %x\n", __func__, devid))
+	if (WARN_ONCE(!pci_seg->rlookup_table[devid],
+		      "%s: no iommu for devid %x:%x\n",
		      __func__, pci_seg->id, devid))
 		return NULL;
 
-	table = irq_lookup_table[devid];
-	if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+	table = pci_seg->irq_lookup_table[devid];
+	if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
		      __func__, pci_seg->id, devid))
 		return NULL;
 
 	return table;
 }
 
-static struct irq_remap_table *__alloc_irq_table(void)
+static struct irq_remap_table *__alloc_irq_table(int nid, size_t size)
 {
 	struct irq_remap_table *table;
 
@@ -2589,27 +3176,24 @@ static struct irq_remap_table *__alloc_irq_table(void)
 	if (!table)
 		return NULL;
 
-	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+	table->table = iommu_alloc_pages_node_sz(
+		nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size));
 	if (!table->table) {
 		kfree(table);
 		return NULL;
 	}
 	raw_spin_lock_init(&table->lock);
 
-	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-		memset(table->table, 0,
-		       MAX_IRQS_PER_TABLE * sizeof(u32));
-	else
-		memset(table->table, 0,
-		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
 	return table;
 }
 
 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 				  struct irq_remap_table *table)
 {
-	irq_lookup_table[devid] = table;
-	set_dte_irq_entry(devid, table);
+	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
+
+	pci_seg->irq_lookup_table[devid] = table;
+	set_dte_irq_entry(iommu, devid, table);
 	iommu_flush_dte(iommu, devid);
 }
 
@@ -2617,35 +3201,48 @@ static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
 				       void *data)
 {
 	struct irq_remap_table *table = data;
+	struct amd_iommu_pci_seg *pci_seg;
+	struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);
 
-	irq_lookup_table[alias] = table;
-	set_dte_irq_entry(alias, table);
+	if (!iommu)
+		return -EINVAL;
 
-	iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+	pci_seg = iommu->pci_seg;
+	pci_seg->irq_lookup_table[alias] = table;
+	set_dte_irq_entry(iommu, alias, table);
+	iommu_flush_dte(pci_seg->rlookup_table[alias], alias);
 
 	return 0;
 }
 
-static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
+static inline size_t get_irq_table_size(unsigned int max_irqs)
+{
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		return max_irqs * sizeof(u32);
+
+	return max_irqs * (sizeof(u64) * 2);
+}
+
+static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
+					       u16 devid, struct pci_dev *pdev,
+					       unsigned int max_irqs)
 {
 	struct irq_remap_table *table = NULL;
 	struct irq_remap_table *new_table = NULL;
-	struct amd_iommu *iommu;
+	struct amd_iommu_pci_seg *pci_seg;
 	unsigned long flags;
+	int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
 	u16 alias;
 
 	spin_lock_irqsave(&iommu_table_lock, flags);
 
-	iommu = amd_iommu_rlookup_table[devid];
-	if (!iommu)
-		goto out_unlock;
-
-	table = irq_lookup_table[devid];
+	pci_seg = iommu->pci_seg;
+	table = pci_seg->irq_lookup_table[devid];
 	if (table)
 		goto out_unlock;
 
-	alias = amd_iommu_alias_table[devid];
-	table = irq_lookup_table[alias];
+	alias = pci_seg->alias_table[devid];
+	table = pci_seg->irq_lookup_table[alias];
 	if (table) {
 		set_remap_table_entry(iommu, devid, table);
 		goto out_wait;
@@ -2653,17 +3250,17 @@ static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
 	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
 	/* Nothing there yet, allocate new irq remapping table */
-	new_table = __alloc_irq_table();
+	new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs));
 	if (!new_table)
 		return NULL;
 
 	spin_lock_irqsave(&iommu_table_lock, flags);
 
-	table = irq_lookup_table[devid];
+	table = pci_seg->irq_lookup_table[devid];
 	if (table)
 		goto out_unlock;
 
-	table = irq_lookup_table[alias];
+	table = pci_seg->irq_lookup_table[alias];
 	if (table) {
 		set_remap_table_entry(iommu, devid, table);
 		goto out_wait;
@@ -2688,24 +3285,21 @@ out_unlock:
 	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
 	if (new_table) {
-		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+		iommu_free_pages(new_table->table);
 		kfree(new_table);
 	}
 	return table;
 }
 
-static int alloc_irq_index(u16 devid, int count, bool align,
-			   struct pci_dev *pdev)
+static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
+			   bool align, struct pci_dev *pdev,
+			   unsigned long max_irqs)
 {
 	struct irq_remap_table *table;
 	int index, c, alignment = 1;
 	unsigned long flags;
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
-	if (!iommu)
-		return -ENODEV;
 
-	table = alloc_irq_table(devid, pdev);
+	table = alloc_irq_table(iommu, devid, pdev, max_irqs);
 	if (!table)
 		return -ENODEV;
 
@@ -2716,7 +3310,7 @@ static int alloc_irq_index(u16 devid, int count, bool align,
 
 	/* Scan table for free entries */
 	for (index = ALIGN(table->min_index, alignment), c = 0;
-	     index < MAX_IRQS_PER_TABLE;) {
+	     index < max_irqs;) {
 		if (!iommu->irte_ops->is_allocated(table, index)) {
 			c += 1;
 		} else {
@@ -2744,20 +3338,15 @@ out:
 	return index;
 }
 
-static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
-			  struct amd_ir_data *data)
+static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+			    struct irte_ga *irte)
 {
-	bool ret;
 	struct irq_remap_table *table;
-	struct amd_iommu *iommu;
-	unsigned long flags;
 	struct irte_ga *entry;
+	unsigned long flags;
+	u128 old;
 
-	iommu = amd_iommu_rlookup_table[devid];
-	if (iommu == NULL)
-		return -EINVAL;
-
-	table = get_irq_table(devid);
+	table = get_irq_table(iommu, devid);
 	if (!table)
 		return -ENOMEM;
 
@@ -2766,39 +3355,41 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
 	entry = (struct irte_ga *)table->table;
 	entry = &entry[index];
 
-	ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
-			     entry->lo.val, entry->hi.val,
-			     irte->lo.val, irte->hi.val);
 	/*
	 * We use cmpxchg16 to atomically update the 128-bit IRTE,
	 * and it cannot be updated by the hardware or other processors
	 * behind us, so the return value of cmpxchg16 should be the
	 * same as the old value.
	 */
-	WARN_ON(!ret);
-
-	if (data)
-		data->ref = entry;
+	old = entry->irte;
+	WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));
 
 	raw_spin_unlock_irqrestore(&table->lock, flags);
 
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	return 0;
+}
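The try_cmpxchg128() in __modify_irte_ga() is used purely to get an atomic 16-byte store; nothing else writes the entry concurrently, so the WARN_ON only fires if that assumption breaks. GCC/clang's unsigned __int128 plus the generic atomic builtin shows the shape of the operation (userspace sketch; assumes an x86-64 target with 16-byte CAS, e.g. compiled with -mcx16, and illustrative names throughout):

#include <stdio.h>
#include <stdbool.h>

typedef unsigned __int128 u128;

/* One 128-bit IRTE slot, updated as a single unit. */
static u128 irte_slot;

static bool irte_update(u128 *slot, u128 new)
{
	u128 old = *slot;	/* snapshot, cf. "old = entry->irte" */

	/* Succeeds unless someone raced us, which the driver rules out. */
	return __atomic_compare_exchange_n(slot, &old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	u128 new = ((u128)0x1122334455667788ULL << 64) | 0x99aabbccddeeff00ULL;

	printf("swap succeeded: %d\n", irte_update(&irte_slot, new));
	printf("low half now: %#llx\n", (unsigned long long)irte_slot);
	return 0;
}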
+
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+			  struct irte_ga *irte)
+{
+	int ret;
+
+	ret = __modify_irte_ga(iommu, devid, index, irte);
+	if (ret)
+		return ret;
+
+	iommu_flush_irt_and_complete(iommu, devid);
 
 	return 0;
 }
 
-static int modify_irte(u16 devid, int index, union irte *irte)
+static int modify_irte(struct amd_iommu *iommu,
		       u16 devid, int index, union irte *irte)
 {
 	struct irq_remap_table *table;
-	struct amd_iommu *iommu;
 	unsigned long flags;
 
-	iommu = amd_iommu_rlookup_table[devid];
-	if (iommu == NULL)
-		return -EINVAL;
-
-	table = get_irq_table(devid);
+	table = get_irq_table(iommu, devid);
 	if (!table)
 		return -ENOMEM;
 
@@ -2806,23 +3397,17 @@ static int modify_irte(u16 devid, int index, union irte *irte)
 	table->table[index] = irte->val;
 	raw_spin_unlock_irqrestore(&table->lock, flags);
 
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	iommu_flush_irt_and_complete(iommu, devid);
 
 	return 0;
 }
 
-static void free_irte(u16 devid, int index)
+static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
 {
 	struct irq_remap_table *table;
-	struct amd_iommu *iommu;
 	unsigned long flags;
 
-	iommu = amd_iommu_rlookup_table[devid];
-	if (iommu == NULL)
-		return;
-
-	table = get_irq_table(devid);
+	table = get_irq_table(iommu, devid);
 	if (!table)
 		return;
 
@@ -2830,8 +3415,7 @@ static void free_irte(u16 devid, int index)
 	iommu->irte_ops->clear_allocated(table, index);
 	raw_spin_unlock_irqrestore(&table->lock, flags);
 
-	iommu_flush_irt(iommu, devid);
-	iommu_completion_wait(iommu);
+	iommu_flush_irt_and_complete(iommu, devid);
 }
 
 static void irte_prepare(void *entry,
@@ -2864,49 +3448,49 @@ static void irte_ga_prepare(void *entry,
 	irte->lo.fields_remap.valid       = 1;
 }
 
-static void irte_activate(void *entry, u16 devid, u16 index)
+static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
 {
 	union irte *irte = (union irte *) entry;
 
 	irte->fields.valid = 1;
-	modify_irte(devid, index, irte);
+	modify_irte(iommu, devid, index, irte);
 }
 
-static void irte_ga_activate(void *entry, u16 devid, u16 index)
+static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
 {
 	struct irte_ga *irte = (struct irte_ga *) entry;
 
 	irte->lo.fields_remap.valid = 1;
-	modify_irte_ga(devid, index, irte, NULL);
+	modify_irte_ga(iommu, devid, index, irte);
 }
 
-static void irte_deactivate(void *entry, u16 devid, u16 index)
+static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
 {
 	union irte *irte = (union irte *) entry;
 
 	irte->fields.valid = 0;
-	modify_irte(devid, index, irte);
+	modify_irte(iommu, devid, index, irte);
 }
 
-static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
+static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
 {
 	struct irte_ga *irte = (struct irte_ga *) entry;
 
 	irte->lo.fields_remap.valid = 0;
-	modify_irte_ga(devid, index, irte, NULL);
+	modify_irte_ga(iommu, devid, index, irte);
 }
 
-static void irte_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
			      u8 vector, u32 dest_apicid)
 {
 	union irte *irte = (union irte *) entry;
 
 	irte->fields.vector = vector;
 	irte->fields.destination = dest_apicid;
-	modify_irte(devid, index, irte);
+	modify_irte(iommu, devid, index, irte);
 }
 
-static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
+static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
				 u8 vector, u32 dest_apicid)
 {
 	struct irte_ga *irte = (struct irte_ga *) entry;
@@ -2917,7 +3501,7 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
 			APICID_TO_IRTE_DEST_LO(dest_apicid);
 		irte->hi.fields.destination =
 			APICID_TO_IRTE_DEST_HI(dest_apicid);
-		modify_irte_ga(devid, index, irte, NULL);
+		modify_irte_ga(iommu, devid, index, irte);
 	}
 }
 
@@ -2976,7 +3560,7 @@ static int get_devid(struct irq_alloc_info *info)
 		return get_hpet_devid(info->devid);
 	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
 	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
-		return get_device_id(msi_desc_to_dev(info->desc));
+		return get_device_sbdf_id(msi_desc_to_dev(info->desc));
 	default:
 		WARN_ON_ONCE(1);
 		return -1;
@@ -2996,6 +3580,14 @@ static void fill_msi_msg(struct msi_msg *msg, u32 index)
 	msg->data = index;
 	msg->address_lo = 0;
 	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
+	/*
	 * The struct msi_msg.dest_mode_logical is used to set the DM bit
	 * in MSI Message Address Register. For device w/ 2K int-remap support,
	 * this is bit must be set to 1 regardless of the actual destination
	 * mode, which is signified by the IRTE[DM].
	 */
+	if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
+		msg->arch_addr_lo.dest_mode_logical = true;
 	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
 }
 
@@ -3005,14 +3597,14 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
				       int devid, int index, int sub_handle)
 {
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+	struct amd_iommu *iommu = data->iommu;
 
 	if (!iommu)
 		return;
 
 	data->irq_2_irte.devid = devid;
 	data->irq_2_irte.index = index + sub_handle;
-	iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
+	iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
				 apic->dest_mode_logical, irq_cfg->vector,
				 irq_cfg->dest_apicid, devid);
 
@@ -3056,36 +3648,39 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 	struct irq_alloc_info *info = arg;
 	struct irq_data *irq_data;
 	struct amd_ir_data *data = NULL;
+	struct amd_iommu *iommu;
 	struct irq_cfg *cfg;
-	int i, ret, devid;
+	struct iommu_dev_data *dev_data;
+	unsigned long max_irqs;
+	int i, ret, devid, seg, sbdf;
 	int index;
 
 	if (!info)
 		return -EINVAL;
-	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
-	    info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
+	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
 		return -EINVAL;
 
-	/*
-	 * With IRQ remapping enabled, don't need contiguous CPU vectors
-	 * to support multiple MSI interrupts.
-	 */
-	if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
-		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+	sbdf = get_devid(info);
+	if (sbdf < 0)
+		return -EINVAL;
 
-	devid = get_devid(info);
-	if (devid < 0)
+	seg = PCI_SBDF_TO_SEGID(sbdf);
+	devid = PCI_SBDF_TO_DEVID(sbdf);
+	iommu = __rlookup_amd_iommu(seg, devid);
+	if (!iommu)
 		return -EINVAL;
 
+	dev_data = search_dev_data(iommu, devid);
+	max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512;
+
 	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
 	if (ret < 0)
 		return ret;
 
 	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
 		struct irq_remap_table *table;
-		struct amd_iommu *iommu;
 
-		table = alloc_irq_table(devid, NULL);
+		table = alloc_irq_table(iommu, devid, NULL, max_irqs);
 		if (table) {
 			if (!table->min_index) {
 				/*
@@ -3093,7 +3688,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 				 * interrupts.
 				 */
 				table->min_index = 32;
-				iommu = amd_iommu_rlookup_table[devid];
 				for (i = 0; i < 32; ++i)
 					iommu->irte_ops->set_allocated(table, i);
 			}
@@ -3106,10 +3700,12 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		   info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
 		bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
 
-		index = alloc_irq_index(devid, nr_irqs, align,
-					msi_desc_to_pci_dev(info->desc));
+		index = alloc_irq_index(iommu, devid, nr_irqs, align,
					msi_desc_to_pci_dev(info->desc),
					max_irqs);
 	} else {
-		index = alloc_irq_index(devid, nr_irqs, false, NULL);
+		index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL,
					max_irqs);
 	}
 
 	if (index < 0) {
@@ -3141,11 +3737,11 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 			goto out_free_data;
 		}
 
+		data->iommu = iommu;
 		irq_data->hwirq = (devid << 16) + i;
 		irq_data->chip_data = data;
 		irq_data->chip = &amd_ir_chip;
 		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
-		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
 	}
 
 	return 0;
@@ -3157,7 +3753,7 @@ out_free_data:
 			kfree(irq_data->chip_data);
 	}
 	for (i = 0; i < nr_irqs; i++)
-		free_irte(devid, index + i);
+		free_irte(iommu, devid, index + i);
out_free_parent:
 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
 	return ret;
@@ -3176,7 +3772,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
 		if (irq_data && irq_data->chip_data) {
 			data = irq_data->chip_data;
 			irte_info = &data->irq_2_irte;
-			free_irte(irte_info->devid, irte_info->index);
+			free_irte(data->iommu, irte_info->devid, irte_info->index);
 			kfree(data->entry);
 			kfree(data);
 		}
@@ -3194,13 +3790,13 @@ static int irq_remapping_activate(struct irq_domain *domain,
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+	struct amd_iommu *iommu = data->iommu;
 	struct irq_cfg *cfg = irqd_cfg(irq_data);
 
 	if (!iommu)
 		return 0;
 
-	iommu->irte_ops->activate(data->entry, irte_info->devid,
+	iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
				  irte_info->index);
 	amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
 	return 0;
@@ -3211,10 +3807,10 @@ static void irq_remapping_deactivate(struct irq_domain *domain,
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+	struct amd_iommu *iommu = data->iommu;
 
 	if (iommu)
-		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
+		iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
					    irte_info->index);
 }
 
@@ -3234,8 +3830,8 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
 	if (devid < 0)
 		return 0;
+	iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));
 
-	iommu = amd_iommu_rlookup_table[devid];
 	return iommu && iommu->ir_domain == d;
 }
 
@@ -3247,14 +3843,70 @@ static const struct irq_domain_ops amd_ir_domain_ops = {
 	.deactivate = irq_remapping_deactivate,
 };
 
-int amd_iommu_activate_guest_mode(void *data)
+static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu,
				  bool ga_log_intr)
+{
+	if (cpu >= 0) {
+		entry->lo.fields_vapic.destination =
					APICID_TO_IRTE_DEST_LO(cpu);
+		entry->hi.fields.destination =
					APICID_TO_IRTE_DEST_HI(cpu);
+		entry->lo.fields_vapic.is_run = true;
+		entry->lo.fields_vapic.ga_log_intr = false;
+	} else {
+		entry->lo.fields_vapic.is_run = false;
+		entry->lo.fields_vapic.ga_log_intr = ga_log_intr;
+	}
+}
+
+/*
+ * Update the pCPU information for an IRTE that is configured to post IRQs to
+ * a vCPU, without issuing an IOMMU invalidation for the IRTE.
+ *
+ * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination
+ * with the pCPU's APIC ID, set IsRun, and clear GALogIntr. If the vCPU isn't
+ * associated with a pCPU (@cpu < 0), clear IsRun and set/clear GALogIntr based
+ * on input from the caller (e.g. KVM only requests GALogIntr when the vCPU is
+ * blocking and requires a notification wake event). I.e. treat vCPUs that are
+ * associated with a pCPU as running. This API is intended to be used when a
+ * vCPU is scheduled in/out (or stops running for any reason), to do a fast
+ * update of IsRun, GALogIntr, and (conditionally) Destination.
+ *
+ * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached
+ * and thus don't require an invalidation to ensure the IOMMU consumes fresh
+ * information.
+ */
+int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
+{
+	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+
+	if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
+		return -EINVAL;
+
+	if (!entry || !entry->lo.fields_vapic.guest_mode)
+		return 0;
+
+	if (!ir_data->iommu)
+		return -ENODEV;
+
+	__amd_iommu_update_ga(entry, cpu, ga_log_intr);
+
+	return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
				ir_data->irq_2_irte.index, entry);
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
+
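amd_iommu_update_ga() is the fast path KVM uses on vCPU sched-in/out: with a target pCPU it writes the destination and sets IsRun, otherwise it clears IsRun and lets the caller decide whether a GA-log wake event is wanted. Because the spec says these fields are not cached, no IRTE invalidation is issued. The decision split in __amd_iommu_update_ga() can be stated as a tiny pure function (the field layout below is illustrative, not the IRTE encoding):

#include <stdio.h>
#include <stdbool.h>

struct ga_state {
	int  dest;		/* pCPU APIC ID, meaningful only when is_run */
	bool is_run;
	bool ga_log_intr;	/* wake-event notification requested */
};

/* Mirror of the cpu >= 0 / cpu < 0 split above. */
static struct ga_state update_ga(int cpu, bool ga_log_intr)
{
	struct ga_state s = { 0 };

	if (cpu >= 0) {		/* vCPU running: route directly, no GA log */
		s.dest = cpu;
		s.is_run = true;
		s.ga_log_intr = false;
	} else {		/* vCPU not running: maybe ask for a wake event */
		s.is_run = false;
		s.ga_log_intr = ga_log_intr;
	}
	return s;
}

int main(void)
{
	struct ga_state run = update_ga(5, true);
	struct ga_state blocked = update_ga(-1, true);

	printf("running: dest=%d is_run=%d log=%d\n",
	       run.dest, run.is_run, run.ga_log_intr);
	printf("blocked: is_run=%d log=%d\n",
	       blocked.is_run, blocked.ga_log_intr);
	return 0;
}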
entry->lo.fields_remap.valid; @@ -3292,69 +3947,56 @@ int amd_iommu_deactivate_guest_mode(void *data) entry->lo.fields_remap.valid = valid; entry->lo.fields_remap.dm = apic->dest_mode_logical; - entry->lo.fields_remap.int_type = apic->delivery_mode; + entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED; entry->hi.fields.vector = cfg->vector; entry->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(cfg->dest_apicid); entry->hi.fields.destination = APICID_TO_IRTE_DEST_HI(cfg->dest_apicid); - return modify_irte_ga(ir_data->irq_2_irte.devid, - ir_data->irq_2_irte.index, entry, ir_data); + return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, + ir_data->irq_2_irte.index, entry); } EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode); -static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) +static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info) { int ret; - struct amd_iommu *iommu; - struct amd_iommu_pi_data *pi_data = vcpu_info; - struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; + struct amd_iommu_pi_data *pi_data = info; struct amd_ir_data *ir_data = data->chip_data; struct irq_2_irte *irte_info = &ir_data->irq_2_irte; - struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); + struct iommu_dev_data *dev_data; + + if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))) + return -EINVAL; + + if (ir_data->iommu == NULL) + return -EINVAL; + + dev_data = search_dev_data(ir_data->iommu, irte_info->devid); /* Note: * This device has never been set up for guest mode. * we should not modify the IRTE */ if (!dev_data || !dev_data->use_vapic) - return 0; + return -EINVAL; ir_data->cfg = irqd_cfg(data); - pi_data->ir_data = ir_data; - - /* Note: - * SVM tries to set up for VAPIC mode, but we are in - * legacy mode. So, we force legacy mode instead. - */ - if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) { - pr_debug("%s: Fall back to using intr legacy remap\n", - __func__); - pi_data->is_guest_mode = false; - } - iommu = amd_iommu_rlookup_table[irte_info->devid]; - if (iommu == NULL) - return -EINVAL; + if (pi_data) { + pi_data->ir_data = ir_data; - pi_data->prev_ga_tag = ir_data->cached_ga_tag; - if (pi_data->is_guest_mode) { - ir_data->ga_root_ptr = (pi_data->base >> 12); - ir_data->ga_vector = vcpu_pi_info->vector; + ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12); + ir_data->ga_vector = pi_data->vector; ir_data->ga_tag = pi_data->ga_tag; - ret = amd_iommu_activate_guest_mode(ir_data); - if (!ret) - ir_data->cached_ga_tag = pi_data->ga_tag; + if (pi_data->is_guest_mode) + ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu, + pi_data->ga_log_intr); + else + ret = amd_iommu_deactivate_guest_mode(ir_data); } else { ret = amd_iommu_deactivate_guest_mode(ir_data); - - /* - * This communicates the ga_tag back to the caller - * so that it can do all the necessary clean up. - */ - if (!ret) - ir_data->cached_ga_tag = 0; } return ret; @@ -3371,7 +4013,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, * Atomically updates the IRTE with the new destination, vector * and flushes the interrupt entry cache. 
*/ - iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, + iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid, irte_info->index, cfg->vector, cfg->dest_apicid); } @@ -3383,7 +4025,7 @@ static int amd_ir_set_affinity(struct irq_data *data, struct irq_2_irte *irte_info = &ir_data->irq_2_irte; struct irq_cfg *cfg = irqd_cfg(data); struct irq_data *parent = data->parent_data; - struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; + struct amd_iommu *iommu = ir_data->iommu; int ret; if (!iommu) @@ -3399,7 +4041,7 @@ static int amd_ir_set_affinity(struct irq_data *data, * at the new destination. So, time to cleanup the previous * vector allocation. */ - send_cleanup_vector(cfg); + vector_schedule_cleanup(cfg); return IRQ_SET_MASK_OK_DONE; } @@ -3419,66 +4061,34 @@ static struct irq_chip amd_ir_chip = { .irq_compose_msi_msg = ir_compose_msi_msg, }; +static const struct msi_parent_ops amdvi_msi_parent_ops = { + .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI, + .bus_select_token = DOMAIN_BUS_AMDVI, + .bus_select_mask = MATCH_PCI_MSI, + .prefix = "IR-", + .init_dev_msi_info = msi_parent_init_dev_msi_info, +}; + int amd_iommu_create_irq_domain(struct amd_iommu *iommu) { - struct fwnode_handle *fn; + struct irq_domain_info info = { + .fwnode = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index), + .ops = &amd_ir_domain_ops, + .domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI, + .host_data = iommu, + .parent = arch_get_ir_parent_domain(), + }; - fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); - if (!fn) + if (!info.fwnode) return -ENOMEM; - iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); + + iommu->ir_domain = msi_create_parent_irq_domain(&info, &amdvi_msi_parent_ops); if (!iommu->ir_domain) { - irq_domain_free_fwnode(fn); + irq_domain_free_fwnode(info.fwnode); return -ENOMEM; } - - iommu->ir_domain->parent = arch_get_ir_parent_domain(); - iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, - "AMD-IR-MSI", - iommu->index); return 0; } - -int amd_iommu_update_ga(int cpu, bool is_run, void *data) -{ - unsigned long flags; - struct amd_iommu *iommu; - struct irq_remap_table *table; - struct amd_ir_data *ir_data = (struct amd_ir_data *)data; - int devid = ir_data->irq_2_irte.devid; - struct irte_ga *entry = (struct irte_ga *) ir_data->entry; - struct irte_ga *ref = (struct irte_ga *) ir_data->ref; - - if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || - !ref || !entry || !entry->lo.fields_vapic.guest_mode) - return 0; - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - return -ENODEV; - - table = get_irq_table(devid); - if (!table) - return -ENODEV; - - raw_spin_lock_irqsave(&table->lock, flags); - - if (ref->lo.fields_vapic.guest_mode) { - if (cpu >= 0) { - ref->lo.fields_vapic.destination = - APICID_TO_IRTE_DEST_LO(cpu); - ref->hi.fields.destination = - APICID_TO_IRTE_DEST_HI(cpu); - } - ref->lo.fields_vapic.is_run = is_run; - barrier(); - } - - raw_spin_unlock_irqrestore(&table->lock, flags); - - iommu_flush_irt(iommu, devid); - iommu_completion_wait(iommu); - return 0; -} -EXPORT_SYMBOL(amd_iommu_update_ga); #endif + +MODULE_IMPORT_NS("GENERIC_PT_IOMMU"); diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c deleted file mode 100644 index f8d4ad421e07..000000000000 --- a/drivers/iommu/amd/iommu_v2.c +++ /dev/null @@ -1,986 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2010-2012 Advanced Micro Devices, Inc. 
- * Author: Joerg Roedel <jroedel@suse.de> - */ - -#define pr_fmt(fmt) "AMD-Vi: " fmt - -#include <linux/mmu_notifier.h> -#include <linux/amd-iommu.h> -#include <linux/mm_types.h> -#include <linux/profile.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/sched/mm.h> -#include <linux/wait.h> -#include <linux/pci.h> -#include <linux/gfp.h> - -#include "amd_iommu.h" - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>"); - -#define MAX_DEVICES 0x10000 -#define PRI_QUEUE_SIZE 512 - -struct pri_queue { - atomic_t inflight; - bool finish; - int status; -}; - -struct pasid_state { - struct list_head list; /* For global state-list */ - atomic_t count; /* Reference count */ - unsigned mmu_notifier_count; /* Counting nested mmu_notifier - calls */ - struct mm_struct *mm; /* mm_struct for the faults */ - struct mmu_notifier mn; /* mmu_notifier handle */ - struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */ - struct device_state *device_state; /* Link to our device_state */ - u32 pasid; /* PASID index */ - bool invalid; /* Used during setup and - teardown of the pasid */ - spinlock_t lock; /* Protect pri_queues and - mmu_notifer_count */ - wait_queue_head_t wq; /* To wait for count == 0 */ -}; - -struct device_state { - struct list_head list; - u16 devid; - atomic_t count; - struct pci_dev *pdev; - struct pasid_state **states; - struct iommu_domain *domain; - int pasid_levels; - int max_pasids; - amd_iommu_invalid_ppr_cb inv_ppr_cb; - amd_iommu_invalidate_ctx inv_ctx_cb; - spinlock_t lock; - wait_queue_head_t wq; -}; - -struct fault { - struct work_struct work; - struct device_state *dev_state; - struct pasid_state *state; - struct mm_struct *mm; - u64 address; - u16 devid; - u32 pasid; - u16 tag; - u16 finish; - u16 flags; -}; - -static LIST_HEAD(state_list); -static DEFINE_SPINLOCK(state_lock); - -static struct workqueue_struct *iommu_wq; - -static void free_pasid_states(struct device_state *dev_state); - -static u16 device_id(struct pci_dev *pdev) -{ - u16 devid; - - devid = pdev->bus->number; - devid = (devid << 8) | pdev->devfn; - - return devid; -} - -static struct device_state *__get_device_state(u16 devid) -{ - struct device_state *dev_state; - - list_for_each_entry(dev_state, &state_list, list) { - if (dev_state->devid == devid) - return dev_state; - } - - return NULL; -} - -static struct device_state *get_device_state(u16 devid) -{ - struct device_state *dev_state; - unsigned long flags; - - spin_lock_irqsave(&state_lock, flags); - dev_state = __get_device_state(devid); - if (dev_state != NULL) - atomic_inc(&dev_state->count); - spin_unlock_irqrestore(&state_lock, flags); - - return dev_state; -} - -static void free_device_state(struct device_state *dev_state) -{ - struct iommu_group *group; - - /* - * First detach device from domain - No more PRI requests will arrive - * from that device after it is unbound from the IOMMUv2 domain. 
- */ - group = iommu_group_get(&dev_state->pdev->dev); - if (WARN_ON(!group)) - return; - - iommu_detach_group(dev_state->domain, group); - - iommu_group_put(group); - - /* Everything is down now, free the IOMMUv2 domain */ - iommu_domain_free(dev_state->domain); - - /* Finally get rid of the device-state */ - kfree(dev_state); -} - -static void put_device_state(struct device_state *dev_state) -{ - if (atomic_dec_and_test(&dev_state->count)) - wake_up(&dev_state->wq); -} - -/* Must be called under dev_state->lock */ -static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state, - u32 pasid, bool alloc) -{ - struct pasid_state **root, **ptr; - int level, index; - - level = dev_state->pasid_levels; - root = dev_state->states; - - while (true) { - - index = (pasid >> (9 * level)) & 0x1ff; - ptr = &root[index]; - - if (level == 0) - break; - - if (*ptr == NULL) { - if (!alloc) - return NULL; - - *ptr = (void *)get_zeroed_page(GFP_ATOMIC); - if (*ptr == NULL) - return NULL; - } - - root = (struct pasid_state **)*ptr; - level -= 1; - } - - return ptr; -} - -static int set_pasid_state(struct device_state *dev_state, - struct pasid_state *pasid_state, - u32 pasid) -{ - struct pasid_state **ptr; - unsigned long flags; - int ret; - - spin_lock_irqsave(&dev_state->lock, flags); - ptr = __get_pasid_state_ptr(dev_state, pasid, true); - - ret = -ENOMEM; - if (ptr == NULL) - goto out_unlock; - - ret = -ENOMEM; - if (*ptr != NULL) - goto out_unlock; - - *ptr = pasid_state; - - ret = 0; - -out_unlock: - spin_unlock_irqrestore(&dev_state->lock, flags); - - return ret; -} - -static void clear_pasid_state(struct device_state *dev_state, u32 pasid) -{ - struct pasid_state **ptr; - unsigned long flags; - - spin_lock_irqsave(&dev_state->lock, flags); - ptr = __get_pasid_state_ptr(dev_state, pasid, true); - - if (ptr == NULL) - goto out_unlock; - - *ptr = NULL; - -out_unlock: - spin_unlock_irqrestore(&dev_state->lock, flags); -} - -static struct pasid_state *get_pasid_state(struct device_state *dev_state, - u32 pasid) -{ - struct pasid_state **ptr, *ret = NULL; - unsigned long flags; - - spin_lock_irqsave(&dev_state->lock, flags); - ptr = __get_pasid_state_ptr(dev_state, pasid, false); - - if (ptr == NULL) - goto out_unlock; - - ret = *ptr; - if (ret) - atomic_inc(&ret->count); - -out_unlock: - spin_unlock_irqrestore(&dev_state->lock, flags); - - return ret; -} - -static void free_pasid_state(struct pasid_state *pasid_state) -{ - kfree(pasid_state); -} - -static void put_pasid_state(struct pasid_state *pasid_state) -{ - if (atomic_dec_and_test(&pasid_state->count)) - wake_up(&pasid_state->wq); -} - -static void put_pasid_state_wait(struct pasid_state *pasid_state) -{ - atomic_dec(&pasid_state->count); - wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); - free_pasid_state(pasid_state); -} - -static void unbind_pasid(struct pasid_state *pasid_state) -{ - struct iommu_domain *domain; - - domain = pasid_state->device_state->domain; - - /* - * Mark pasid_state as invalid, no more faults will we added to the - * work queue after this is visible everywhere. 
- */ - pasid_state->invalid = true; - - /* Make sure this is visible */ - smp_wmb(); - - /* After this the device/pasid can't access the mm anymore */ - amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid); - - /* Make sure no more pending faults are in the queue */ - flush_workqueue(iommu_wq); -} - -static void free_pasid_states_level1(struct pasid_state **tbl) -{ - int i; - - for (i = 0; i < 512; ++i) { - if (tbl[i] == NULL) - continue; - - free_page((unsigned long)tbl[i]); - } -} - -static void free_pasid_states_level2(struct pasid_state **tbl) -{ - struct pasid_state **ptr; - int i; - - for (i = 0; i < 512; ++i) { - if (tbl[i] == NULL) - continue; - - ptr = (struct pasid_state **)tbl[i]; - free_pasid_states_level1(ptr); - } -} - -static void free_pasid_states(struct device_state *dev_state) -{ - struct pasid_state *pasid_state; - int i; - - for (i = 0; i < dev_state->max_pasids; ++i) { - pasid_state = get_pasid_state(dev_state, i); - if (pasid_state == NULL) - continue; - - put_pasid_state(pasid_state); - - /* - * This will call the mn_release function and - * unbind the PASID - */ - mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); - - put_pasid_state_wait(pasid_state); /* Reference taken in - amd_iommu_bind_pasid */ - - /* Drop reference taken in amd_iommu_bind_pasid */ - put_device_state(dev_state); - } - - if (dev_state->pasid_levels == 2) - free_pasid_states_level2(dev_state->states); - else if (dev_state->pasid_levels == 1) - free_pasid_states_level1(dev_state->states); - else - BUG_ON(dev_state->pasid_levels != 0); - - free_page((unsigned long)dev_state->states); -} - -static struct pasid_state *mn_to_state(struct mmu_notifier *mn) -{ - return container_of(mn, struct pasid_state, mn); -} - -static void mn_invalidate_range(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - struct pasid_state *pasid_state; - struct device_state *dev_state; - - pasid_state = mn_to_state(mn); - dev_state = pasid_state->device_state; - - if ((start ^ (end - 1)) < PAGE_SIZE) - amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, - start); - else - amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid); -} - -static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) -{ - struct pasid_state *pasid_state; - struct device_state *dev_state; - bool run_inv_ctx_cb; - - might_sleep(); - - pasid_state = mn_to_state(mn); - dev_state = pasid_state->device_state; - run_inv_ctx_cb = !pasid_state->invalid; - - if (run_inv_ctx_cb && dev_state->inv_ctx_cb) - dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid); - - unbind_pasid(pasid_state); -} - -static const struct mmu_notifier_ops iommu_mn = { - .release = mn_release, - .invalidate_range = mn_invalidate_range, -}; - -static void set_pri_tag_status(struct pasid_state *pasid_state, - u16 tag, int status) -{ - unsigned long flags; - - spin_lock_irqsave(&pasid_state->lock, flags); - pasid_state->pri[tag].status = status; - spin_unlock_irqrestore(&pasid_state->lock, flags); -} - -static void finish_pri_tag(struct device_state *dev_state, - struct pasid_state *pasid_state, - u16 tag) -{ - unsigned long flags; - - spin_lock_irqsave(&pasid_state->lock, flags); - if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) && - pasid_state->pri[tag].finish) { - amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid, - pasid_state->pri[tag].status, tag); - pasid_state->pri[tag].finish = false; - pasid_state->pri[tag].status = PPR_SUCCESS; - } - spin_unlock_irqrestore(&pasid_state->lock, 
flags); -} - -static void handle_fault_error(struct fault *fault) -{ - int status; - - if (!fault->dev_state->inv_ppr_cb) { - set_pri_tag_status(fault->state, fault->tag, PPR_INVALID); - return; - } - - status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev, - fault->pasid, - fault->address, - fault->flags); - switch (status) { - case AMD_IOMMU_INV_PRI_RSP_SUCCESS: - set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS); - break; - case AMD_IOMMU_INV_PRI_RSP_INVALID: - set_pri_tag_status(fault->state, fault->tag, PPR_INVALID); - break; - case AMD_IOMMU_INV_PRI_RSP_FAIL: - set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE); - break; - default: - BUG(); - } -} - -static bool access_error(struct vm_area_struct *vma, struct fault *fault) -{ - unsigned long requested = 0; - - if (fault->flags & PPR_FAULT_EXEC) - requested |= VM_EXEC; - - if (fault->flags & PPR_FAULT_READ) - requested |= VM_READ; - - if (fault->flags & PPR_FAULT_WRITE) - requested |= VM_WRITE; - - return (requested & ~vma->vm_flags) != 0; -} - -static void do_fault(struct work_struct *work) -{ - struct fault *fault = container_of(work, struct fault, work); - struct vm_area_struct *vma; - vm_fault_t ret = VM_FAULT_ERROR; - unsigned int flags = 0; - struct mm_struct *mm; - u64 address; - - mm = fault->state->mm; - address = fault->address; - - if (fault->flags & PPR_FAULT_USER) - flags |= FAULT_FLAG_USER; - if (fault->flags & PPR_FAULT_WRITE) - flags |= FAULT_FLAG_WRITE; - flags |= FAULT_FLAG_REMOTE; - - mmap_read_lock(mm); - vma = find_extend_vma(mm, address); - if (!vma || address < vma->vm_start) - /* failed to get a vma in the right range */ - goto out; - - /* Check if we have the right permissions on the vma */ - if (access_error(vma, fault)) - goto out; - - ret = handle_mm_fault(vma, address, flags, NULL); -out: - mmap_read_unlock(mm); - - if (ret & VM_FAULT_ERROR) - /* failed to service fault */ - handle_fault_error(fault); - - finish_pri_tag(fault->dev_state, fault->state, fault->tag); - - put_pasid_state(fault->state); - - kfree(fault); -} - -static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) -{ - struct amd_iommu_fault *iommu_fault; - struct pasid_state *pasid_state; - struct device_state *dev_state; - struct pci_dev *pdev = NULL; - unsigned long flags; - struct fault *fault; - bool finish; - u16 tag, devid; - int ret; - - iommu_fault = data; - tag = iommu_fault->tag & 0x1ff; - finish = (iommu_fault->tag >> 9) & 1; - - devid = iommu_fault->device_id; - pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid), - devid & 0xff); - if (!pdev) - return -ENODEV; - - ret = NOTIFY_DONE; - - /* In kdump kernel pci dev is not initialized yet -> send INVALID */ - if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) { - amd_iommu_complete_ppr(pdev, iommu_fault->pasid, - PPR_INVALID, tag); - goto out; - } - - dev_state = get_device_state(iommu_fault->device_id); - if (dev_state == NULL) - goto out; - - pasid_state = get_pasid_state(dev_state, iommu_fault->pasid); - if (pasid_state == NULL || pasid_state->invalid) { - /* We know the device but not the PASID -> send INVALID */ - amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid, - PPR_INVALID, tag); - goto out_drop_state; - } - - spin_lock_irqsave(&pasid_state->lock, flags); - atomic_inc(&pasid_state->pri[tag].inflight); - if (finish) - pasid_state->pri[tag].finish = true; - spin_unlock_irqrestore(&pasid_state->lock, flags); - - fault = kzalloc(sizeof(*fault), GFP_ATOMIC); - if (fault == NULL) { - /* We are OOM - send success and 
let the device re-fault */ - finish_pri_tag(dev_state, pasid_state, tag); - goto out_drop_state; - } - - fault->dev_state = dev_state; - fault->address = iommu_fault->address; - fault->state = pasid_state; - fault->tag = tag; - fault->finish = finish; - fault->pasid = iommu_fault->pasid; - fault->flags = iommu_fault->flags; - INIT_WORK(&fault->work, do_fault); - - queue_work(iommu_wq, &fault->work); - - ret = NOTIFY_OK; - -out_drop_state: - - if (ret != NOTIFY_OK && pasid_state) - put_pasid_state(pasid_state); - - put_device_state(dev_state); - -out: - return ret; -} - -static struct notifier_block ppr_nb = { - .notifier_call = ppr_notifier, -}; - -int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid, - struct task_struct *task) -{ - struct pasid_state *pasid_state; - struct device_state *dev_state; - struct mm_struct *mm; - u16 devid; - int ret; - - might_sleep(); - - if (!amd_iommu_v2_supported()) - return -ENODEV; - - devid = device_id(pdev); - dev_state = get_device_state(devid); - - if (dev_state == NULL) - return -EINVAL; - - ret = -EINVAL; - if (pasid >= dev_state->max_pasids) - goto out; - - ret = -ENOMEM; - pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL); - if (pasid_state == NULL) - goto out; - - - atomic_set(&pasid_state->count, 1); - init_waitqueue_head(&pasid_state->wq); - spin_lock_init(&pasid_state->lock); - - mm = get_task_mm(task); - pasid_state->mm = mm; - pasid_state->device_state = dev_state; - pasid_state->pasid = pasid; - pasid_state->invalid = true; /* Mark as valid only if we are - done with setting up the pasid */ - pasid_state->mn.ops = &iommu_mn; - - if (pasid_state->mm == NULL) - goto out_free; - - mmu_notifier_register(&pasid_state->mn, mm); - - ret = set_pasid_state(dev_state, pasid_state, pasid); - if (ret) - goto out_unregister; - - ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid, - __pa(pasid_state->mm->pgd)); - if (ret) - goto out_clear_state; - - /* Now we are ready to handle faults */ - pasid_state->invalid = false; - - /* - * Drop the reference to the mm_struct here. We rely on the - * mmu_notifier release call-back to inform us when the mm - * is going away. - */ - mmput(mm); - - return 0; - -out_clear_state: - clear_pasid_state(dev_state, pasid); - -out_unregister: - mmu_notifier_unregister(&pasid_state->mn, mm); - mmput(mm); - -out_free: - free_pasid_state(pasid_state); - -out: - put_device_state(dev_state); - - return ret; -} -EXPORT_SYMBOL(amd_iommu_bind_pasid); - -void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid) -{ - struct pasid_state *pasid_state; - struct device_state *dev_state; - u16 devid; - - might_sleep(); - - if (!amd_iommu_v2_supported()) - return; - - devid = device_id(pdev); - dev_state = get_device_state(devid); - if (dev_state == NULL) - return; - - if (pasid >= dev_state->max_pasids) - goto out; - - pasid_state = get_pasid_state(dev_state, pasid); - if (pasid_state == NULL) - goto out; - /* - * Drop reference taken here. We are safe because we still hold - * the reference taken in the amd_iommu_bind_pasid function. 
- */ - put_pasid_state(pasid_state); - - /* Clear the pasid state so that the pasid can be re-used */ - clear_pasid_state(dev_state, pasid_state->pasid); - - /* - * Call mmu_notifier_unregister to drop our reference - * to pasid_state->mm - */ - mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); - - put_pasid_state_wait(pasid_state); /* Reference taken in - amd_iommu_bind_pasid */ -out: - /* Drop reference taken in this function */ - put_device_state(dev_state); - - /* Drop reference taken in amd_iommu_bind_pasid */ - put_device_state(dev_state); -} -EXPORT_SYMBOL(amd_iommu_unbind_pasid); - -int amd_iommu_init_device(struct pci_dev *pdev, int pasids) -{ - struct device_state *dev_state; - struct iommu_group *group; - unsigned long flags; - int ret, tmp; - u16 devid; - - might_sleep(); - - /* - * When memory encryption is active the device is likely not in a - * direct-mapped domain. Forbid using IOMMUv2 functionality for now. - */ - if (mem_encrypt_active()) - return -ENODEV; - - if (!amd_iommu_v2_supported()) - return -ENODEV; - - if (pasids <= 0 || pasids > (PASID_MASK + 1)) - return -EINVAL; - - devid = device_id(pdev); - - dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL); - if (dev_state == NULL) - return -ENOMEM; - - spin_lock_init(&dev_state->lock); - init_waitqueue_head(&dev_state->wq); - dev_state->pdev = pdev; - dev_state->devid = devid; - - tmp = pasids; - for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9) - dev_state->pasid_levels += 1; - - atomic_set(&dev_state->count, 1); - dev_state->max_pasids = pasids; - - ret = -ENOMEM; - dev_state->states = (void *)get_zeroed_page(GFP_KERNEL); - if (dev_state->states == NULL) - goto out_free_dev_state; - - dev_state->domain = iommu_domain_alloc(&pci_bus_type); - if (dev_state->domain == NULL) - goto out_free_states; - - amd_iommu_domain_direct_map(dev_state->domain); - - ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids); - if (ret) - goto out_free_domain; - - group = iommu_group_get(&pdev->dev); - if (!group) { - ret = -EINVAL; - goto out_free_domain; - } - - ret = iommu_attach_group(dev_state->domain, group); - if (ret != 0) - goto out_drop_group; - - iommu_group_put(group); - - spin_lock_irqsave(&state_lock, flags); - - if (__get_device_state(devid) != NULL) { - spin_unlock_irqrestore(&state_lock, flags); - ret = -EBUSY; - goto out_free_domain; - } - - list_add_tail(&dev_state->list, &state_list); - - spin_unlock_irqrestore(&state_lock, flags); - - return 0; - -out_drop_group: - iommu_group_put(group); - -out_free_domain: - iommu_domain_free(dev_state->domain); - -out_free_states: - free_page((unsigned long)dev_state->states); - -out_free_dev_state: - kfree(dev_state); - - return ret; -} -EXPORT_SYMBOL(amd_iommu_init_device); - -void amd_iommu_free_device(struct pci_dev *pdev) -{ - struct device_state *dev_state; - unsigned long flags; - u16 devid; - - if (!amd_iommu_v2_supported()) - return; - - devid = device_id(pdev); - - spin_lock_irqsave(&state_lock, flags); - - dev_state = __get_device_state(devid); - if (dev_state == NULL) { - spin_unlock_irqrestore(&state_lock, flags); - return; - } - - list_del(&dev_state->list); - - spin_unlock_irqrestore(&state_lock, flags); - - /* Get rid of any remaining pasid states */ - free_pasid_states(dev_state); - - put_device_state(dev_state); - /* - * Wait until the last reference is dropped before freeing - * the device state. 
- */ - wait_event(dev_state->wq, !atomic_read(&dev_state->count)); - free_device_state(dev_state); -} -EXPORT_SYMBOL(amd_iommu_free_device); - -int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, - amd_iommu_invalid_ppr_cb cb) -{ - struct device_state *dev_state; - unsigned long flags; - u16 devid; - int ret; - - if (!amd_iommu_v2_supported()) - return -ENODEV; - - devid = device_id(pdev); - - spin_lock_irqsave(&state_lock, flags); - - ret = -EINVAL; - dev_state = __get_device_state(devid); - if (dev_state == NULL) - goto out_unlock; - - dev_state->inv_ppr_cb = cb; - - ret = 0; - -out_unlock: - spin_unlock_irqrestore(&state_lock, flags); - - return ret; -} -EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb); - -int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev, - amd_iommu_invalidate_ctx cb) -{ - struct device_state *dev_state; - unsigned long flags; - u16 devid; - int ret; - - if (!amd_iommu_v2_supported()) - return -ENODEV; - - devid = device_id(pdev); - - spin_lock_irqsave(&state_lock, flags); - - ret = -EINVAL; - dev_state = __get_device_state(devid); - if (dev_state == NULL) - goto out_unlock; - - dev_state->inv_ctx_cb = cb; - - ret = 0; - -out_unlock: - spin_unlock_irqrestore(&state_lock, flags); - - return ret; -} -EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb); - -static int __init amd_iommu_v2_init(void) -{ - int ret; - - pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n"); - - if (!amd_iommu_v2_supported()) { - pr_info("AMD IOMMUv2 functionality not available on this system\n"); - /* - * Load anyway to provide the symbols to other modules - * which may use AMD IOMMUv2 optionally. - */ - return 0; - } - - ret = -ENOMEM; - iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0); - if (iommu_wq == NULL) - goto out; - - amd_iommu_register_ppr_notifier(&ppr_nb); - - return 0; - -out: - return ret; -} - -static void __exit amd_iommu_v2_exit(void) -{ - struct device_state *dev_state; - int i; - - if (!amd_iommu_v2_supported()) - return; - - amd_iommu_unregister_ppr_notifier(&ppr_nb); - - flush_workqueue(iommu_wq); - - /* - * The loop below might call flush_workqueue(), so call - * destroy_workqueue() after it - */ - for (i = 0; i < MAX_DEVICES; ++i) { - dev_state = get_device_state(i); - - if (dev_state == NULL) - continue; - - WARN_ON_ONCE(1); - - put_device_state(dev_state); - amd_iommu_free_device(dev_state->pdev); - } - - destroy_workqueue(iommu_wq); -} - -module_init(amd_iommu_v2_init); -module_exit(amd_iommu_v2_exit); diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c new file mode 100644 index 000000000000..77c8e9a91cbc --- /dev/null +++ b/drivers/iommu/amd/pasid.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. 
+ */ + +#define pr_fmt(fmt) "AMD-Vi: " fmt +#define dev_fmt(fmt) pr_fmt(fmt) + +#include <linux/iommu.h> +#include <linux/mm_types.h> + +#include "amd_iommu.h" + +static inline bool is_pasid_enabled(struct iommu_dev_data *dev_data) +{ + if (dev_data->pasid_enabled && dev_data->max_pasids && + dev_data->gcr3_info.gcr3_tbl != NULL) + return true; + + return false; +} + +static inline bool is_pasid_valid(struct iommu_dev_data *dev_data, + ioasid_t pasid) +{ + if (pasid > 0 && pasid < dev_data->max_pasids) + return true; + + return false; +} + +static void remove_dev_pasid(struct pdom_dev_data *pdom_dev_data) +{ + /* Update GCR3 table and flush IOTLB */ + amd_iommu_clear_gcr3(pdom_dev_data->dev_data, pdom_dev_data->pasid); + + list_del(&pdom_dev_data->list); + kfree(pdom_dev_data); +} + +/* Clear PASID from device GCR3 table and remove pdom_dev_data from list */ +static void remove_pdom_dev_pasid(struct protection_domain *pdom, + struct device *dev, ioasid_t pasid) +{ + struct pdom_dev_data *pdom_dev_data; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + + lockdep_assert_held(&pdom->lock); + + for_each_pdom_dev_data(pdom_dev_data, pdom) { + if (pdom_dev_data->dev_data == dev_data && + pdom_dev_data->pasid == pasid) { + remove_dev_pasid(pdom_dev_data); + break; + } + } +} + +static void sva_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct pdom_dev_data *pdom_dev_data; + struct protection_domain *sva_pdom; + unsigned long flags; + + sva_pdom = container_of(mn, struct protection_domain, mn); + + spin_lock_irqsave(&sva_pdom->lock, flags); + + for_each_pdom_dev_data(pdom_dev_data, sva_pdom) { + amd_iommu_dev_flush_pasid_pages(pdom_dev_data->dev_data, + pdom_dev_data->pasid, + start, end - start); + } + + spin_unlock_irqrestore(&sva_pdom->lock, flags); +} + +static void sva_mn_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct pdom_dev_data *pdom_dev_data, *next; + struct protection_domain *sva_pdom; + unsigned long flags; + + sva_pdom = container_of(mn, struct protection_domain, mn); + + spin_lock_irqsave(&sva_pdom->lock, flags); + + /* Assume dev_data_list contains same PASID with different devices */ + for_each_pdom_dev_data_safe(pdom_dev_data, next, sva_pdom) + remove_dev_pasid(pdom_dev_data); + + spin_unlock_irqrestore(&sva_pdom->lock, flags); +} + +static const struct mmu_notifier_ops sva_mn = { + .arch_invalidate_secondary_tlbs = sva_arch_invalidate_secondary_tlbs, + .release = sva_mn_release, +}; + +int iommu_sva_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid, + struct iommu_domain *old) +{ + struct pdom_dev_data *pdom_dev_data; + struct protection_domain *sva_pdom = to_pdomain(domain); + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + unsigned long flags; + int ret = -EINVAL; + + if (old) + return -EOPNOTSUPP; + + /* PASID zero is used for requests from the I/O device without PASID */ + if (!is_pasid_valid(dev_data, pasid)) + return ret; + + /* Make sure PASID is enabled */ + if (!is_pasid_enabled(dev_data)) + return ret; + + /* Add PASID to protection domain pasid list */ + pdom_dev_data = kzalloc(sizeof(*pdom_dev_data), GFP_KERNEL); + if (pdom_dev_data == NULL) + return ret; + + pdom_dev_data->pasid = pasid; + pdom_dev_data->dev_data = dev_data; + + spin_lock_irqsave(&sva_pdom->lock, flags); + + /* Setup GCR3 table */ + ret = amd_iommu_set_gcr3(dev_data, pasid, + iommu_virt_to_phys(domain->mm->pgd)); + if (ret) { + 
kfree(pdom_dev_data); + goto out_unlock; + } + + list_add(&pdom_dev_data->list, &sva_pdom->dev_data_list); + +out_unlock: + spin_unlock_irqrestore(&sva_pdom->lock, flags); + return ret; +} + +void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain) +{ + struct protection_domain *sva_pdom; + unsigned long flags; + + if (!is_pasid_valid(dev_iommu_priv_get(dev), pasid)) + return; + + sva_pdom = to_pdomain(domain); + + spin_lock_irqsave(&sva_pdom->lock, flags); + + /* Remove PASID from dev_data_list */ + remove_pdom_dev_pasid(sva_pdom, dev, pasid); + + spin_unlock_irqrestore(&sva_pdom->lock, flags); +} + +static void iommu_sva_domain_free(struct iommu_domain *domain) +{ + struct protection_domain *sva_pdom = to_pdomain(domain); + + if (sva_pdom->mn.ops) + mmu_notifier_unregister(&sva_pdom->mn, domain->mm); + + amd_iommu_domain_free(domain); +} + +static const struct iommu_domain_ops amd_sva_domain_ops = { + .set_dev_pasid = iommu_sva_set_dev_pasid, + .free = iommu_sva_domain_free +}; + +struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev, + struct mm_struct *mm) +{ + struct protection_domain *pdom; + int ret; + + pdom = protection_domain_alloc(); + if (!pdom) + return ERR_PTR(-ENOMEM); + + pdom->domain.ops = &amd_sva_domain_ops; + pdom->mn.ops = &sva_mn; + pdom->domain.type = IOMMU_DOMAIN_SVA; + + ret = mmu_notifier_register(&pdom->mn, mm); + if (ret) { + amd_iommu_domain_free(&pdom->domain); + return ERR_PTR(ret); + } + + return &pdom->domain; +} diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c new file mode 100644 index 000000000000..e6767c057d01 --- /dev/null +++ b/drivers/iommu/amd/ppr.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. + */ + +#define pr_fmt(fmt) "AMD-Vi: " fmt +#define dev_fmt(fmt) pr_fmt(fmt) + +#include <linux/amd-iommu.h> +#include <linux/delay.h> +#include <linux/mmu_notifier.h> + +#include <asm/iommu.h> + +#include "amd_iommu.h" +#include "amd_iommu_types.h" + +#include "../iommu-pages.h" + +int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu) +{ + iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, + PPR_LOG_SIZE); + return iommu->ppr_log ? 0 : -ENOMEM; +} + +void amd_iommu_enable_ppr_log(struct amd_iommu *iommu) +{ + u64 entry; + + if (iommu->ppr_log == NULL) + return; + + iommu_feature_enable(iommu, CONTROL_PPR_EN); + + entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; + + memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, + &entry, sizeof(entry)); + + /* set head and tail to zero manually */ + writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); + + iommu_feature_enable(iommu, CONTROL_PPRINT_EN); + iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); +} + +void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu) +{ + iommu_free_pages(iommu->ppr_log); +} + +/* + * This function restarts ppr logging in case the IOMMU experienced + * PPR log overflow. 
+ */ +void amd_iommu_restart_ppr_log(struct amd_iommu *iommu) +{ + amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN, + CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK, + MMIO_STATUS_PPR_OVERFLOW_MASK); +} + +static inline u32 ppr_flag_to_fault_perm(u16 flag) +{ + int perm = 0; + + if (flag & PPR_FLAG_READ) + perm |= IOMMU_FAULT_PERM_READ; + if (flag & PPR_FLAG_WRITE) + perm |= IOMMU_FAULT_PERM_WRITE; + if (flag & PPR_FLAG_EXEC) + perm |= IOMMU_FAULT_PERM_EXEC; + if (!(flag & PPR_FLAG_US)) + perm |= IOMMU_FAULT_PERM_PRIV; + + return perm; +} + +static bool ppr_is_valid(struct amd_iommu *iommu, u64 *raw) +{ + struct device *dev = iommu->iommu.dev; + u16 devid = PPR_DEVID(raw[0]); + + if (!(PPR_FLAGS(raw[0]) & PPR_FLAG_GN)) { + dev_dbg(dev, "PPR logged [Request ignored due to GN=0 (device=%04x:%02x:%02x.%x " + "pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0])); + return false; + } + + if (PPR_FLAGS(raw[0]) & PPR_FLAG_RVSD) { + dev_dbg(dev, "PPR logged [Invalid request format (device=%04x:%02x:%02x.%x " + "pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0])); + return false; + } + + return true; +} + +static void iommu_call_iopf_notifier(struct amd_iommu *iommu, u64 *raw) +{ + struct iommu_dev_data *dev_data; + struct iopf_fault event; + struct pci_dev *pdev; + u16 devid = PPR_DEVID(raw[0]); + + if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { + pr_info_ratelimited("Unknown PPR request received\n"); + return; + } + + pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, + PCI_BUS_NUM(devid), devid & 0xff); + if (!pdev) + return; + + if (!ppr_is_valid(iommu, raw)) + goto out; + + memset(&event, 0, sizeof(struct iopf_fault)); + + event.fault.type = IOMMU_FAULT_PAGE_REQ; + event.fault.prm.perm = ppr_flag_to_fault_perm(PPR_FLAGS(raw[0])); + event.fault.prm.addr = (u64)(raw[1] & PAGE_MASK); + event.fault.prm.pasid = PPR_PASID(raw[0]); + event.fault.prm.grpid = PPR_TAG(raw[0]) & 0x1FF; + + /* + * PASID zero is used for requests from the I/O device without + * a PASID + */ + dev_data = dev_iommu_priv_get(&pdev->dev); + if (event.fault.prm.pasid == 0 || + event.fault.prm.pasid >= dev_data->max_pasids) { + pr_info_ratelimited("Invalid PASID : 0x%x, device : 0x%x\n", + event.fault.prm.pasid, pdev->dev.id); + goto out; + } + + event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + if (PPR_TAG(raw[0]) & 0x200) + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + + /* Submit event */ + iommu_report_device_fault(&pdev->dev, &event); + + return; + +out: + /* Nobody cared, abort */ + amd_iommu_complete_ppr(&pdev->dev, PPR_PASID(raw[0]), + IOMMU_PAGE_RESP_FAILURE, + PPR_TAG(raw[0]) & 0x1FF); +} + +void amd_iommu_poll_ppr_log(struct amd_iommu *iommu) +{ + u32 head, tail; + + if (iommu->ppr_log == NULL) + return; + + head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); + + while (head != tail) { + volatile u64 *raw; + u64 entry[2]; + int i; + + raw = (u64 *)(iommu->ppr_log + head); + + /* + * Hardware bug: Interrupt may arrive before the entry is + * written to memory. If this happens we need to wait for the + * entry to arrive. 
+ */ + for (i = 0; i < LOOP_TIMEOUT; ++i) { + if (PPR_REQ_TYPE(raw[0]) != 0) + break; + udelay(1); + } + + /* Avoid memcpy function-call overhead */ + entry[0] = raw[0]; + entry[1] = raw[1]; + + /* + * To detect the hardware errata 733 we need to clear the + * entry back to zero. This issue does not exist on SNP-enabled + * systems. Also, this buffer is not writeable on SNP-enabled + * systems. + */ + if (!amd_iommu_snp_en) + raw[0] = raw[1] = 0UL; + + /* Update head pointer of hardware ring-buffer */ + head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; + writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + + /* Handle PPR entry */ + iommu_call_iopf_notifier(iommu, entry); + } +} + +/************************************************************** * * IOPF handling stuff */ + +/* Set up the per-IOMMU IOPF queue if it does not exist yet. */ +int amd_iommu_iopf_init(struct amd_iommu *iommu) +{ + int ret = 0; + + if (iommu->iopf_queue) + return ret; + + snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x", + PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid)); + + iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name); + if (!iommu->iopf_queue) + ret = -ENOMEM; + + return ret; +} + +/* Destroy per-IOMMU IOPF queue if no longer needed. */ +void amd_iommu_iopf_uninit(struct amd_iommu *iommu) +{ + iopf_queue_free(iommu->iopf_queue); + iommu->iopf_queue = NULL; +} + +void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *resp) +{ + amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid); +} + +int amd_iommu_iopf_add_device(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data) +{ + int ret = 0; + + if (!dev_data->pri_enabled) + return ret; + + if (!iommu->iopf_queue) + return -EINVAL; + + ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev); + if (ret) + return ret; + + dev_data->ppr = true; + return 0; +} + +/* It's assumed that the caller has verified that the device was added to the IOPF queue */ +void amd_iommu_iopf_remove_device(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data) +{ + iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev); + dev_data->ppr = false; +} diff --git a/drivers/iommu/amd/quirks.c b/drivers/iommu/amd/quirks.c index 5120ce4fdce3..79dbb8f33b47 100644 --- a/drivers/iommu/amd/quirks.c +++ b/drivers/iommu/amd/quirks.c @@ -15,7 +15,7 @@ struct ivrs_quirk_entry { u8 id; - u16 devid; + u32 devid; }; enum { @@ -49,7 +49,7 @@ static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d) const struct ivrs_quirk_entry *i; for (i = d->driver_data; i->id != 0 && i->devid != 0; i++) - add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0); + add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u32 *)&i->devid, 0); return 0; }
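An editorial note on the reworked posted-interrupt API: the block comment added in iommu.c ahead of amd_iommu_update_ga() defines the caller contract (a fast IsRun/Destination/GALogIntr update on vCPU sched-in/out, with no IRTE invalidation). A minimal hypervisor-side sketch of that contract follows. It is illustrative only: the vcpu_ctx type and hook names are hypothetical, and ir_data stands for the handle returned through struct amd_iommu_pi_data->ir_data when the IRTE was set up via irq_set_vcpu_affinity(); only the amd_iommu_update_ga() signature and semantics come from this patch.

#include <linux/amd-iommu.h>

/* Hypothetical per-vCPU bookkeeping; only the ir_data handle matters here. */
struct vcpu_ctx {
	void *ir_data;	/* struct amd_ir_data *, opaque to the hypervisor */
};

/* vCPU scheduled in: post interrupts directly to its pCPU (IsRun=1). */
static void vcpu_sched_in(struct vcpu_ctx *v, int pcpu_apic_id)
{
	amd_iommu_update_ga(v->ir_data, pcpu_apic_id, false);
}

/* vCPU blocks: clear IsRun and request a GA log interrupt as a wake event. */
static void vcpu_block(struct vcpu_ctx *v)
{
	amd_iommu_update_ga(v->ir_data, -1, true);
}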

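On the consumer side of the new pasid.c: device drivers never call iommu_sva_set_dev_pasid() or amd_iommu_domain_alloc_sva() directly; the core SVA layer invokes them underneath the generic bind API. A minimal sketch, assuming a PRI/PASID-capable device the driver has already probed (dev and the function name are placeholders, not part of this patch):

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

static int bind_current_mm(struct device *dev, u32 *pasid)
{
	struct iommu_sva *handle;

	/*
	 * Core SVA entry point; for AMD hardware this lands in
	 * amd_iommu_domain_alloc_sva() and iommu_sva_set_dev_pasid().
	 */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* The PASID the device must tag its DMA requests with. */
	*pasid = iommu_sva_get_pasid(handle);

	/*
	 * PASID-tagged DMA now walks current->mm's page tables; faults
	 * are serviced through the PPR/IOPF path. Tear down later with
	 * iommu_sva_unbind_device(handle).
	 */
	return 0;
}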
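Finally, the end-to-end recoverable-fault path that ppr.c feeds is only implicit in the diff; the reconstruction below is editorial, and the middle steps belong to the generic iopf queue machinery rather than to this patch:

/*
 * Assumed flow of one page request (editorial reconstruction):
 *
 *   device issues a PRI page request
 *     -> IOMMU appends a PPR log entry and raises its interrupt
 *       -> amd_iommu_poll_ppr_log()
 *         -> iommu_call_iopf_notifier()     PPR entry -> struct iopf_fault
 *           -> iommu_report_device_fault()  queued on iommu->iopf_queue
 *             -> generic iopf worker resolves the fault against the SVA mm
 *               -> amd_iommu_page_response()
 *                 -> amd_iommu_complete_ppr() sends COMPLETE_PPR to the device
 */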