Diffstat (limited to 'drivers/iommu/ipmmu-vmsa.c')
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 1235
1 file changed, 634 insertions(+), 601 deletions(-)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 2a38aa15be17..ca848288dbf2 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1,46 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * IPMMU VMSA + * IOMMU API for Renesas VMSA-compatible IPMMU + * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * - * Copyright (C) 2014 Renesas Electronics Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * Copyright (C) 2014-2020 Renesas Electronics Corporation */ #include <linux/bitmap.h> #include <linux/delay.h> -#include <linux/dma-iommu.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/io-pgtable.h> #include <linux/iommu.h> -#include <linux/module.h> #include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/pci.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include <linux/slab.h> +#include <linux/sys_soc.h> #if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) #include <asm/dma-iommu.h> -#include <asm/pgalloc.h> +#else +#define arm_iommu_create_mapping(...) NULL +#define arm_iommu_attach_device(...) -ENODEV +#define arm_iommu_release_mapping(...) do {} while (0) #endif -#include "io-pgtable.h" +#define IPMMU_CTX_MAX 16U +#define IPMMU_CTX_INVALID -1 + +#define IPMMU_UTLB_MAX 64U -#define IPMMU_CTX_MAX 1 +struct ipmmu_features { + bool use_ns_alias_offset; + bool has_cache_leaf_nodes; + unsigned int number_of_contexts; + unsigned int num_utlbs; + bool setup_imbuscr; + bool twobit_imttbcr_sl0; + bool reserved_context; + bool cache_snoop; + unsigned int ctx_offset_base; + unsigned int ctx_offset_stride; + unsigned int utlb_offset_base; +}; struct ipmmu_vmsa_device { struct device *dev; void __iomem *base; - struct list_head list; - - unsigned int num_utlbs; + struct iommu_device iommu; + struct ipmmu_vmsa_device *root; + const struct ipmmu_features *features; + unsigned int num_ctx; spinlock_t lock; /* Protects ctx and domains[] */ DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX]; + s8 utlb_ctx[IPMMU_UTLB_MAX]; struct dma_iommu_mapping *mapping; }; @@ -53,41 +75,17 @@ struct ipmmu_vmsa_domain { struct io_pgtable_ops *iop; unsigned int context_id; - spinlock_t lock; /* Protects mappings */ + struct mutex mutex; /* Protects mappings */ }; -struct ipmmu_vmsa_iommu_priv { - struct ipmmu_vmsa_device *mmu; - unsigned int *utlbs; - unsigned int num_utlbs; - struct device *dev; - struct list_head list; -}; - -static DEFINE_SPINLOCK(ipmmu_devices_lock); -static LIST_HEAD(ipmmu_devices); - static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) { return container_of(dom, struct ipmmu_vmsa_domain, io_domain); } - -static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev) -{ -#if defined(CONFIG_ARM) - return dev->archdata.iommu; -#else - return dev->iommu_fwspec->iommu_priv; -#endif -} -static void set_priv(struct device *dev, struct ipmmu_vmsa_iommu_priv *p) +static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) { -#if defined(CONFIG_ARM) - dev->archdata.iommu = p; -#else - dev->iommu_fwspec->iommu_priv = p; -#endif + return dev_iommu_priv_get(dev); } #define TLB_LOOP_TIMEOUT 100 /* 100us */ @@ -98,117 +96,79 @@ static void 
set_priv(struct device *dev, struct ipmmu_vmsa_iommu_priv *p) #define IM_NS_ALIAS_OFFSET 0x800 -#define IM_CTX_SIZE 0x40 - -#define IMCTR 0x0000 -#define IMCTR_TRE (1 << 17) -#define IMCTR_AFE (1 << 16) -#define IMCTR_RTSEL_MASK (3 << 4) -#define IMCTR_RTSEL_SHIFT 4 -#define IMCTR_TREN (1 << 3) -#define IMCTR_INTEN (1 << 2) -#define IMCTR_FLUSH (1 << 1) -#define IMCTR_MMUEN (1 << 0) - -#define IMCAAR 0x0004 - -#define IMTTBCR 0x0008 -#define IMTTBCR_EAE (1 << 31) -#define IMTTBCR_PMB (1 << 30) -#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) -#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) -#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) -#define IMTTBCR_SH1_MASK (3 << 28) -#define IMTTBCR_ORGN1_NC (0 << 26) -#define IMTTBCR_ORGN1_WB_WA (1 << 26) -#define IMTTBCR_ORGN1_WT (2 << 26) -#define IMTTBCR_ORGN1_WB (3 << 26) -#define IMTTBCR_ORGN1_MASK (3 << 26) -#define IMTTBCR_IRGN1_NC (0 << 24) -#define IMTTBCR_IRGN1_WB_WA (1 << 24) -#define IMTTBCR_IRGN1_WT (2 << 24) -#define IMTTBCR_IRGN1_WB (3 << 24) -#define IMTTBCR_IRGN1_MASK (3 << 24) -#define IMTTBCR_TSZ1_MASK (7 << 16) -#define IMTTBCR_TSZ1_SHIFT 16 -#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) -#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) -#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) -#define IMTTBCR_SH0_MASK (3 << 12) -#define IMTTBCR_ORGN0_NC (0 << 10) -#define IMTTBCR_ORGN0_WB_WA (1 << 10) -#define IMTTBCR_ORGN0_WT (2 << 10) -#define IMTTBCR_ORGN0_WB (3 << 10) -#define IMTTBCR_ORGN0_MASK (3 << 10) -#define IMTTBCR_IRGN0_NC (0 << 8) -#define IMTTBCR_IRGN0_WB_WA (1 << 8) -#define IMTTBCR_IRGN0_WT (2 << 8) -#define IMTTBCR_IRGN0_WB (3 << 8) -#define IMTTBCR_IRGN0_MASK (3 << 8) -#define IMTTBCR_SL0_LVL_2 (0 << 4) -#define IMTTBCR_SL0_LVL_1 (1 << 4) -#define IMTTBCR_TSZ0_MASK (7 << 0) -#define IMTTBCR_TSZ0_SHIFT O - -#define IMBUSCR 0x000c -#define IMBUSCR_DVM (1 << 2) -#define IMBUSCR_BUSSEL_SYS (0 << 0) -#define IMBUSCR_BUSSEL_CCI (1 << 0) -#define IMBUSCR_BUSSEL_IMCAAR (2 << 0) -#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0) -#define IMBUSCR_BUSSEL_MASK (3 << 0) - -#define IMTTLBR0 0x0010 -#define IMTTUBR0 0x0014 -#define IMTTLBR1 0x0018 -#define IMTTUBR1 0x001c - -#define IMSTR 0x0020 -#define IMSTR_ERRLVL_MASK (3 << 12) -#define IMSTR_ERRLVL_SHIFT 12 -#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8) -#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8) -#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8) -#define IMSTR_ERRCODE_MASK (7 << 8) -#define IMSTR_MHIT (1 << 4) -#define IMSTR_ABORT (1 << 2) -#define IMSTR_PF (1 << 1) -#define IMSTR_TF (1 << 0) - -#define IMMAIR0 0x0028 -#define IMMAIR1 0x002c -#define IMMAIR_ATTR_MASK 0xff -#define IMMAIR_ATTR_DEVICE 0x04 -#define IMMAIR_ATTR_NC 0x44 -#define IMMAIR_ATTR_WBRWA 0xff -#define IMMAIR_ATTR_SHIFT(n) ((n) << 3) -#define IMMAIR_ATTR_IDX_NC 0 -#define IMMAIR_ATTR_IDX_WBRWA 1 -#define IMMAIR_ATTR_IDX_DEV 2 - -#define IMEAR 0x0030 - -#define IMPCTR 0x0200 -#define IMPSTR 0x0208 -#define IMPEAR 0x020c -#define IMPMBA(n) (0x0280 + ((n) * 4)) -#define IMPMBD(n) (0x02c0 + ((n) * 4)) - -#define IMUCTR(n) (0x0300 + ((n) * 16)) -#define IMUCTR_FIXADDEN (1 << 31) -#define IMUCTR_FIXADD_MASK (0xff << 16) -#define IMUCTR_FIXADD_SHIFT 16 -#define IMUCTR_TTSEL_MMU(n) ((n) << 4) -#define IMUCTR_TTSEL_PMB (8 << 4) -#define IMUCTR_TTSEL_MASK (15 << 4) -#define IMUCTR_FLUSH (1 << 1) -#define IMUCTR_MMUEN (1 << 0) - -#define IMUASID(n) (0x0308 + ((n) * 16)) -#define IMUASID_ASID8_MASK (0xff << 8) -#define IMUASID_ASID8_SHIFT 8 -#define IMUASID_ASID0_MASK (0xff << 0) -#define IMUASID_ASID0_SHIFT 0 +/* MMU "context" registers */ +#define 
IMCTR 0x0000 /* R-Car Gen2/3 */ +#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */ +#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */ +#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */ + +#define IMTTBCR 0x0008 /* R-Car Gen2/3 */ +#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */ +#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */ +#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */ +#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */ +#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */ +#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */ + +#define IMBUSCR 0x000c /* R-Car Gen2 only */ +#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */ +#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */ + +#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */ +#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */ + +#define IMSTR 0x0020 /* R-Car Gen2/3 */ +#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */ +#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */ +#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */ +#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */ + +#define IMMAIR0 0x0028 /* R-Car Gen2/3 */ + +#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */ +#define IMEUAR 0x0034 /* R-Car Gen3 only */ + +/* uTLB registers */ +#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) +#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */ +#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */ +#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */ +#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */ +#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */ + +#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n)) +#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */ +#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */ + +/* ----------------------------------------------------------------------------- + * Root device handling + */ + +static struct platform_driver ipmmu_driver; + +static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu) +{ + return mmu->root == mmu; +} + +static int __ipmmu_check_device(struct device *dev, void *data) +{ + struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); + struct ipmmu_vmsa_device **rootp = data; + + if (ipmmu_is_root(mmu)) + *rootp = mmu; + + return 0; +} + +static struct ipmmu_vmsa_device *ipmmu_find_root(void) +{ + struct ipmmu_vmsa_device *root = NULL; + + return driver_for_each_device(&ipmmu_driver.driver, NULL, &root, + __ipmmu_check_device) == 0 ? 
root : NULL; +} /* ----------------------------------------------------------------------------- * Read/Write Access @@ -225,15 +185,65 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, iowrite32(data, mmu->base + offset); } -static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) +static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg) +{ + unsigned int base = mmu->features->ctx_offset_base; + + if (context_id > 7) + base += 0x800 - 8 * 0x40; + + return base + context_id * mmu->features->ctx_offset_stride + reg; +} + +static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg) { - return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); + return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg)); } -static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, - u32 data) +static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu, + unsigned int context_id, unsigned int reg, u32 data) { - ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); + ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data); +} + +static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain, + unsigned int reg) +{ + return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg); +} + +static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain, + unsigned int reg, u32 data) +{ + ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); +} + +static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain, + unsigned int reg, u32 data) +{ + if (domain->mmu != domain->mmu->root) + ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data); + + ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); +} + +static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg) +{ + return mmu->features->utlb_offset_base + reg; +} + +static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu, + unsigned int utlb, u32 data) +{ + ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data); +} + +static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu, + unsigned int utlb, u32 data) +{ + ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data); } /* ----------------------------------------------------------------------------- @@ -243,26 +253,22 @@ static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, /* Wait for any pending TLB invalidations to complete */ static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) { - unsigned int count = 0; + u32 val; - while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { - cpu_relax(); - if (++count == TLB_LOOP_TIMEOUT) { - dev_err_ratelimited(domain->mmu->dev, + if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val, + !(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT, + false, domain, IMCTR)) + dev_err_ratelimited(domain->mmu->dev, "TLB sync timed out -- MMU may be deadlocked\n"); - return; - } - udelay(1); - } } static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) { u32 reg; - reg = ipmmu_ctx_read(domain, IMCTR); + reg = ipmmu_ctx_read_root(domain, IMCTR); reg |= IMCTR_FLUSH; - ipmmu_ctx_write(domain, IMCTR, reg); + ipmmu_ctx_write_all(domain, IMCTR, reg); ipmmu_tlb_sync(domain); } @@ -281,11 +287,11 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, */ /* TODO: What should we set the ASID to ? 
*/ - ipmmu_write(mmu, IMUASID(utlb), 0); + ipmmu_imuasid_write(mmu, utlb, 0); /* TODO: Do we need to flush the microTLB ? */ - ipmmu_write(mmu, IMUCTR(utlb), - IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | - IMUCTR_MMUEN); + ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) | + IMUCTR_FLUSH | IMUCTR_MMUEN); + mmu->utlb_ctx[utlb] = domain->context_id; } /* @@ -296,7 +302,8 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, { struct ipmmu_vmsa_device *mmu = domain->mmu; - ipmmu_write(mmu, IMUCTR(utlb), 0); + ipmmu_imuctr_write(mmu, utlb, 0); + mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID; } static void ipmmu_tlb_flush_all(void *cookie) @@ -306,16 +313,15 @@ static void ipmmu_tlb_flush_all(void *cookie) ipmmu_tlb_invalidate(domain); } -static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void ipmmu_tlb_flush(unsigned long iova, size_t size, + size_t granule, void *cookie) { - /* The hardware doesn't support selective TLB flush. */ + ipmmu_tlb_flush_all(cookie); } -static struct iommu_gather_ops ipmmu_gather_ops = { +static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, - .tlb_add_flush = ipmmu_tlb_add_flush, - .tlb_sync = ipmmu_tlb_flush_all, + .tlb_flush_walk = ipmmu_tlb_flush, }; /* ----------------------------------------------------------------------------- @@ -330,89 +336,72 @@ static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, spin_lock_irqsave(&mmu->lock, flags); - ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX); - if (ret != IPMMU_CTX_MAX) { + ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx); + if (ret != mmu->num_ctx) { mmu->domains[ret] = domain; set_bit(ret, mmu->ctx); - } + } else + ret = -EBUSY; spin_unlock_irqrestore(&mmu->lock, flags); return ret; } -static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) +static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, + unsigned int context_id) { - u64 ttbr; - int ret; + unsigned long flags; - /* - * Allocate the page table operations. - * - * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory - * access, Long-descriptor format" that the NStable bit being set in a - * table descriptor will result in the NStable and NS bits of all child - * entries being ignored and considered as being set. The IPMMU seems - * not to comply with this, as it generates a secure access page fault - * if any of the NStable and NS bits isn't set when running in - * non-secure mode. - */ - domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; - domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; - domain->cfg.ias = 32; - domain->cfg.oas = 40; - domain->cfg.tlb = &ipmmu_gather_ops; - domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); - domain->io_domain.geometry.force_aperture = true; - /* - * TODO: Add support for coherent walk through CCI with DVM and remove - * cache handling. For now, delegate it to the io-pgtable code. - */ - domain->cfg.iommu_dev = domain->mmu->dev; + spin_lock_irqsave(&mmu->lock, flags); - domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, - domain); - if (!domain->iop) - return -EINVAL; + clear_bit(context_id, mmu->ctx); + mmu->domains[context_id] = NULL; - /* - * Find an unused context. 
- */ - ret = ipmmu_domain_allocate_context(domain->mmu, domain); - if (ret == IPMMU_CTX_MAX) { - free_io_pgtable_ops(domain->iop); - return -EBUSY; - } + spin_unlock_irqrestore(&mmu->lock, flags); +} - domain->context_id = ret; +static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain) +{ + u64 ttbr; + u32 tmp; /* TTBR0 */ - ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; - ipmmu_ctx_write(domain, IMTTLBR0, ttbr); - ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); + ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr; + ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr); + ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32); /* * TTBCR - * We use long descriptors with inner-shareable WBWA tables and allocate - * the whole 32-bit VA space to TTBR0. + * We use long descriptors and allocate the whole 32-bit VA space to + * TTBR0. */ - ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE | - IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | - IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); + if (domain->mmu->features->twobit_imttbcr_sl0) + tmp = IMTTBCR_SL0_TWOBIT_LVL_1; + else + tmp = IMTTBCR_SL0_LVL_1; + + if (domain->mmu->features->cache_snoop) + tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | + IMTTBCR_IRGN0_WB_WA; + + ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp); /* MAIR0 */ - ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]); + ipmmu_ctx_write_root(domain, IMMAIR0, + domain->cfg.arm_lpae_s1_cfg.mair); /* IMBUSCR */ - ipmmu_ctx_write(domain, IMBUSCR, - ipmmu_ctx_read(domain, IMBUSCR) & - ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); + if (domain->mmu->features->setup_imbuscr) + ipmmu_ctx_write_root(domain, IMBUSCR, + ipmmu_ctx_read_root(domain, IMBUSCR) & + ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK)); /* * IMSTR * Clear all interrupt flags. */ - ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR)); + ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR)); /* * IMCTR @@ -421,35 +410,74 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) * software management as we have no use for it. Flush the TLB as * required when modifying the context registers. */ - ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); - - return 0; + ipmmu_ctx_write_all(domain, IMCTR, + IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); } -static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, - unsigned int context_id) +static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) { - unsigned long flags; + int ret; - spin_lock_irqsave(&mmu->lock, flags); + /* + * Allocate the page table operations. + * + * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory + * access, Long-descriptor format" that the NStable bit being set in a + * table descriptor will result in the NStable and NS bits of all child + * entries being ignored and considered as being set. The IPMMU seems + * not to comply with this, as it generates a secure access page fault + * if any of the NStable and NS bits isn't set when running in + * non-secure mode. + */ + domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; + domain->cfg.pgsize_bitmap = domain->io_domain.pgsize_bitmap; + domain->cfg.ias = 32; + domain->cfg.oas = 40; + domain->cfg.tlb = &ipmmu_flush_ops; + domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); + domain->io_domain.geometry.force_aperture = true; + /* + * TODO: Add support for coherent walk through CCI with DVM and remove + * cache handling. For now, delegate it to the io-pgtable code. 
+ */ + domain->cfg.coherent_walk = false; + domain->cfg.iommu_dev = domain->mmu->root->dev; - clear_bit(context_id, mmu->ctx); - mmu->domains[context_id] = NULL; + /* + * Find an unused context. + */ + ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); + if (ret < 0) + return ret; - spin_unlock_irqrestore(&mmu->lock, flags); + domain->context_id = ret; + + domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, + domain); + if (!domain->iop) { + ipmmu_domain_free_context(domain->mmu->root, + domain->context_id); + return -EINVAL; + } + + ipmmu_domain_setup_context(domain); + return 0; } static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) { + if (!domain->mmu) + return; + /* * Disable the context. Flush the TLB as required when modifying the * context registers. * * TODO: Is TLB flush really needed ? */ - ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); + ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH); ipmmu_tlb_sync(domain); - ipmmu_domain_free_context(domain->mmu, domain->context_id); + ipmmu_domain_free_context(domain->mmu->root, domain->context_id); } /* ----------------------------------------------------------------------------- @@ -460,14 +488,16 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) { const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; struct ipmmu_vmsa_device *mmu = domain->mmu; + unsigned long iova; u32 status; - u32 iova; - status = ipmmu_ctx_read(domain, IMSTR); + status = ipmmu_ctx_read_root(domain, IMSTR); if (!(status & err_mask)) return IRQ_NONE; - iova = ipmmu_ctx_read(domain, IMEAR); + iova = ipmmu_ctx_read_root(domain, IMELAR); + if (IS_ENABLED(CONFIG_64BIT)) + iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32; /* * Clear the error status flags. Unlike traditional interrupt flag @@ -475,14 +505,14 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) * seems to require 0. The error address register must be read before, * otherwise its value will be 0. */ - ipmmu_ctx_write(domain, IMSTR, 0); + ipmmu_ctx_write_root(domain, IMSTR, 0); /* Log fatal errors. */ if (status & IMSTR_MHIT) - dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", + dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n", iova); if (status & IMSTR_ABORT) - dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", + dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n", iova); if (!(status & (IMSTR_PF | IMSTR_TF))) @@ -498,7 +528,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) return IRQ_HANDLED; dev_err_ratelimited(mmu->dev, - "Unhandled fault: status 0x%08x iova 0x%08x\n", + "Unhandled fault: status 0x%08x iova 0x%lx\n", status, iova); return IRQ_HANDLED; @@ -516,7 +546,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev) /* * Check interrupts for all active contexts. 
*/ - for (i = 0; i < IPMMU_CTX_MAX; i++) { + for (i = 0; i < mmu->num_ctx; i++) { if (!mmu->domains[i]) continue; if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED) @@ -532,7 +562,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev) * IOMMU Operations */ -static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) +static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev) { struct ipmmu_vmsa_domain *domain; @@ -540,7 +570,8 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) if (!domain) return NULL; - spin_lock_init(&domain->lock); + mutex_init(&domain->mutex); + domain->io_domain.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; return &domain->io_domain; } @@ -559,12 +590,11 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain) } static int ipmmu_attach_device(struct iommu_domain *io_domain, - struct device *dev) + struct device *dev, struct iommu_domain *old) { - struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); - struct ipmmu_vmsa_device *mmu = priv->mmu; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - unsigned long flags; unsigned int i; int ret = 0; @@ -573,66 +603,100 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, return -ENXIO; } - spin_lock_irqsave(&domain->lock, flags); + mutex_lock(&domain->mutex); if (!domain->mmu) { /* The domain hasn't been used yet, initialize it. */ domain->mmu = mmu; ret = ipmmu_domain_init_context(domain); + if (ret < 0) { + dev_err(dev, "Unable to initialize IPMMU context\n"); + domain->mmu = NULL; + } else { + dev_info(dev, "Using IPMMU context %u\n", + domain->context_id); + } } else if (domain->mmu != mmu) { /* * Something is wrong, we can't attach two devices using * different IOMMUs to the same domain. */ - dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", - dev_name(mmu->dev), dev_name(domain->mmu->dev)); ret = -EINVAL; } else dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); - spin_unlock_irqrestore(&domain->lock, flags); + mutex_unlock(&domain->mutex); if (ret < 0) return ret; - for (i = 0; i < priv->num_utlbs; ++i) - ipmmu_utlb_enable(domain, priv->utlbs[i]); + for (i = 0; i < fwspec->num_ids; ++i) + ipmmu_utlb_enable(domain, fwspec->ids[i]); return 0; } -static void ipmmu_detach_device(struct iommu_domain *io_domain, - struct device *dev) +static int ipmmu_iommu_identity_attach(struct iommu_domain *identity_domain, + struct device *dev, + struct iommu_domain *old) { - struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); - struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct ipmmu_vmsa_domain *domain; unsigned int i; - for (i = 0; i < priv->num_utlbs; ++i) - ipmmu_utlb_disable(domain, priv->utlbs[i]); + if (old == identity_domain || !old) + return 0; + + domain = to_vmsa_domain(old); + for (i = 0; i < fwspec->num_ids; ++i) + ipmmu_utlb_disable(domain, fwspec->ids[i]); /* * TODO: Optimize by disabling the context when no device is attached. 
*/ + return 0; } +static struct iommu_domain_ops ipmmu_iommu_identity_ops = { + .attach_dev = ipmmu_iommu_identity_attach, +}; + +static struct iommu_domain ipmmu_iommu_identity_domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &ipmmu_iommu_identity_ops, +}; + static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - if (!domain) - return -ENODEV; - - return domain->iop->map(domain->iop, iova, paddr, size, prot); + return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount, + prot, gfp, mapped); } static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, - size_t size) + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - return domain->iop->unmap(domain->iop, iova, size); + return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather); +} + +static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain) +{ + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); + + if (domain->mmu) + ipmmu_tlb_flush_all(domain); +} + +static void ipmmu_iotlb_sync(struct iommu_domain *io_domain, + struct iommu_iotlb_gather *gather) +{ + ipmmu_flush_iotlb_all(io_domain); } static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, @@ -645,137 +709,92 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, return domain->iop->iova_to_phys(domain->iop, iova); } -static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, - unsigned int *utlbs, unsigned int num_utlbs) +static int ipmmu_init_platform_device(struct device *dev, + const struct of_phandle_args *args) { - unsigned int i; - - for (i = 0; i < num_utlbs; ++i) { - struct of_phandle_args args; - int ret; + struct platform_device *ipmmu_pdev; - ret = of_parse_phandle_with_args(dev->of_node, "iommus", - "#iommu-cells", i, &args); - if (ret < 0) - return ret; + ipmmu_pdev = of_find_device_by_node(args->np); + if (!ipmmu_pdev) + return -ENODEV; - of_node_put(args.np); + dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev)); - if (args.np != mmu->dev->of_node || args.args_count != 1) - return -EINVAL; - - utlbs[i] = args.args[0]; - } + put_device(&ipmmu_pdev->dev); return 0; } -static int ipmmu_init_platform_device(struct device *dev) -{ - struct ipmmu_vmsa_iommu_priv *priv; - struct ipmmu_vmsa_device *mmu; - unsigned int *utlbs; - unsigned int i; - int num_utlbs; - int ret = -ENODEV; - - /* Find the master corresponding to the device. 
*/ - - num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus", - "#iommu-cells"); - if (num_utlbs < 0) - return -ENODEV; +static const struct soc_device_attribute soc_needs_opt_in[] = { + { .family = "R-Car Gen3", }, + { .family = "R-Car Gen4", }, + { .family = "RZ/G2", }, + { /* sentinel */ } +}; - utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL); - if (!utlbs) - return -ENOMEM; +static const struct soc_device_attribute soc_denylist[] = { + { .soc_id = "r8a774a1", }, + { .soc_id = "r8a7795", .revision = "ES2.*" }, + { .soc_id = "r8a7796", }, + { /* sentinel */ } +}; - spin_lock(&ipmmu_devices_lock); +static const char * const devices_allowlist[] = { + "ee100000.mmc", + "ee120000.mmc", + "ee140000.mmc", + "ee160000.mmc" +}; - list_for_each_entry(mmu, &ipmmu_devices, list) { - ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs); - if (!ret) { - /* - * TODO Take a reference to the MMU to protect - * against device removal. - */ - break; - } - } +static bool ipmmu_device_is_allowed(struct device *dev) +{ + unsigned int i; - spin_unlock(&ipmmu_devices_lock); + /* + * R-Car Gen3/4 and RZ/G2 use the allow list to opt-in devices. + * For Other SoCs, this returns true anyway. + */ + if (!soc_device_match(soc_needs_opt_in)) + return true; - if (ret < 0) - goto error; + /* Check whether this SoC can use the IPMMU correctly or not */ + if (soc_device_match(soc_denylist)) + return false; - for (i = 0; i < num_utlbs; ++i) { - if (utlbs[i] >= mmu->num_utlbs) { - ret = -EINVAL; - goto error; - } - } + /* Check whether this device is a PCI device */ + if (dev_is_pci(dev)) + return true; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto error; + /* Check whether this device can work with the IPMMU */ + for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) { + if (!strcmp(dev_name(dev), devices_allowlist[i])) + return true; } - priv->mmu = mmu; - priv->utlbs = utlbs; - priv->num_utlbs = num_utlbs; - priv->dev = dev; - set_priv(dev, priv); - return 0; - -error: - kfree(utlbs); - return ret; + /* Otherwise, do not allow use of IPMMU */ + return false; } -#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) - -static struct iommu_domain *ipmmu_domain_alloc(unsigned type) +static int ipmmu_of_xlate(struct device *dev, + const struct of_phandle_args *spec) { - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; + if (!ipmmu_device_is_allowed(dev)) + return -ENODEV; - return __ipmmu_domain_alloc(type); + iommu_fwspec_add_ids(dev, spec->args, 1); + + /* Initialize once - xlate() will call multiple times */ + if (to_ipmmu(dev)) + return 0; + + return ipmmu_init_platform_device(dev, spec); } -static int ipmmu_add_device(struct device *dev) +static int ipmmu_init_arm_mapping(struct device *dev) { - struct ipmmu_vmsa_device *mmu = NULL; - struct iommu_group *group; + struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); int ret; - if (to_priv(dev)) { - dev_warn(dev, "IOMMU driver already assigned to device %s\n", - dev_name(dev)); - return -EINVAL; - } - - /* Create a device group and add the device to it. 
*/ - group = iommu_group_alloc(); - if (IS_ERR(group)) { - dev_err(dev, "Failed to allocate IOMMU group\n"); - ret = PTR_ERR(group); - goto error; - } - - ret = iommu_group_add_device(group, dev); - iommu_group_put(group); - - if (ret < 0) { - dev_err(dev, "Failed to add device to IPMMU group\n"); - group = NULL; - goto error; - } - - ret = ipmmu_init_platform_device(dev); - if (ret < 0) - goto error; - /* * Create the ARM mapping, used by the ARM DMA mapping core to allocate * VAs. This will allocate a corresponding IOMMU domain. @@ -785,12 +804,10 @@ static int ipmmu_add_device(struct device *dev) * - Make the mapping size configurable ? We currently use a 2GB mapping * at a 1GB offset to ensure that NULL VAs will fault. */ - mmu = to_priv(dev)->mmu; if (!mmu->mapping) { struct dma_iommu_mapping *mapping; - mapping = arm_iommu_create_mapping(&platform_bus_type, - SZ_1G, SZ_2G); + mapping = arm_iommu_create_mapping(dev, SZ_1G, SZ_2G); if (IS_ERR(mapping)) { dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); ret = PTR_ERR(mapping); @@ -810,184 +827,76 @@ static int ipmmu_add_device(struct device *dev) return 0; error: - if (mmu) + if (mmu->mapping) arm_iommu_release_mapping(mmu->mapping); - if (!IS_ERR_OR_NULL(group)) - iommu_group_remove_device(dev); - - kfree(to_priv(dev)->utlbs); - kfree(to_priv(dev)); - set_priv(dev, NULL); - return ret; } -static void ipmmu_remove_device(struct device *dev) -{ - struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); - - arm_iommu_detach_device(dev); - iommu_group_remove_device(dev); - - kfree(priv->utlbs); - kfree(priv); - - set_priv(dev, NULL); -} - -static const struct iommu_ops ipmmu_ops = { - .domain_alloc = ipmmu_domain_alloc, - .domain_free = ipmmu_domain_free, - .attach_dev = ipmmu_attach_device, - .detach_dev = ipmmu_detach_device, - .map = ipmmu_map, - .unmap = ipmmu_unmap, - .map_sg = default_iommu_map_sg, - .iova_to_phys = ipmmu_iova_to_phys, - .add_device = ipmmu_add_device, - .remove_device = ipmmu_remove_device, - .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, -}; - -#endif /* !CONFIG_ARM && CONFIG_IOMMU_DMA */ - -#ifdef CONFIG_IOMMU_DMA - -static DEFINE_SPINLOCK(ipmmu_slave_devices_lock); -static LIST_HEAD(ipmmu_slave_devices); - -static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type) +static struct iommu_device *ipmmu_probe_device(struct device *dev) { - struct iommu_domain *io_domain = NULL; - - switch (type) { - case IOMMU_DOMAIN_UNMANAGED: - io_domain = __ipmmu_domain_alloc(type); - break; - - case IOMMU_DOMAIN_DMA: - io_domain = __ipmmu_domain_alloc(type); - if (io_domain) - iommu_get_dma_cookie(io_domain); - break; - } - - return io_domain; -} - -static void ipmmu_domain_free_dma(struct iommu_domain *io_domain) -{ - switch (io_domain->type) { - case IOMMU_DOMAIN_DMA: - iommu_put_dma_cookie(io_domain); - /* fall-through */ - default: - ipmmu_domain_free(io_domain); - break; - } -} - -static int ipmmu_add_device_dma(struct device *dev) -{ - struct iommu_fwspec *fwspec = dev->iommu_fwspec; - struct iommu_group *group; + struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); /* * Only let through devices that have been verified in xlate() - * We may get called with dev->iommu_fwspec set to NULL. 
*/ - if (!fwspec || !fwspec->iommu_priv) - return -ENODEV; + if (!mmu) + return ERR_PTR(-ENODEV); - group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); - - spin_lock(&ipmmu_slave_devices_lock); - list_add(&to_priv(dev)->list, &ipmmu_slave_devices); - spin_unlock(&ipmmu_slave_devices_lock); - return 0; + return &mmu->iommu; } -static void ipmmu_remove_device_dma(struct device *dev) +static void ipmmu_probe_finalize(struct device *dev) { - struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); + int ret = 0; - spin_lock(&ipmmu_slave_devices_lock); - list_del(&priv->list); - spin_unlock(&ipmmu_slave_devices_lock); + if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) + ret = ipmmu_init_arm_mapping(dev); - iommu_group_remove_device(dev); + if (ret) + dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n"); } -static struct device *ipmmu_find_sibling_device(struct device *dev) +static void ipmmu_release_device(struct device *dev) { - struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); - struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL; - bool found = false; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); + unsigned int i; - spin_lock(&ipmmu_slave_devices_lock); + for (i = 0; i < fwspec->num_ids; ++i) { + unsigned int utlb = fwspec->ids[i]; - list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) { - if (priv == sibling_priv) - continue; - if (sibling_priv->mmu == priv->mmu) { - found = true; - break; - } + ipmmu_imuctr_write(mmu, utlb, 0); + mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID; } - spin_unlock(&ipmmu_slave_devices_lock); - - return found ? sibling_priv->dev : NULL; -} - -static struct iommu_group *ipmmu_find_group_dma(struct device *dev) -{ - struct iommu_group *group; - struct device *sibling; - - sibling = ipmmu_find_sibling_device(dev); - if (sibling) - group = iommu_group_get(sibling); - if (!sibling || IS_ERR(group)) - group = generic_device_group(dev); - - return group; -} - -static int ipmmu_of_xlate_dma(struct device *dev, - struct of_phandle_args *spec) -{ - /* If the IPMMU device is disabled in DT then return error - * to make sure the of_iommu code does not install ops - * even though the iommu device is disabled - */ - if (!of_device_is_available(spec->np)) - return -ENODEV; - - return ipmmu_init_platform_device(dev); + arm_iommu_release_mapping(mmu->mapping); } static const struct iommu_ops ipmmu_ops = { - .domain_alloc = ipmmu_domain_alloc_dma, - .domain_free = ipmmu_domain_free_dma, - .attach_dev = ipmmu_attach_device, - .detach_dev = ipmmu_detach_device, - .map = ipmmu_map, - .unmap = ipmmu_unmap, - .map_sg = default_iommu_map_sg, - .iova_to_phys = ipmmu_iova_to_phys, - .add_device = ipmmu_add_device_dma, - .remove_device = ipmmu_remove_device_dma, - .device_group = ipmmu_find_group_dma, - .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, - .of_xlate = ipmmu_of_xlate_dma, + .identity_domain = &ipmmu_iommu_identity_domain, + .domain_alloc_paging = ipmmu_domain_alloc_paging, + .probe_device = ipmmu_probe_device, + .release_device = ipmmu_release_device, + .probe_finalize = ipmmu_probe_finalize, + /* + * FIXME: The device grouping is a fixed property of the hardware's + * ability to isolate and control DMA, it should not depend on kconfig. + */ + .device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA) + ? 
generic_device_group : generic_single_device_group, + .of_xlate = ipmmu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = ipmmu_attach_device, + .map_pages = ipmmu_map, + .unmap_pages = ipmmu_unmap, + .flush_iotlb_all = ipmmu_flush_iotlb_all, + .iotlb_sync = ipmmu_iotlb_sync, + .iova_to_phys = ipmmu_iova_to_phys, + .free = ipmmu_domain_free, + } }; -#endif /* CONFIG_IOMMU_DMA */ - /* ----------------------------------------------------------------------------- * Probe/remove and init */ @@ -997,14 +906,106 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) unsigned int i; /* Disable all contexts. */ - for (i = 0; i < 4; ++i) - ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0); + for (i = 0; i < mmu->num_ctx; ++i) + ipmmu_ctx_write(mmu, i, IMCTR, 0); } +static const struct ipmmu_features ipmmu_features_default = { + .use_ns_alias_offset = true, + .has_cache_leaf_nodes = false, + .number_of_contexts = 1, /* software only tested with one context */ + .num_utlbs = 32, + .setup_imbuscr = true, + .twobit_imttbcr_sl0 = false, + .reserved_context = false, + .cache_snoop = true, + .ctx_offset_base = 0, + .ctx_offset_stride = 0x40, + .utlb_offset_base = 0, +}; + +static const struct ipmmu_features ipmmu_features_rcar_gen3 = { + .use_ns_alias_offset = false, + .has_cache_leaf_nodes = true, + .number_of_contexts = 8, + .num_utlbs = 48, + .setup_imbuscr = false, + .twobit_imttbcr_sl0 = true, + .reserved_context = true, + .cache_snoop = false, + .ctx_offset_base = 0, + .ctx_offset_stride = 0x40, + .utlb_offset_base = 0, +}; + +static const struct ipmmu_features ipmmu_features_rcar_gen4 = { + .use_ns_alias_offset = false, + .has_cache_leaf_nodes = true, + .number_of_contexts = 16, + .num_utlbs = 64, + .setup_imbuscr = false, + .twobit_imttbcr_sl0 = true, + .reserved_context = true, + .cache_snoop = false, + .ctx_offset_base = 0x10000, + .ctx_offset_stride = 0x1040, + .utlb_offset_base = 0x3000, +}; + +static const struct of_device_id ipmmu_of_ids[] = { + { + .compatible = "renesas,ipmmu-vmsa", + .data = &ipmmu_features_default, + }, { + .compatible = "renesas,ipmmu-r8a774a1", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a774b1", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a774c0", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a774e1", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a7795", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a7796", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77961", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77965", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77970", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77980", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77990", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a77995", + .data = &ipmmu_features_rcar_gen3, + }, { + .compatible = "renesas,ipmmu-r8a779a0", + .data = &ipmmu_features_rcar_gen4, + }, { + .compatible = "renesas,rcar-gen4-ipmmu-vmsa", + .data = &ipmmu_features_rcar_gen4, + }, { + /* Terminator */ + }, +}; + static int ipmmu_probe(struct platform_device *pdev) { struct ipmmu_vmsa_device *mmu; - struct resource *res; int irq; int ret; @@ -1015,13 +1016,16 @@ static int ipmmu_probe(struct platform_device *pdev) } mmu->dev 
= &pdev->dev; - mmu->num_utlbs = 32; spin_lock_init(&mmu->lock); bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); + mmu->features = of_device_get_match_data(&pdev->dev); + memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; /* Map I/O memory and request IRQ. */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mmu->base = devm_ioremap_resource(&pdev->dev, res); + mmu->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mmu->base)) return PTR_ERR(mmu->base); @@ -1037,91 +1041,120 @@ static int ipmmu_probe(struct platform_device *pdev) * Offset the registers base unconditionally to point to the non-secure * alias space for now. */ - mmu->base += IM_NS_ALIAS_OFFSET; + if (mmu->features->use_ns_alias_offset) + mmu->base += IM_NS_ALIAS_OFFSET; - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "no IRQ found\n"); - return irq; - } - - ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, - dev_name(&pdev->dev), mmu); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); - return ret; - } + mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts); - ipmmu_device_reset(mmu); + /* + * Determine if this IPMMU instance is a root device by checking for + * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property. + */ + if (!mmu->features->has_cache_leaf_nodes || + !of_property_present(pdev->dev.of_node, "renesas,ipmmu-main")) + mmu->root = mmu; + else + mmu->root = ipmmu_find_root(); /* - * We can't create the ARM mapping here as it requires the bus to have - * an IOMMU, which only happens when bus_set_iommu() is called in - * ipmmu_init() after the probe function returns. + * Wait until the root device has been registered for sure. 
*/ + if (!mmu->root) + return -EPROBE_DEFER; + + /* Root devices have mandatory IRQs */ + if (ipmmu_is_root(mmu)) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0, + dev_name(&pdev->dev), mmu); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); + return ret; + } - spin_lock(&ipmmu_devices_lock); - list_add(&mmu->list, &ipmmu_devices); - spin_unlock(&ipmmu_devices_lock); + ipmmu_device_reset(mmu); + + if (mmu->features->reserved_context) { + dev_info(&pdev->dev, "IPMMU context 0 is reserved\n"); + set_bit(0, mmu->ctx); + } + } platform_set_drvdata(pdev, mmu); + /* + * Register the IPMMU to the IOMMU subsystem in the following cases: + * - R-Car Gen2 IPMMU (all devices registered) + * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device) + */ + if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu)) + return 0; - return 0; + ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s", + dev_name(&pdev->dev)); + if (ret) + return ret; + + ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev); + if (ret) + iommu_device_sysfs_remove(&mmu->iommu); + + return ret; } -static int ipmmu_remove(struct platform_device *pdev) +static void ipmmu_remove(struct platform_device *pdev) { struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); - spin_lock(&ipmmu_devices_lock); - list_del(&mmu->list); - spin_unlock(&ipmmu_devices_lock); + iommu_device_sysfs_remove(&mmu->iommu); + iommu_device_unregister(&mmu->iommu); -#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) arm_iommu_release_mapping(mmu->mapping); -#endif ipmmu_device_reset(mmu); +} + +static int ipmmu_resume_noirq(struct device *dev) +{ + struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); + unsigned int i; + + /* Reset root MMU and restore contexts */ + if (ipmmu_is_root(mmu)) { + ipmmu_device_reset(mmu); + + for (i = 0; i < mmu->num_ctx; i++) { + if (!mmu->domains[i]) + continue; + + ipmmu_domain_setup_context(mmu->domains[i]); + } + } + + /* Re-enable active micro-TLBs */ + for (i = 0; i < mmu->features->num_utlbs; i++) { + if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID) + continue; + + ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i); + } return 0; } -static const struct of_device_id ipmmu_of_ids[] = { - { .compatible = "renesas,ipmmu-vmsa", }, - { } +static const struct dev_pm_ops ipmmu_pm = { + NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq) }; static struct platform_driver ipmmu_driver = { .driver = { .name = "ipmmu-vmsa", - .of_match_table = of_match_ptr(ipmmu_of_ids), + .of_match_table = ipmmu_of_ids, + .pm = pm_sleep_ptr(&ipmmu_pm), }, .probe = ipmmu_probe, - .remove = ipmmu_remove, + .remove = ipmmu_remove, }; - -static int __init ipmmu_init(void) -{ - int ret; - - ret = platform_driver_register(&ipmmu_driver); - if (ret < 0) - return ret; - - if (!iommu_present(&platform_bus_type)) - bus_set_iommu(&platform_bus_type, &ipmmu_ops); - - return 0; -} - -static void __exit ipmmu_exit(void) -{ - return platform_driver_unregister(&ipmmu_driver); -} - -subsys_initcall(ipmmu_init); -module_exit(ipmmu_exit); - -MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU"); -MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(ipmmu_driver); |
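
Note: the block below is a standalone sketch in plain C, not kernel code. It only illustrates how the new ipmmu_features fields turn into register addresses; the constants are copied from ipmmu_ctx_reg(), IMUCTR()/IMUCTR0()/IMUCTR32() and the ipmmu_features_default/_rcar_gen3/_rcar_gen4 tables in the patch above, while the struct and helper names used here (features, ctx_reg, imuctr) are purely illustrative.

#include <stdio.h>

struct features {
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

static const struct features rcar_gen3 = { 0x0,     0x40,   0x0    };
static const struct features rcar_gen4 = { 0x10000, 0x1040, 0x3000 };

static unsigned int ctx_reg(const struct features *f,
			    unsigned int context_id, unsigned int reg)
{
	unsigned int base = f->ctx_offset_base;

	/* Contexts 8..15 sit in a second bank, as in ipmmu_ctx_reg(). */
	if (context_id > 7)
		base += 0x800 - 8 * 0x40;

	return base + context_id * f->ctx_offset_stride + reg;
}

static unsigned int imuctr(const struct features *f, unsigned int utlb)
{
	/* IMUCTR(n): uTLBs 32..63 use a second block starting at 0x600. */
	unsigned int reg = utlb < 32 ? 0x0300 + utlb * 16
				     : 0x0600 + (utlb - 32) * 16;

	return f->utlb_offset_base + reg;
}

int main(void)
{
	/* IMTTBCR (offset 0x0008) of context 2: 0x0088 on R-Car Gen3 */
	printf("gen3 ctx2  IMTTBCR: 0x%04x\n", ctx_reg(&rcar_gen3, 2, 0x0008));
	/* 0x10000 + 2 * 0x1040 + 0x8 = 0x12088 on R-Car Gen4 */
	printf("gen4 ctx2  IMTTBCR: 0x%05x\n", ctx_reg(&rcar_gen4, 2, 0x0008));
	/* uTLB 33: 0x3000 + 0x0600 + 0x10 = 0x3610 on R-Car Gen4 */
	printf("gen4 utlb33 IMUCTR: 0x%04x\n", imuctr(&rcar_gen4, 33));
	return 0;
}

Compiled with any C compiler this prints 0x0088, 0x12088 and 0x3610, which is the layout implied by the feature tables and offset helpers in the patch.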

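A second, equally hypothetical sketch of the context-allocation change: ipmmu_domain_allocate_context() now returns -EBUSY when the context bitmap is full instead of returning the bitmap size, so callers only check for a negative value. The simplified allocator below stands in for the find_first_zero_bit()/set_bit()/clear_bit() calls used in the driver; the names and the fixed bitmap width are illustrative only.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_CTX 8U	/* number_of_contexts on R-Car Gen3 */

/* Stand-in for find_first_zero_bit() + set_bit() on mmu->ctx. */
static int allocate_context(uint32_t *ctx_bitmap)
{
	for (unsigned int i = 0; i < NUM_CTX; i++) {
		if (!(*ctx_bitmap & (1U << i))) {
			*ctx_bitmap |= 1U << i;
			return i;
		}
	}
	return -EBUSY;	/* the old code returned IPMMU_CTX_MAX here */
}

/* Stand-in for clear_bit() in ipmmu_domain_free_context(). */
static void free_context(uint32_t *ctx_bitmap, unsigned int context_id)
{
	*ctx_bitmap &= ~(1U << context_id);
}

int main(void)
{
	uint32_t ctx = 0;

	for (int n = 0; n < 10; n++) {
		int id = allocate_context(&ctx);

		if (id < 0)
			printf("allocation %d failed: %d (-EBUSY)\n", n, id);
		else
			printf("allocation %d -> context %d\n", n, id);
	}

	free_context(&ctx, 3);
	printf("after freeing 3 -> context %d\n", allocate_context(&ctx));
	return 0;
}

In the driver these operations run under mmu->lock with interrupts disabled; the sketch omits the locking for brevity.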