Diffstat (limited to 'drivers/iommu/arm/arm-smmu/arm-smmu.c')
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c | 166
1 file changed, 103 insertions(+), 63 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index d6d1a2a55cc0..68b6bc5e7c71 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -82,6 +82,23 @@ static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
pm_runtime_put_autosuspend(smmu->dev);
}
+static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
+{
+ /*
+ * Setup an autosuspend delay to avoid bouncing runpm state.
+ * Otherwise, if a driver for a suspended consumer device
+ * unmaps buffers, it will runpm resume/suspend for each one.
+ *
+ * For example, when used by a GPU device, when an application
+ * or game exits, it can trigger unmapping 100s or 1000s of
+ * buffers. With a runpm cycle for each buffer, that adds up
+ * to 5-10sec worth of reprogramming the context bank, while
+ * the system appears to be locked up to the user.
+ */
+ pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+ pm_runtime_use_autosuspend(smmu->dev);
+}
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
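
Note on arm_smmu_rpm_use_autosuspend() above: it centralises the autosuspend setup that this patch removes from arm_smmu_attach_dev() further down. As a hedged illustration of the churn the 20ms delay avoids, a minimal consumer-side sketch (example_unmap_all() is hypothetical; only the pm_runtime_* calls are real kernel API):

#include <linux/pm_runtime.h>

/* Without autosuspend, every unmap would fully cycle the SMMU through
 * runtime resume/suspend, reprogramming the context bank each time.
 * With a 20ms autosuspend delay, back-to-back unmaps keep the SMMU
 * powered and it only suspends once the burst ends. */
static void example_unmap_all(struct device *smmu_dev, int nbufs)
{
	int i;

	for (i = 0; i < nbufs; i++) {
		pm_runtime_get_sync(smmu_dev);        /* resume if suspended */
		/* ... unmap one buffer via the IOMMU API ... */
		pm_runtime_mark_last_busy(smmu_dev);
		pm_runtime_put_autosuspend(smmu_dev); /* deferred by the delay */
	}
}
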
@@ -392,8 +409,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
u32 fsr, fsynr, cbfrsynra;
unsigned long iova;
- struct iommu_domain *domain = dev;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_domain *smmu_domain = dev;
struct arm_smmu_device *smmu = smmu_domain->smmu;
int idx = smmu_domain->cfg.cbndx;
int ret;
@@ -406,7 +422,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
- ret = report_iommu_fault(domain, NULL, iova,
+ ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (ret == -ENOSYS)
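
Note on the context-fault change above: the IRQ cookie is now the arm_smmu_domain itself, so the handler no longer needs to_smmu_domain() in the fault path; that helper is a plain container_of(). A standalone, runnable sketch of the recovery pattern (struct layout simplified for illustration, not the real one):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct iommu_domain { int type; };

struct arm_smmu_domain {
	int cbndx;                  /* simplified; really lives in cfg */
	struct iommu_domain domain; /* embedded generic domain */
};

int main(void)
{
	struct arm_smmu_domain sd = { .cbndx = 3 };
	struct iommu_domain *dom = &sd.domain; /* what the core passes around */

	/* Recover the containing arm_smmu_domain from the embedded member: */
	struct arm_smmu_domain *back =
		container_of(dom, struct arm_smmu_domain, domain);

	printf("cbndx=%d\n", back->cbndx); /* prints cbndx=3 */
	return 0;
}
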
@@ -607,7 +623,7 @@ static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
}
-static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu,
struct device *dev)
{
@@ -616,7 +632,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
enum io_pgtable_fmt fmt;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct iommu_domain *domain = &smmu_domain->domain;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
irqreturn_t (*context_fault)(int irq, void *dev);
@@ -624,12 +640,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
- smmu_domain->smmu = smmu;
- goto out_unlock;
- }
-
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -796,8 +806,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
else
context_fault = arm_smmu_context_fault;
- ret = devm_request_irq(smmu->dev, irq, context_fault,
- IRQF_SHARED, "arm-smmu-context-fault", domain);
+ ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
+ "arm-smmu-context-fault", smmu_domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
@@ -818,14 +828,13 @@ out_unlock:
return ret;
}
-static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
int ret, irq;
- if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
+ if (!smmu)
return;
ret = arm_smmu_rpm_get(smmu);
@@ -841,7 +850,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
irq = smmu->irqs[cfg->irptndx];
- devm_free_irq(smmu->dev, irq, domain);
+ devm_free_irq(smmu->dev, irq, smmu_domain);
}
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
@@ -850,14 +859,10 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
arm_smmu_rpm_put(smmu);
}
-static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
- if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
- return NULL;
- }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -870,6 +875,15 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
+ if (dev) {
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+
+ if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
+ kfree(smmu_domain);
+ return NULL;
+ }
+ }
+
return &smmu_domain->domain;
}
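
Note on the domain_alloc_paging conversion above: the core now supplies the device at allocation time, so the context bank can be finalised immediately when dev is known, and the old domain-type checks move out of the driver. A hedged sketch of the core-side contract this relies on (simplified; not the upstream implementation):

#include <linux/iommu.h>
#include <linux/err.h>

static struct iommu_domain *example_alloc_paging(struct device *dev,
						 const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops->domain_alloc_paging)
		return ERR_PTR(-EOPNOTSUPP);

	domain = ops->domain_alloc_paging(dev); /* driver may pre-finalise */
	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->type = IOMMU_DOMAIN_UNMANAGED;  /* paging domains only */
	return domain;
}
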
@@ -881,7 +895,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
* Free the domain resources. We assume that all devices have
* already been detached.
*/
- arm_smmu_destroy_domain_context(domain);
+ arm_smmu_destroy_domain_context(smmu_domain);
kfree(smmu_domain);
}
@@ -1081,21 +1095,14 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
mutex_unlock(&smmu->stream_map_mutex);
}
-static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master_cfg *cfg,
- struct iommu_fwspec *fwspec)
+static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
+ enum arm_smmu_s2cr_type type,
+ u8 cbndx, struct iommu_fwspec *fwspec)
{
- struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_s2cr *s2cr = smmu->s2crs;
- u8 cbndx = smmu_domain->cfg.cbndx;
- enum arm_smmu_s2cr_type type;
int i, idx;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
- type = S2CR_TYPE_BYPASS;
- else
- type = S2CR_TYPE_TRANS;
-
for_each_cfg_sme(cfg, fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1105,7 +1112,6 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
s2cr[idx].cbndx = cbndx;
arm_smmu_write_s2cr(smmu, idx);
}
- return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
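
Note on arm_smmu_master_install_s2crs() above: the S2CR type and context bank are now passed in explicitly rather than derived from the domain's stage. For reference, the three call shapes introduced by the hunks that follow:

arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
			      smmu_domain->cfg.cbndx, fwspec); /* paging attach */
arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_BYPASS, 0, fwspec); /* identity */
arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_FAULT, 0, fwspec);  /* blocked */

The latter two are reached via arm_smmu_attach_dev_type(); the context bank index is unused for bypass and fault routing, so 0 is passed.
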
@@ -1116,11 +1122,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_device *smmu;
int ret;
- if (!fwspec || fwspec->ops != &arm_smmu_ops) {
- dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
- return -ENXIO;
- }
-
/*
* FIXME: The arch/arm DMA API code tries to attach devices to its own
* domains between of_xlate() and probe_device() - we have no way to cope
@@ -1139,7 +1140,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return ret;
/* Ensure that the domain is finalised */
- ret = arm_smmu_init_domain_context(domain, smmu, dev);
+ ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev);
if (ret < 0)
goto rpm_put;
@@ -1153,27 +1154,66 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* Looks ok, so add the device to the domain */
- ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
-
- /*
- * Setup an autosuspend delay to avoid bouncing runpm state.
- * Otherwise, if a driver for a suspended consumer device
- * unmaps buffers, it will runpm resume/suspend for each one.
- *
- * For example, when used by a GPU device, when an application
- * or game exits, it can trigger unmapping 100s or 1000s of
- * buffers. With a runpm cycle for each buffer, that adds up
- * to 5-10sec worth of reprogramming the context bank, while
- * the system appears to be locked up to the user.
- */
- pm_runtime_set_autosuspend_delay(smmu->dev, 20);
- pm_runtime_use_autosuspend(smmu->dev);
-
+ arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
+ smmu_domain->cfg.cbndx, fwspec);
+ arm_smmu_rpm_use_autosuspend(smmu);
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
}
+static int arm_smmu_attach_dev_type(struct device *dev,
+ enum arm_smmu_s2cr_type type)
+{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_device *smmu;
+ int ret;
+
+ if (!cfg)
+ return -ENODEV;
+ smmu = cfg->smmu;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return ret;
+
+ arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
+ arm_smmu_rpm_use_autosuspend(smmu);
+ arm_smmu_rpm_put(smmu);
+ return 0;
+}
+
+static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
+}
+
+static const struct iommu_domain_ops arm_smmu_identity_ops = {
+ .attach_dev = arm_smmu_attach_dev_identity,
+};
+
+static struct iommu_domain arm_smmu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &arm_smmu_identity_ops,
+};
+
+static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
+ struct device *dev)
+{
+ return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
+}
+
+static const struct iommu_domain_ops arm_smmu_blocked_ops = {
+ .attach_dev = arm_smmu_attach_dev_blocked,
+};
+
+static struct iommu_domain arm_smmu_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &arm_smmu_blocked_ops,
+};
+
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
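
Note on the static identity and blocked domains added in the hunk above: they are singletons shared by every device, so these domain types need no per-device allocation, and the core attaches through their attach_dev op directly. A hedged sketch of that core-side use (simplified; not upstream code):

#include <linux/iommu.h>

static int example_set_bypass(struct device *dev,
			      const struct iommu_ops *ops)
{
	struct iommu_domain *identity = ops->identity_domain;

	if (!identity)
		return -EOPNOTSUPP; /* driver has no static identity domain */

	/* No allocation: the shared singleton is attached directly. */
	return identity->ops->attach_dev(identity, dev);
}
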
@@ -1357,10 +1397,8 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
fwspec = dev_iommu_fwspec_get(dev);
if (ret)
goto out_free;
- } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
- smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
- return ERR_PTR(-ENODEV);
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
}
ret = -EINVAL;
@@ -1427,7 +1465,6 @@ static void arm_smmu_release_device(struct device *dev)
arm_smmu_rpm_put(cfg->smmu);
- dev_iommu_priv_set(dev, NULL);
kfree(cfg);
}
@@ -1560,8 +1597,10 @@ static int arm_smmu_def_domain_type(struct device *dev)
}
static struct iommu_ops arm_smmu_ops = {
+ .identity_domain = &arm_smmu_identity_domain,
+ .blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc = arm_smmu_domain_alloc,
+ .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
@@ -2161,7 +2200,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return err;
}
- err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
+ err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
+ using_legacy_binding ? NULL : dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
iommu_device_sysfs_remove(&smmu->iommu);