Diffstat (limited to 'drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c')
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 79
1 file changed, 35 insertions(+), 44 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 7445454c2af2..0ffb1cf17e0b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1063,6 +1063,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
bool cd_live;
__le64 *cdptr;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ struct arm_smmu_device *smmu = master->smmu;
if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
return -E2BIG;
@@ -1077,6 +1078,8 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
if (!cd) { /* (5) */
val = 0;
} else if (cd == &quiet_cd) { /* (4) */
+ if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+ val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
val |= CTXDESC_CD_0_TCR_EPD0;
} else if (cd_live) { /* (3) */
val &= ~CTXDESC_CD_0_ASID;
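[ The quiet_cd branch quiesces translation for an SSID by setting EPD0, which
  makes TTB0 walks abort instead of translating. The two added lines handle
  SMMUs without STALL_FORCE: there, the aborts triggered by EPD0 should
  presumably be terminated silently, so the Stall (S) and fault Record (R)
  bits are dropped first. A minimal sketch of the transformation, using the
  field names from arm-smmu-v3.h (the helper itself is hypothetical):

	/* Hypothetical helper: derive a "quiet" CD dword 0 from a live one. */
	static u64 arm_smmu_quiet_cd_val(struct arm_smmu_device *smmu, u64 val)
	{
		/*
		 * Without STALL_FORCE the SMMU will not stall faulting
		 * transactions, so clear S, and clear R so the aborts
		 * caused by EPD0 are not recorded as events.
		 */
		if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
			val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);

		/* EPD0 disables TTB0 walks: translations abort quietly. */
		return val | CTXDESC_CD_0_TCR_EPD0;
	}
]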
@@ -1249,7 +1252,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
- __le64 *dst)
+ struct arm_smmu_ste *dst)
{
/*
* This is hideously complicated, but we only really care about
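[ From this hunk on, a stream-table entry is passed around as a dedicated
  struct arm_smmu_ste rather than a bare __le64 array, so accesses become
  dst->data[n]. The wrapper is added to arm-smmu-v3.h alongside this change;
  shown here for reference:

	/* An STE is eight 64-bit little-endian words. */
	#define STRTAB_STE_DWORDS	8

	struct arm_smmu_ste {
		__le64 data[STRTAB_STE_DWORDS];
	};

  Besides type safety, the wrapper means pointer arithmetic advances a whole
  STE at a time, which the later hunks rely on. ]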
@@ -1267,12 +1270,12 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
* 2. Write everything apart from dword 0, sync, write dword 0, sync
* 3. Update Config, sync
*/
- u64 val = le64_to_cpu(dst[0]);
+ u64 val = le64_to_cpu(dst->data[0]);
bool ste_live = false;
- struct arm_smmu_device *smmu = NULL;
+ struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
struct arm_smmu_s2_cfg *s2_cfg = NULL;
- struct arm_smmu_domain *smmu_domain = NULL;
+ struct arm_smmu_domain *smmu_domain = master->domain;
struct arm_smmu_cmdq_ent prefetch_cmd = {
.opcode = CMDQ_OP_PREFETCH_CFG,
.prefetch = {
@@ -1280,18 +1283,12 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
},
};
- if (master) {
- smmu_domain = master->domain;
- smmu = master->smmu;
- }
-
if (smmu_domain) {
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
cd_table = &master->cd_table;
break;
case ARM_SMMU_DOMAIN_S2:
- case ARM_SMMU_DOMAIN_NESTED:
s2_cfg = &smmu_domain->s2_cfg;
break;
default:
@@ -1325,10 +1322,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
else
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
- dst[0] = cpu_to_le64(val);
- dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ dst->data[0] = cpu_to_le64(val);
+ dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
STRTAB_STE_1_SHCFG_INCOMING));
- dst[2] = 0; /* Nuke the VMID */
+ dst->data[2] = 0; /* Nuke the VMID */
/*
* The SMMU can perform negative caching, so we must sync
* the STE regardless of whether the old value was live.
@@ -1343,7 +1340,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
BUG_ON(ste_live);
- dst[1] = cpu_to_le64(
+ dst->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
@@ -1352,7 +1349,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!master->stall_enabled)
- dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
+ dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
@@ -1362,7 +1359,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
if (s2_cfg) {
BUG_ON(ste_live);
- dst[2] = cpu_to_le64(
+ dst->data[2] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
#ifdef __BIG_ENDIAN
@@ -1371,18 +1368,18 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
STRTAB_STE_2_S2R);
- dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+ dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
}
if (master->ats_enabled)
- dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
+ dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
STRTAB_STE_1_EATS_TRANS));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* See comment in arm_smmu_write_ctx_desc() */
- WRITE_ONCE(dst[0], cpu_to_le64(val));
+ WRITE_ONCE(dst->data[0], cpu_to_le64(val));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
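[ The sequence just above is the update discipline from the function's opening
  comment: make every dword except dword 0 visible, sync, then publish dword 0
  (which carries the valid bit) and sync again. A condensed sketch of that
  discipline; the helper is hypothetical, not the driver's literal code:

	static void arm_smmu_publish_ste(struct arm_smmu_device *smmu, u32 sid,
					 struct arm_smmu_ste *dst,
					 const __le64 *newval)
	{
		int i;

		/* Fill in everything except dword 0 first. */
		for (i = 1; i < STRTAB_STE_DWORDS; i++)
			dst->data[i] = newval[i];
		arm_smmu_sync_ste_for_sid(smmu, sid);

		/* Dword 0 carries V and CFG: write it last, then sync again. */
		WRITE_ONCE(dst->data[0], newval[0]);
		arm_smmu_sync_ste_for_sid(smmu, sid);
	}
]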
@@ -1390,7 +1387,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
+static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
+ unsigned int nent, bool force)
{
unsigned int i;
u64 val = STRTAB_STE_0_V;
@@ -1401,11 +1399,11 @@ static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool fo
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- strtab[0] = cpu_to_le64(val);
- strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
- strtab[2] = 0;
- strtab += STRTAB_STE_DWORDS;
+ strtab->data[0] = cpu_to_le64(val);
+ strtab->data[1] = cpu_to_le64(FIELD_PREP(
+ STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+ strtab->data[2] = 0;
+ strtab++;
}
}
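[ Note the loop now steps with strtab++ instead of strtab += STRTAB_STE_DWORDS;
  both advance 64 bytes, because the wrapper is exactly one STE wide. That
  invariant could be spelled out with a compile-time check (not in the driver):

	_Static_assert(sizeof(struct arm_smmu_ste) ==
		       STRTAB_STE_DWORDS * sizeof(__le64),
		       "struct arm_smmu_ste must be exactly one STE wide");
]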
@@ -2171,7 +2169,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
fmt = ARM_64_LPAE_S1;
finalise_stage_fn = arm_smmu_domain_finalise_s1;
break;
- case ARM_SMMU_DOMAIN_NESTED:
case ARM_SMMU_DOMAIN_S2:
ias = smmu->ias;
oas = smmu->oas;
@@ -2209,26 +2206,23 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
return 0;
}
-static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+static struct arm_smmu_ste *
+arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
- __le64 *step;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- struct arm_smmu_strtab_l1_desc *l1_desc;
- int idx;
+ unsigned int idx1, idx2;
/* Two-level walk */
- idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
- l1_desc = &cfg->l1_desc[idx];
- idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
- step = &l1_desc->l2ptr[idx];
+ idx1 = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
+ idx2 = sid & ((1 << STRTAB_SPLIT) - 1);
+ return &cfg->l1_desc[idx1].l2ptr[idx2];
} else {
/* Simple linear lookup */
- step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
+ return (struct arm_smmu_ste *)&cfg
+ ->strtab[sid * STRTAB_STE_DWORDS];
}
-
- return step;
}
static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
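[ In the two-level case above, the stream ID splits into an L1 index (the upper
  bits, scaled by STRTAB_L1_DESC_DWORDS because l1_desc is still indexed in raw
  dwords) and an L2 index (the low STRTAB_SPLIT bits). A worked example,
  assuming STRTAB_SPLIT == 8 and STRTAB_L1_DESC_DWORDS == 1 as in this driver:

	/*
	 *   sid  = 0x1234
	 *   idx1 = (0x1234 >> 8) * 1        = 0x12   L1 descriptor
	 *   idx2 = 0x1234 & ((1 << 8) - 1)  = 0x34   STE within that L2 table
	 */
]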
@@ -2238,7 +2232,8 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
for (i = 0; i < master->num_streams; ++i) {
u32 sid = master->streams[i].id;
- __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ struct arm_smmu_ste *step =
+ arm_smmu_get_step_for_sid(smmu, sid);
/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
@@ -2649,9 +2644,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return ERR_PTR(-ENODEV);
-
if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
return ERR_PTR(-EBUSY);
@@ -2698,7 +2690,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
err_free_master:
kfree(master);
- dev_iommu_priv_set(dev, NULL);
return ERR_PTR(ret);
}
@@ -2742,7 +2733,7 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
if (smmu_domain->smmu)
ret = -EPERM;
else
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -3769,7 +3760,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
list_for_each_entry(e, &rmr_list, list) {
- __le64 *step;
+ struct arm_smmu_ste *step;
struct iommu_iort_rmr_data *rmr;
int ret, i;