Diffstat (limited to 'drivers/iommu/arm')
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c |  3
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c     | 20
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h     |  2
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c      |  4
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c           | 12
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c         |  6
6 files changed, 24 insertions(+), 23 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 0601dece0a0d..59a480974d80 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -220,6 +220,9 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
feat_mask |= ARM_SMMU_FEAT_VAX;
}
+ if (system_supports_bbml2_noabort())
+ feat_mask |= ARM_SMMU_FEAT_BBML2;
+
if ((smmu->features & feat_mask) != feat_mask)
return false;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 10cc6dc26b7b..f39bd7235011 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -38,7 +38,7 @@ module_param(disable_msipolling, bool, 0444);
MODULE_PARM_DESC(disable_msipolling,
"Disable MSI-based polling for CMD_SYNC completion.");
-static struct iommu_ops arm_smmu_ops;
+static const struct iommu_ops arm_smmu_ops;
static struct iommu_dirty_ops arm_smmu_dirty_ops;
enum arm_smmu_msi_index {
@@ -2906,8 +2906,8 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL);
if (!master_domain) {
- kfree(state->vmaster);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_vmaster;
}
master_domain->domain = new_domain;
master_domain->master = master;
@@ -2941,7 +2941,6 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
!arm_smmu_master_canwbs(master)) {
spin_unlock_irqrestore(&smmu_domain->devices_lock,
flags);
- kfree(state->vmaster);
ret = -EINVAL;
goto err_iopf;
}
@@ -2967,6 +2966,8 @@ err_iopf:
arm_smmu_disable_iopf(master, master_domain);
err_free_master_domain:
kfree(master_domain);
+err_free_vmaster:
+ kfree(state->vmaster);
return ret;
}
@@ -3674,7 +3675,7 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
-static struct iommu_ops arm_smmu_ops = {
+static const struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
@@ -3690,7 +3691,6 @@ static struct iommu_ops arm_smmu_ops = {
.def_domain_type = arm_smmu_def_domain_type,
.viommu_alloc = arm_vsmmu_alloc,
.user_pasid_table = 1,
- .pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev,
@@ -4457,6 +4457,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
if (FIELD_GET(IDR3_FWB, reg))
smmu->features |= ARM_SMMU_FEAT_S2FWB;
+ if (FIELD_GET(IDR3_BBM, reg) == 2)
+ smmu->features |= ARM_SMMU_FEAT_BBML2;
+
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
@@ -4504,11 +4507,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->oas = 48;
}
- if (arm_smmu_ops.pgsize_bitmap == -1UL)
- arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
- else
- arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
-
/* Set the DMA mask for our table walker */
if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
dev_warn(smmu->dev,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index ea41d790463e..a33bf520ba97 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -60,6 +60,7 @@ struct arm_smmu_device;
#define ARM_SMMU_IDR3 0xc
#define IDR3_FWB (1 << 8)
#define IDR3_RIL (1 << 10)
+#define IDR3_BBM GENMASK(12, 11)
#define ARM_SMMU_IDR5 0x14
#define IDR5_STALL_MAX GENMASK(31, 16)
@@ -755,6 +756,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HA (1 << 21)
#define ARM_SMMU_FEAT_HD (1 << 22)
#define ARM_SMMU_FEAT_S2FWB (1 << 23)
+#define ARM_SMMU_FEAT_BBML2 (1 << 24)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 62874b18f645..57c097e87613 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -355,7 +355,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->set_prr_addr = NULL;
if (of_device_is_compatible(np, "qcom,smmu-500") &&
- of_device_is_compatible(np, "qcom,adreno-smmu")) {
+ !of_device_is_compatible(np, "qcom,sm8250-smmu-500") &&
+ of_device_is_compatible(np, "qcom,adreno-smmu")) {
priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit;
priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr;
}
@@ -379,6 +380,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sdm670-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
+ { .compatible = "qcom,sm6115-mdss" },
{ .compatible = "qcom,sm6350-mdss" },
{ .compatible = "qcom,sm6375-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 8d95b14c7d5a..4ced4b5bee4d 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -109,7 +109,7 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
}
static struct platform_driver arm_smmu_driver;
-static struct iommu_ops arm_smmu_ops;
+static const struct iommu_ops arm_smmu_ops;
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static struct device_node *dev_get_dev_node(struct device *dev)
@@ -919,6 +919,8 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
struct arm_smmu_domain *smmu_domain;
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = cfg->smmu;
/*
* Allocate the domain and initialise some of its data structures.
@@ -931,6 +933,7 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
+ smmu_domain->domain.pgsize_bitmap = smmu->pgsize_bitmap;
return &smmu_domain->domain;
}
@@ -1627,7 +1630,7 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
-static struct iommu_ops arm_smmu_ops = {
+static const struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
@@ -1639,7 +1642,6 @@ static struct iommu_ops arm_smmu_ops = {
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
- .pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev,
@@ -1919,10 +1921,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
- if (arm_smmu_ops.pgsize_bitmap == -1UL)
- arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
- else
- arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
smmu->pgsize_bitmap);
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 3907924646a2..c5be95e56031 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -229,7 +229,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
goto out_unlock;
pgtbl_cfg = (struct io_pgtable_cfg) {
- .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
+ .pgsize_bitmap = domain->pgsize_bitmap,
.ias = 32,
.oas = 40,
.tlb = &qcom_flush_ops,
@@ -246,8 +246,6 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
goto out_clear_iommu;
}
- /* Update the domain's page sizes to reflect the page table format */
- domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
domain->geometry.force_aperture = true;
@@ -337,6 +335,7 @@ static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev)
mutex_init(&qcom_domain->init_mutex);
spin_lock_init(&qcom_domain->pgtbl_lock);
+ qcom_domain->domain.pgsize_bitmap = SZ_4K;
return &qcom_domain->domain;
}
@@ -598,7 +597,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.probe_device = qcom_iommu_probe_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
- .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = qcom_iommu_attach_dev,
.map_pages = qcom_iommu_map,