Diffstat (limited to 'drivers/iommu/tegra-smmu.c')
-rw-r--r--  drivers/iommu/tegra-smmu.c  589
1 file changed, 351 insertions(+), 238 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 3a5c7dc6dc57..c391e7f2cde6 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
@@ -12,18 +9,24 @@
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
+#include "iommu-pages.h"
+
struct tegra_smmu_group {
struct list_head list;
+ struct tegra_smmu *smmu;
const struct tegra_smmu_group_soc *soc;
struct iommu_group *group;
+ unsigned int swgroup;
};
struct tegra_smmu {
@@ -48,13 +51,17 @@ struct tegra_smmu {
struct iommu_device iommu; /* IOMMU Core code handle */
};
+struct tegra_pd;
+struct tegra_pt;
+
struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
+ spinlock_t lock;
u32 *count;
- struct page **pts;
- struct page *pd;
+ struct tegra_pt **pts;
+ struct tegra_pd *pd;
dma_addr_t pd_dma;
unsigned id;
u32 attr;
@@ -102,7 +109,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
-#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
@@ -131,6 +137,11 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
+#define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
+#define SMMU_OFFSET_IN_PAGE(x) ((unsigned long)(x) & ~SMMU_PAGE_MASK)
+#define SMMU_PFN_PHYS(x) ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
+#define SMMU_PHYS_PFN(x) ((unsigned long)((x) >> SMMU_PTE_SHIFT))
+
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
@@ -146,8 +157,14 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
SMMU_PDE_NONSECURE)
-#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
- SMMU_PTE_NONSECURE)
+
+struct tegra_pd {
+ u32 val[SMMU_NUM_PDE];
+};
+
+struct tegra_pt {
+ u32 val[SMMU_NUM_PTE];
+};
static unsigned int iova_pd_index(unsigned long iova)
{
@@ -165,9 +182,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}
-static dma_addr_t smmu_pde_to_dma(u32 pde)
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
- return pde << 12;
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -205,8 +222,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_MATCH_ALL;
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -216,8 +237,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_SECTION(iova);
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -227,61 +252,50 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_GROUP(iova);
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
static inline void smmu_flush(struct tegra_smmu *smmu)
{
- smmu_readl(smmu, SMMU_CONFIG);
+ smmu_readl(smmu, SMMU_PTB_ASID);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
unsigned long id;
- mutex_lock(&smmu->lock);
-
id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
- if (id >= smmu->soc->num_asids) {
- mutex_unlock(&smmu->lock);
+ if (id >= smmu->soc->num_asids)
return -ENOSPC;
- }
set_bit(id, smmu->asids);
*idp = id;
- mutex_unlock(&smmu->lock);
return 0;
}
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
- mutex_lock(&smmu->lock);
clear_bit(id, smmu->asids);
- mutex_unlock(&smmu->lock);
-}
-
-static bool tegra_smmu_capable(enum iommu_cap cap)
-{
- return false;
}
-static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
+static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
{
struct tegra_smmu_as *as;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
as = kzalloc(sizeof(*as), GFP_KERNEL);
if (!as)
return NULL;
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
- as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
+ as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD);
if (!as->pd) {
kfree(as);
return NULL;
@@ -289,7 +303,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
if (!as->count) {
- __free_page(as->pd);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
@@ -297,11 +311,15 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
if (!as->pts) {
kfree(as->count);
- __free_page(as->pd);
+ iommu_free_pages(as->pd);
kfree(as);
return NULL;
}
+ spin_lock_init(&as->lock);
+
+ as->domain.pgsize_bitmap = SZ_4K;
+
/* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
@@ -316,6 +334,9 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
/* TODO: free page directory and page tables */
+ WARN_ON_ONCE(as->use_count);
+ kfree(as->count);
+ kfree(as->pts);
kfree(as);
}
@@ -342,17 +363,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
unsigned int i;
u32 value;
- for (i = 0; i < smmu->soc->num_clients; i++) {
- const struct tegra_mc_client *client = &smmu->soc->clients[i];
-
- if (client->swgroup != swgroup)
- continue;
-
- value = smmu_readl(smmu, client->smmu.reg);
- value |= BIT(client->smmu.bit);
- smmu_writel(smmu, value, client->smmu.reg);
- }
-
group = tegra_smmu_find_swgroup(smmu, swgroup);
if (group) {
value = smmu_readl(smmu, group->reg);
@@ -360,6 +370,22 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
value |= SMMU_ASID_VALUE(asid);
value |= SMMU_ASID_ENABLE;
smmu_writel(smmu, value, group->reg);
+ } else {
+ pr_warn("%s group from swgroup %u not found\n", __func__,
+ swgroup);
+ /* No point moving ahead if group was not found */
+ return;
+ }
+
+ for (i = 0; i < smmu->soc->num_clients; i++) {
+ const struct tegra_mc_client *client = &smmu->soc->clients[i];
+
+ if (client->swgroup != swgroup)
+ continue;
+
+ value = smmu_readl(smmu, client->regs.smmu.reg);
+ value |= BIT(client->regs.smmu.bit);
+ smmu_writel(smmu, value, client->regs.smmu.reg);
}
}
@@ -385,9 +411,9 @@ static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
if (client->swgroup != swgroup)
continue;
- value = smmu_readl(smmu, client->smmu.reg);
- value &= ~BIT(client->smmu.bit);
- smmu_writel(smmu, value, client->smmu.reg);
+ value = smmu_readl(smmu, client->regs.smmu.reg);
+ value &= ~BIT(client->regs.smmu.bit);
+ smmu_writel(smmu, value, client->regs.smmu.reg);
}
}
@@ -395,17 +421,21 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
struct tegra_smmu_as *as)
{
u32 value;
- int err;
+ int err = 0;
+
+ mutex_lock(&smmu->lock);
if (as->use_count > 0) {
as->use_count++;
- return 0;
+ goto unlock;
}
- as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
- DMA_TO_DEVICE);
- if (dma_mapping_error(smmu->dev, as->pd_dma))
- return -ENOMEM;
+ as->pd_dma =
+ dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ if (dma_mapping_error(smmu->dev, as->pd_dma)) {
+ err = -ENOMEM;
+ goto unlock;
+ }
/* We can't handle 64-bit DMA addresses */
if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
@@ -428,96 +458,114 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
as->smmu = smmu;
as->use_count++;
+ mutex_unlock(&smmu->lock);
+
return 0;
err_unmap:
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+unlock:
+ mutex_unlock(&smmu->lock);
+
return err;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
struct tegra_smmu_as *as)
{
- if (--as->use_count > 0)
+ mutex_lock(&smmu->lock);
+
+ if (--as->use_count > 0) {
+ mutex_unlock(&smmu->lock);
return;
+ }
tegra_smmu_free_asid(smmu, as->id);
- dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+ dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
as->smmu = NULL;
+
+ mutex_unlock(&smmu->lock);
}
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
- struct tegra_smmu *smmu = dev->archdata.iommu;
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
struct tegra_smmu_as *as = to_smmu_as(domain);
- struct device_node *np = dev->of_node;
- struct of_phandle_args args;
- unsigned int index = 0;
- int err = 0;
-
- while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
- &args)) {
- unsigned int swgroup = args.args[0];
-
- if (args.np != smmu->dev->of_node) {
- of_node_put(args.np);
- continue;
- }
+ unsigned int index;
+ int err;
- of_node_put(args.np);
+ if (!fwspec)
+ return -ENOENT;
+ for (index = 0; index < fwspec->num_ids; index++) {
err = tegra_smmu_as_prepare(smmu, as);
- if (err < 0)
- return err;
+ if (err)
+ goto disable;
- tegra_smmu_enable(smmu, swgroup, as->id);
- index++;
+ tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
}
if (index == 0)
return -ENODEV;
return 0;
+
+disable:
+ while (index--) {
+ tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
+ tegra_smmu_as_unprepare(smmu, as);
+ }
+
+ return err;
}
-static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct tegra_smmu_as *as = to_smmu_as(domain);
- struct device_node *np = dev->of_node;
- struct tegra_smmu *smmu = as->smmu;
- struct of_phandle_args args;
- unsigned int index = 0;
-
- while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
- &args)) {
- unsigned int swgroup = args.args[0];
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct tegra_smmu_as *as;
+ struct tegra_smmu *smmu;
+ unsigned int index;
- if (args.np != smmu->dev->of_node) {
- of_node_put(args.np);
- continue;
- }
+ if (!fwspec)
+ return -ENODEV;
- of_node_put(args.np);
+ if (old == identity_domain || !old)
+ return 0;
- tegra_smmu_disable(smmu, swgroup, as->id);
+ as = to_smmu_as(old);
+ smmu = as->smmu;
+ for (index = 0; index < fwspec->num_ids; index++) {
+ tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
tegra_smmu_as_unprepare(smmu, as);
- index++;
}
+ return 0;
}
+static struct iommu_domain_ops tegra_smmu_identity_ops = {
+ .attach_dev = tegra_smmu_identity_attach,
+};
+
+static struct iommu_domain tegra_smmu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &tegra_smmu_identity_ops,
+};
+
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
u32 value)
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
+ u32 *pd = &as->pd->val[pd_index];
unsigned long offset = pd_index * sizeof(*pd);
/* Set the page directory entry first */
- pd[pd_index] = value;
+ *pd = value;
/* Then flush the page directory entry from caches */
dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
@@ -529,68 +577,58 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova)
{
- u32 *pt = page_address(pt_page);
-
- return pt + iova_pt_index(iova);
+ return &pt->val[iova_pt_index(iova)];
}
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
- struct page *pt_page;
- u32 *pd;
+ struct tegra_smmu *smmu = as->smmu;
+ struct tegra_pt *pt;
- pt_page = as->pts[pd_index];
- if (!pt_page)
+ pt = as->pts[pd_index];
+ if (!pt)
return NULL;
- pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]);
- return tegra_smmu_pte_offset(pt_page, iova);
+ return tegra_smmu_pte_offset(pt, iova);
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- dma_addr_t *dmap)
+ dma_addr_t *dmap, struct tegra_pt *pt)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
if (!as->pts[pde]) {
- struct page *page;
dma_addr_t dma;
- page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
- if (!page)
- return NULL;
-
- dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
+ dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
- __free_page(page);
+ iommu_free_pages(pt);
return NULL;
}
if (!smmu_dma_addr_valid(smmu, dma)) {
- dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
- DMA_TO_DEVICE);
- __free_page(page);
+ dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
return NULL;
}
- as->pts[pde] = page;
+ as->pts[pde] = pt;
tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
SMMU_PDE_NEXT));
*dmap = dma;
} else {
- u32 *pd = page_address(as->pd);
-
- *dmap = smmu_pde_to_dma(pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -606,7 +644,7 @@ static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
unsigned int pde = iova_pd_index(iova);
- struct page *page = as->pts[pde];
+ struct tegra_pt *pt = as->pts[pde];
/*
* When no entries in this page table are used anymore, return the
@@ -614,13 +652,13 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
*/
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
- u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]);
tegra_smmu_set_pde(as, iova, 0);
- dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
- __free_page(page);
+ dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT,
+ DMA_TO_DEVICE);
+ iommu_free_pages(pt);
as->pts[pde] = NULL;
}
}
@@ -629,7 +667,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
u32 *pte, dma_addr_t pte_dma, u32 val)
{
struct tegra_smmu *smmu = as->smmu;
- unsigned long offset = offset_in_page(pte);
+ unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);
*pte = val;
@@ -640,14 +678,61 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as,
+ unsigned long iova, gfp_t gfp,
+ unsigned long *flags)
+{
+ unsigned int pde = iova_pd_index(iova);
+ struct tegra_pt *pt = as->pts[pde];
+
+ /* at first check whether allocation needs to be done at all */
+ if (pt)
+ return pt;
+
+ /*
+ * In order to prevent exhaustion of the atomic memory pool, we
+ * allocate page in a sleeping context if GFP flags permit. Hence
+ * spinlock needs to be unlocked and re-locked after allocation.
+ */
+ if (gfpflags_allow_blocking(gfp))
+ spin_unlock_irqrestore(&as->lock, *flags);
+
+ pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, SMMU_SIZE_PT);
+
+ if (gfpflags_allow_blocking(gfp))
+ spin_lock_irqsave(&as->lock, *flags);
+
+ /*
+ * In a case of blocking allocation, a concurrent mapping may win
+ * the PDE allocation. In this case the allocated page isn't needed
+ * if allocation succeeded and the allocation failure isn't fatal.
+ */
+ if (as->pts[pde]) {
+ if (pt)
+ iommu_free_pages(pt);
+
+ pt = as->pts[pde];
+ }
+
+ return pt;
+}
+
+static int
+__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
+ unsigned long *flags)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
+ struct tegra_pt *pt;
+ u32 pte_attrs;
u32 *pte;
- pte = as_get_pte(as, iova, &pte_dma);
+ pt = as_get_pde_page(as, iova, gfp, flags);
+ if (!pt)
+ return -ENOMEM;
+
+ pte = as_get_pte(as, iova, &pte_dma, pt);
if (!pte)
return -ENOMEM;
@@ -655,14 +740,23 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
if (*pte == 0)
tegra_smmu_pte_get_use(as, iova);
+ pte_attrs = SMMU_PTE_NONSECURE;
+
+ if (prot & IOMMU_READ)
+ pte_attrs |= SMMU_PTE_READABLE;
+
+ if (prot & IOMMU_WRITE)
+ pte_attrs |= SMMU_PTE_WRITABLE;
+
tegra_smmu_set_pte(as, iova, pte, pte_dma,
- __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
+ SMMU_PHYS_PFN(paddr) | pte_attrs);
return 0;
}
-static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
+static size_t
+__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
@@ -678,6 +772,37 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return size;
}
+static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, size_t count,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&as->lock, flags);
+ ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ if (!ret)
+ *mapped = size;
+
+ return ret;
+}
+
+static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, size_t count, struct iommu_iotlb_gather *gather)
+{
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+ unsigned long flags;
+
+ spin_lock_irqsave(&as->lock, flags);
+ size = __tegra_smmu_unmap(domain, iova, size, gather);
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ return size;
+}
+
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -692,7 +817,7 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
pfn = *pte & as->smmu->pfn_mask;
- return PFN_PHYS(pfn);
+ return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
@@ -705,6 +830,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
return NULL;
mc = platform_get_drvdata(pdev);
+ put_device(&pdev->dev);
if (!mc)
return NULL;
@@ -712,12 +838,12 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
}
static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
const struct iommu_ops *ops = smmu->iommu.ops;
int err;
- err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
+ err = iommu_fwspec_init(dev, dev_fwnode(smmu->dev));
if (err < 0) {
dev_err(dev, "failed to initialize fwspec: %d\n", err);
return err;
@@ -726,18 +852,16 @@ static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
err = ops->of_xlate(dev, args);
if (err < 0) {
dev_err(dev, "failed to parse SW group ID: %d\n", err);
- iommu_fwspec_free(dev);
return err;
}
return 0;
}
-static int tegra_smmu_add_device(struct device *dev)
+static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
struct device_node *np = dev->of_node;
struct tegra_smmu *smmu = NULL;
- struct iommu_group *group;
struct of_phandle_args args;
unsigned int index = 0;
int err;
@@ -747,48 +871,22 @@ static int tegra_smmu_add_device(struct device *dev)
smmu = tegra_smmu_find(args.np);
if (smmu) {
err = tegra_smmu_configure(smmu, dev, &args);
- of_node_put(args.np);
-
- if (err < 0)
- return err;
-
- /*
- * Only a single IOMMU master interface is currently
- * supported by the Linux kernel, so abort after the
- * first match.
- */
- dev->archdata.iommu = smmu;
- iommu_device_link(&smmu->iommu, dev);
-
- break;
+ if (err < 0) {
+ of_node_put(args.np);
+ return ERR_PTR(err);
+ }
}
of_node_put(args.np);
index++;
}
+ smmu = dev_iommu_priv_get(dev);
if (!smmu)
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
-
- return 0;
-}
-
-static void tegra_smmu_remove_device(struct device *dev)
-{
- struct tegra_smmu *smmu = dev->archdata.iommu;
-
- if (smmu)
- iommu_device_unlink(&smmu->iommu, dev);
+ return ERR_PTR(-ENODEV);
- dev->archdata.iommu = NULL;
- iommu_group_remove_device(dev);
+ return &smmu->iommu;
}
static const struct tegra_smmu_group_soc *
@@ -804,22 +902,36 @@ tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
return NULL;
}
-static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
- unsigned int swgroup)
+static void tegra_smmu_group_release(void *iommu_data)
+{
+ struct tegra_smmu_group *group = iommu_data;
+ struct tegra_smmu *smmu = group->smmu;
+
+ mutex_lock(&smmu->lock);
+ list_del(&group->list);
+ mutex_unlock(&smmu->lock);
+}
+
+static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
const struct tegra_smmu_group_soc *soc;
+ unsigned int swgroup = fwspec->ids[0];
struct tegra_smmu_group *group;
+ struct iommu_group *grp;
+ /* Find group_soc associating with swgroup */
soc = tegra_smmu_find_group(smmu, swgroup);
- if (!soc)
- return NULL;
mutex_lock(&smmu->lock);
+ /* Find existing iommu_group associating with swgroup or group_soc */
list_for_each_entry(group, &smmu->groups, list)
- if (group->soc == soc) {
+ if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
+ grp = iommu_group_ref_get(group->group);
mutex_unlock(&smmu->lock);
- return group->group;
+ return grp;
}
group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
@@ -829,56 +941,75 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
}
INIT_LIST_HEAD(&group->list);
+ group->swgroup = swgroup;
+ group->smmu = smmu;
group->soc = soc;
- group->group = iommu_group_alloc();
+ if (dev_is_pci(dev))
+ group->group = pci_device_group(dev);
+ else
+ group->group = generic_device_group(dev);
+
if (IS_ERR(group->group)) {
devm_kfree(smmu->dev, group);
mutex_unlock(&smmu->lock);
return NULL;
}
+ iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
+ if (soc)
+ iommu_group_set_name(group->group, soc->name);
list_add_tail(&group->list, &smmu->groups);
mutex_unlock(&smmu->lock);
return group->group;
}
-static struct iommu_group *tegra_smmu_device_group(struct device *dev)
+static int tegra_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct tegra_smmu *smmu = dev->archdata.iommu;
- struct iommu_group *group;
+ struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
+ struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
+ u32 id = args->args[0];
+
+ /*
+ * Note: we are here releasing the reference of &iommu_pdev->dev, which
+ * is mc->dev. Although some functions in tegra_smmu_ops may keep using
+ * its private data beyond this point, it's still safe to do so because
+ * the SMMU parent device is the same as the MC, so the reference count
+ * isn't strictly necessary.
+ */
+ put_device(&iommu_pdev->dev);
- group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
- if (!group)
- group = generic_device_group(dev);
+ dev_iommu_priv_set(dev, mc->smmu);
- return group;
+ return iommu_fwspec_add_ids(dev, &id, 1);
}
-static int tegra_smmu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+static int tegra_smmu_def_domain_type(struct device *dev)
{
- u32 id = args->args[0];
-
- return iommu_fwspec_add_ids(dev, &id, 1);
+ /*
+ * FIXME: For now we want to run all translation in IDENTITY mode, due
+ * to some device quirks. Better would be to just quirk the troubled
+ * devices.
+ */
+ return IOMMU_DOMAIN_IDENTITY;
}
static const struct iommu_ops tegra_smmu_ops = {
- .capable = tegra_smmu_capable,
- .domain_alloc = tegra_smmu_domain_alloc,
- .domain_free = tegra_smmu_domain_free,
- .attach_dev = tegra_smmu_attach_dev,
- .detach_dev = tegra_smmu_detach_dev,
- .add_device = tegra_smmu_add_device,
- .remove_device = tegra_smmu_remove_device,
+ .identity_domain = &tegra_smmu_identity_domain,
+ .def_domain_type = &tegra_smmu_def_domain_type,
+ .domain_alloc_paging = tegra_smmu_domain_alloc_paging,
+ .probe_device = tegra_smmu_probe_device,
.device_group = tegra_smmu_device_group,
- .map = tegra_smmu_map,
- .unmap = tegra_smmu_unmap,
- .iova_to_phys = tegra_smmu_iova_to_phys,
.of_xlate = tegra_smmu_of_xlate,
- .pgsize_bitmap = SZ_4K,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = tegra_smmu_attach_dev,
+ .map_pages = tegra_smmu_map,
+ .unmap_pages = tegra_smmu_unmap,
+ .iova_to_phys = tegra_smmu_iova_to_phys,
+ .free = tegra_smmu_domain_free,
+ }
};
static void tegra_smmu_ahb_enable(void)
@@ -941,9 +1072,9 @@ static int tegra_smmu_clients_show(struct seq_file *s, void *data)
const struct tegra_mc_client *client = &smmu->soc->clients[i];
const char *status;
- value = smmu_readl(smmu, client->smmu.reg);
+ value = smmu_readl(smmu, client->regs.smmu.reg);
- if (value & BIT(client->smmu.bit))
+ if (value & BIT(client->regs.smmu.bit))
status = "yes";
else
status = "no";
@@ -959,8 +1090,6 @@ DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
smmu->debugfs = debugfs_create_dir("smmu", NULL);
- if (!smmu->debugfs)
- return;
debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
&tegra_smmu_swgroups_fops);
@@ -978,31 +1107,24 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
struct tegra_mc *mc)
{
struct tegra_smmu *smmu;
- size_t size;
u32 value;
int err;
- /* This can happen on Tegra20 which doesn't have an SMMU */
- if (!soc)
- return NULL;
-
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu)
return ERR_PTR(-ENOMEM);
/*
* This is a bit of a hack. Ideally we'd want to simply return this
- * value. However the IOMMU registration process will attempt to add
- * all devices to the IOMMU when bus_set_iommu() is called. In order
+ * value. However iommu_device_register() will attempt to add
+ * all devices to the IOMMU before we get that far. In order
* not to rely on global variables to track the IOMMU instance, we
- * set it here so that it can be looked up from the .add_device()
+ * set it here so that it can be looked up from the .probe_device()
* callback via the IOMMU device's .drvdata field.
*/
mc->smmu = smmu;
- size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
-
- smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
+ smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
if (!smmu->asids)
return ERR_PTR(-ENOMEM);
@@ -1014,10 +1136,11 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev;
smmu->mc = mc;
- smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ smmu->pfn_mask =
+ BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
- smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+ smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
smmu->tlb_mask);
@@ -1047,22 +1170,12 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
if (err)
return ERR_PTR(err);
- iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
- iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
-
- err = iommu_device_register(&smmu->iommu);
+ err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
if (err) {
iommu_device_sysfs_remove(&smmu->iommu);
return ERR_PTR(err);
}
- err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
- if (err < 0) {
- iommu_device_unregister(&smmu->iommu);
- iommu_device_sysfs_remove(&smmu->iommu);
- return ERR_PTR(err);
- }
-
if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_smmu_debugfs_init(smmu);