author		Linus Torvalds <torvalds@linux-foundation.org>	2017-09-09 15:03:24 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-09 15:03:24 -0700
commit		4dfc2788033d30dfccfd4268e06dd73ce2c654ed (patch)
tree		f6675b959f4aa12e64e68383874683c4cea46334 /include
parent		a59e57da49f7c3f3de8cf4b7568a0c6c82f5b242 (diff)
parent		47b59d8e40850a05370ee9198ea5e505d89489f1 (diff)
Merge tag 'iommu-updates-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "Slightly more changes than usual this time:

   - KDump Kernel IOMMU take-over code for AMD IOMMU. The code now
     tries to preserve the mappings of the kernel so that master aborts
     for devices are avoided. Master aborts cause some devices to fail
     in the kdump kernel, so this code makes the dump more likely to
     succeed when AMD IOMMU is enabled.

   - common flush queue implementation for IOVA code users. The code is
     still optional, but AMD and Intel IOMMU drivers had their own
     implementation which is now unified.

   - finish support for iommu-groups. All drivers implement this
     feature now so that IOMMU core code can rely on it.

   - finish support for 'struct iommu_device' in iommu drivers. All
     drivers now use the interface.

   - new functions in the IOMMU-API for explicit IO/TLB flushing. This
     will help to reduce the number of IO/TLB flushes when IOMMU
     drivers support this interface.

   - support for mt2712 in the Mediatek IOMMU driver

   - new IOMMU driver for QCOM hardware

   - system PM support for ARM-SMMU

   - shutdown method for ARM-SMMU-v3

   - some constification patches

   - various other small improvements and fixes"

* tag 'iommu-updates-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (87 commits)
  iommu/vt-d: Don't be too aggressive when clearing one context entry
  iommu: Introduce Interface for IOMMU TLB Flushing
  iommu/s390: Constify iommu_ops
  iommu/vt-d: Avoid calling virt_to_phys() on null pointer
  iommu/vt-d: IOMMU Page Request needs to check if address is canonical.
  arm/tegra: Call bus_set_iommu() after iommu_device_register()
  iommu/exynos: Constify iommu_ops
  iommu/ipmmu-vmsa: Make ipmmu_gather_ops const
  iommu/ipmmu-vmsa: Rereserving a free context before setting up a pagetable
  iommu/amd: Rename a few flush functions
  iommu/amd: Check if domain is NULL in get_domain() and return -EBUSY
  iommu/mediatek: Fix a build warning of BIT(32) in ARM
  iommu/mediatek: Fix a build fail of m4u_type
  iommu: qcom: annotate PM functions as __maybe_unused
  iommu/pamu: Fix PAMU boot crash
  memory: mtk-smi: Degrade SMI init to module_init
  iommu/mediatek: Enlarge the validate PA range for 4GB mode
  iommu/mediatek: Disable iommu clock when system suspend
  iommu/mediatek: Move pgtable allocation into domain_alloc
  iommu/mediatek: Merge 2 M4U HWs into one iommu domain
  ...
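The new explicit IO/TLB flushing interface added here (iommu_unmap_fast(), iommu_tlb_range_add() and iommu_tlb_sync(), declared in the include/linux/iommu.h hunks below) lets callers batch invalidations instead of paying for a flush on every unmap. A minimal caller-side sketch, assuming a domain with an already mapped range; the function name example_unmap_range is invented for illustration:

	#include <linux/iommu.h>

	/* Sketch only: unmap without the implicit per-call flush, then batch it */
	static void example_unmap_range(struct iommu_domain *domain,
					unsigned long iova, size_t size)
	{
		size_t unmapped;

		/* iommu_unmap_fast() does not flush the IO/TLB by itself */
		unmapped = iommu_unmap_fast(domain, iova, size);
		if (!unmapped)
			return;

		/* Queue the invalidation and issue a single flush at the end */
		iommu_tlb_range_add(domain, iova, unmapped);
		iommu_tlb_sync(domain);
	}

If the driver does not implement the new callbacks, the inline wrappers simply do nothing, so this pattern degrades gracefully.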
Diffstat (limited to 'include')
-rw-r--r--  include/dt-bindings/memory/mt8173-larb-port.h |  4
-rw-r--r--  include/linux/iommu.h                          | 55
-rw-r--r--  include/linux/iova.h                           | 67
-rw-r--r--  include/soc/mediatek/smi.h                     |  2
4 files changed, 120 insertions(+), 8 deletions(-)
diff --git a/include/dt-bindings/memory/mt8173-larb-port.h b/include/dt-bindings/memory/mt8173-larb-port.h
index 5fef5d1f8f82..111b4b0ec85a 100644
--- a/include/dt-bindings/memory/mt8173-larb-port.h
+++ b/include/dt-bindings/memory/mt8173-larb-port.h
@@ -15,10 +15,6 @@
#define __DTS_IOMMU_PORT_MT8173_H
#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
-/* Local arbiter ID */
-#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x7)
-/* PortID within the local arbiter */
-#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
#define M4U_LARB0_ID 0
#define M4U_LARB1_ID 1
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 176f7569d874..a7f2ac689d29 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -167,6 +167,10 @@ struct iommu_resv_region {
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
+ * @tlb_range_add: Add a given iova range to the flush queue for this domain
+ * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * queue
* to an iommu domain
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
@@ -199,6 +203,10 @@ struct iommu_ops {
size_t size);
size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot);
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ void (*iotlb_range_add)(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
@@ -225,6 +233,7 @@ struct iommu_ops {
u32 (*domain_get_windows)(struct iommu_domain *domain);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+ bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
unsigned long pgsize_bitmap;
};
@@ -291,7 +300,9 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size);
+ size_t size);
+extern size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents,
int prot);
@@ -348,6 +359,25 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+ if (domain->ops->flush_iotlb_all)
+ domain->ops->flush_iotlb_all(domain);
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ if (domain->ops->iotlb_range_add)
+ domain->ops->iotlb_range_add(domain, iova, size);
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+ if (domain->ops->iotlb_sync)
+ domain->ops->iotlb_sync(domain);
+}
+
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
@@ -430,13 +460,19 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
}
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot)
+ phys_addr_t paddr, size_t size, int prot)
{
return -ENODEV;
}
static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- int gfp_order)
+ size_t size)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,
+ int gfp_order)
{
return -ENODEV;
}
@@ -448,6 +484,19 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return -ENODEV;
}
+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+}
+
static inline int iommu_domain_window_enable(struct iommu_domain *domain,
u32 wnd_nr, phys_addr_t paddr,
u64 size, int prot)
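On the driver side, the three new iommu_ops callbacks are optional; the inline wrappers above check for NULL before calling them. A sketch of how a driver could hook them up, with all my_iommu_* names invented for illustration and the hardware-specific work left as comments:

	#include <linux/iommu.h>

	/* Hypothetical driver: flush every IO/TLB entry of this domain now */
	static void my_iommu_flush_iotlb_all(struct iommu_domain *domain)
	{
		/* issue a full, synchronous IO/TLB invalidation here */
	}

	/* Hypothetical driver: record a range, flush it later in ->iotlb_sync */
	static void my_iommu_iotlb_range_add(struct iommu_domain *domain,
					     unsigned long iova, size_t size)
	{
		/* queue [iova, iova + size) for a later invalidation */
	}

	static void my_iommu_iotlb_sync(struct iommu_domain *domain)
	{
		/* flush all queued ranges and wait for completion */
	}

	static const struct iommu_ops my_iommu_ops = {
		/* ... .map, .unmap, .attach_dev and friends ... */
		.flush_iotlb_all = my_iommu_flush_iotlb_all,
		.iotlb_range_add = my_iommu_iotlb_range_add,
		.iotlb_sync	 = my_iommu_iotlb_sync,
	};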
diff --git a/include/linux/iova.h b/include/linux/iova.h
index e0a892ae45c0..d179b9bf7814 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
+#include <linux/atomic.h>
#include <linux/dma-mapping.h>
/* iova structure */
@@ -36,6 +37,35 @@ struct iova_rcache {
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
+struct iova_domain;
+
+/* Call-Back from IOVA code into IOMMU drivers */
+typedef void (* iova_flush_cb)(struct iova_domain *domain);
+
+/* Destructor for per-entry data */
+typedef void (* iova_entry_dtor)(unsigned long data);
+
+/* Number of entries per Flush Queue */
+#define IOVA_FQ_SIZE 256
+
+/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
+#define IOVA_FQ_TIMEOUT 10
+
+/* Flush Queue entry for deferred flushing */
+struct iova_fq_entry {
+ unsigned long iova_pfn;
+ unsigned long pages;
+ unsigned long data;
+ u64 counter; /* Flush counter when this entry was added */
+};
+
+/* Per-CPU Flush Queue structure */
+struct iova_fq {
+ struct iova_fq_entry entries[IOVA_FQ_SIZE];
+ unsigned head, tail;
+ spinlock_t lock;
+};
+
/* holds all the iova translations for a domain */
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
@@ -45,6 +75,25 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
+
+ iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
+ TLBs */
+
+ iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
+ iova entry */
+
+ struct iova_fq __percpu *fq; /* Flush Queue */
+
+ atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
+ have been started */
+
+ atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
+ have been finished */
+
+ struct timer_list fq_timer; /* Timer to regularly empty the
+ flush-queues */
+ atomic_t fq_timer_on; /* 1 when timer is active, 0
+ when not */
};
static inline unsigned long iova_size(struct iova *iova)
@@ -95,6 +144,9 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
+void queue_iova(struct iova_domain *iovad,
+ unsigned long pfn, unsigned long pages,
+ unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
@@ -102,6 +154,8 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit);
+int init_iova_flush_queue(struct iova_domain *iovad,
+ iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
@@ -148,6 +202,12 @@ static inline void free_iova_fast(struct iova_domain *iovad,
{
}
+static inline void queue_iova(struct iova_domain *iovad,
+ unsigned long pfn, unsigned long pages,
+ unsigned long data)
+{
+}
+
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn)
@@ -174,6 +234,13 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
+static inline int init_iova_flush_queue(struct iova_domain *iovad,
+ iova_flush_cb flush_cb,
+ iova_entry_dtor entry_dtor)
+{
+ return -ENODEV;
+}
+
static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
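A user of the new IOVA flush-queue support would register its callbacks once per iova_domain and then defer frees through queue_iova(). A rough sketch under the assumption of a hypothetical driver (the example_* names are invented; what the flush callback and destructor actually do is entirely driver specific):

	#include <linux/iova.h>

	/* Hypothetical flush callback: ask the IOMMU driver to flush its IO/TLBs */
	static void example_flush_cb(struct iova_domain *iovad)
	{
		/* e.g. trigger a domain-wide IO/TLB flush in the owning driver */
	}

	/* Hypothetical destructor for the per-entry cookie passed to queue_iova() */
	static void example_entry_dtor(unsigned long data)
	{
		/* release whatever 'data' refers to, e.g. freed page-table pages */
	}

	static int example_setup(struct iova_domain *iovad)
	{
		/* check the return value; fall back to direct flushing on failure */
		return init_iova_flush_queue(iovad, example_flush_cb,
					     example_entry_dtor);
	}

	static void example_free(struct iova_domain *iovad, unsigned long pfn,
				 unsigned long pages, unsigned long data)
	{
		/* defer the free until the next IO/TLB flush instead of flushing now */
		queue_iova(iovad, pfn, pages, data);
	}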
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index 8893c5eacd07..5201e9022c86 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -19,7 +19,7 @@
#ifdef CONFIG_MTK_SMI
-#define MTK_LARB_NR_MAX 8
+#define MTK_LARB_NR_MAX 16
#define MTK_SMI_MMU_EN(port) BIT(port)