author		Joerg Roedel <jroedel@suse.de>	2017-08-10 16:14:59 +0200
committer	Joerg Roedel <jroedel@suse.de>	2017-08-15 18:23:51 +0200
commit		fb418dab8a4f01dde0c025d15145c589ec02796b
tree		a3248e1152ca546661147605d2480528a8775af6
parent		1928210107edd4fa786199fef6b875d3af3bef88
iommu/iova: Add flush counters to Flush-Queue implementation
There are two counters:

 * fq_flush_start_cnt  - Increased when a TLB flush is started.
 * fq_flush_finish_cnt - Increased when a TLB flush is finished.

The fq_flush_start_cnt is assigned to every Flush-Queue entry on its creation. When freeing entries from the Flush-Queue, the value in the entry is compared to fq_flush_finish_cnt. The entry can only be freed when its value is less than the value of fq_flush_finish_cnt.

The reason for these counters is to take advantage of IOMMU TLB flushes that happened on other CPUs. Such flushes have already covered the Flush-Queue entries on those CPUs, so these entries can be freed without flushing the TLB again. This makes it less likely that the Flush-Queue is full and saves IOMMU TLB flushes.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
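The counter handshake can be modeled in isolation. The following is a minimal sketch in plain C, not kernel code; the names fq_entry, fq_tag_entry, fq_flush and fq_entry_can_free are illustrative and do not appear in this patch. It shows why an entry tagged with the start counter becomes freeable once the finish counter passes it:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: a stripped-down model of the two-counter scheme. */
static atomic_uint_fast64_t fq_flush_start_cnt;
static atomic_uint_fast64_t fq_flush_finish_cnt;

struct fq_entry {
        uint64_t counter;       /* fq_flush_start_cnt at queue time */
};

/* Tag an entry when it is queued. */
static void fq_tag_entry(struct fq_entry *e)
{
        e->counter = atomic_load(&fq_flush_start_cnt);
}

/* Bracket a (simulated) IOMMU TLB flush with the two counters. */
static void fq_flush(void)
{
        atomic_fetch_add(&fq_flush_start_cnt, 1);
        /* ... the actual TLB invalidation would run here ... */
        atomic_fetch_add(&fq_flush_finish_cnt, 1);
}

/*
 * An entry may be freed once its tag is below the finish counter,
 * i.e. a flush that began after the entry was tagged has completed.
 */
static bool fq_entry_can_free(const struct fq_entry *e)
{
        return e->counter < atomic_load(&fq_flush_finish_cnt);
}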
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/iova.c | 27 ++++++++++++++++++++++++---
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index e5c9a7ae6088..47b144e417ad 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -75,6 +75,9 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 {
         int cpu;
 
+        atomic64_set(&iovad->fq_flush_start_cnt,  0);
+        atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
         iovad->fq = alloc_percpu(struct iova_fq);
         if (!iovad->fq)
                 return -ENOMEM;
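For context, a sketch of how a flush-queue user would call init_iova_flush_queue(); my_flush_cb is hypothetical, while the callback and destructor parameters correspond to the flush_cb and entry_dtor members visible in the diff below:

/* Hypothetical flush callback: would invalidate the domain's IOTLB. */
static void my_flush_cb(struct iova_domain *iovad)
{
        /* hardware-specific IOMMU TLB invalidation goes here */
}

/* At domain setup: enable lazy flushing, with no per-entry destructor. */
if (init_iova_flush_queue(iovad, my_flush_cb, NULL))
        pr_warn("failed to init flush queue, using strict flushing\n");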
@@ -482,20 +485,30 @@ static inline unsigned fq_ring_add(struct iova_fq *fq)
 
 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 {
+        u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
         unsigned idx;
 
         fq_ring_for_each(idx, fq) {
+                if (fq->entries[idx].counter >= counter)
+                        break;
+
                 if (iovad->entry_dtor)
                         iovad->entry_dtor(fq->entries[idx].data);
 
                 free_iova_fast(iovad,
                                fq->entries[idx].iova_pfn,
                                fq->entries[idx].pages);
+
+                fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
         }
+}
 
-        fq->head = 0;
-        fq->tail = 0;
+static void iova_domain_flush(struct iova_domain *iovad)
+{
+        atomic64_inc(&iovad->fq_flush_start_cnt);
+        iovad->flush_cb(iovad);
+        atomic64_inc(&iovad->fq_flush_finish_cnt);
 }
 
 static void fq_destroy_all_entries(struct iova_domain *iovad)
 {
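fq_ring_free() builds on the ring-buffer helpers introduced earlier in this series. For reference, they look approximately like this (quoted from memory, so treat the exact bodies as a sketch):

#define IOVA_FQ_SIZE    256

/* Walk the valid entries from head to tail. */
#define fq_ring_for_each(i, fq) \
        for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
        return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}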
@@ -526,8 +539,15 @@ void queue_iova(struct iova_domain *iovad,
         struct iova_fq *fq = get_cpu_ptr(iovad->fq);
         unsigned idx;
 
+        /*
+         * First remove all entries from the flush queue that have already been
+         * flushed out on another CPU. This makes the fq_full() check below less
+         * likely to be true.
+         */
+        fq_ring_free(iovad, fq);
+
         if (fq_full(fq)) {
-                iovad->flush_cb(iovad);
+                iova_domain_flush(iovad);
                 fq_ring_free(iovad, fq);
         }
 
@@ -536,6 +556,7 @@ void queue_iova(struct iova_domain *iovad,
         fq->entries[idx].iova_pfn = pfn;
         fq->entries[idx].pages    = pages;
         fq->entries[idx].data     = data;
+        fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
 
         put_cpu_ptr(iovad->fq);
 }
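Taken together, a caller's unmap path would look roughly like the following sketch; my_unmap_range() and the page-table step are hypothetical, only queue_iova() is from this series:

/* Hypothetical caller: tear down a mapping, then defer the IOVA free. */
static void my_unmap_range(struct iova_domain *iovad,
                           unsigned long pfn, unsigned long pages)
{
        /* 1. Remove the page-table entries (domain-specific, not shown). */

        /*
         * 2. Queue the IOVA range for deferred freeing. queue_iova() first
         *    reaps entries already covered by a flush on another CPU and
         *    only calls iova_domain_flush() if the ring is still full.
         */
        queue_iova(iovad, pfn, pages, 0);
}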