author    Thomas Gleixner <tglx@linutronix.de>  2023-05-25 14:57:05 +0200
committer Andrew Morton <akpm@linux-foundation.org>  2023-06-09 16:25:41 -0700
commit    a09fad96ffb1d0da007283727235a03b813f989b (patch)
tree      58c8bd33ae572dd83a2eb4e723b2be781504f0df /mm/vmalloc.c
parent    ca5e46c3400badc418a8fbcaeba711ad60ff4e1b (diff)
mm/vmalloc: prevent flushing dirty space over and over
vmap blocks which have active mappings cannot be purged.  Allocations
which have been freed are accounted for in vmap_block::dirty_min/max, so
that they can be detected in _vm_unmap_aliases() as potentially stale
TLBs.

If there are several invocations of _vm_unmap_aliases() then each of them
will flush the dirty range.  That's pointless and just increases the
probability of full TLB flushes.

Avoid that by resetting the flush range after accounting for it.  That's
safe versus other invocations of _vm_unmap_aliases() because this is all
serialized with vmap_purge_lock.

Link: https://lkml.kernel.org/r/20230525124504.692056496@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
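The pattern the patch relies on can be illustrated with a minimal standalone
sketch (not the kernel code itself): a per-block dirty range is expanded on
free, and the flush path accumulates that range once and then resets it to
"empty" (min = NBITS, max = 0), so a later pass over the same block has
nothing left to flush.  NBITS stands in for VMAP_BBMAP_BITS, and the helper
names mark_dirty()/collect_flush_range() are hypothetical, chosen only for
this illustration; in the kernel the accounting and reset happen in vb_free()
and _vm_unmap_aliases(), serialized by vb->lock and vmap_purge_lock.

/* Hedged sketch of the dirty-range bookkeeping, not kernel code. */
#include <stdio.h>

#define NBITS 1024UL	/* stands in for VMAP_BBMAP_BITS */

struct block {
	unsigned long dirty_min;	/* empty range: min = NBITS, max = 0 */
	unsigned long dirty_max;
};

/* vb_free()-like path: expand the not yet flushed dirty range. */
static void mark_dirty(struct block *b, unsigned long start, unsigned long len)
{
	b->dirty_min = b->dirty_min < start ? b->dirty_min : start;
	b->dirty_max = b->dirty_max > start + len ? b->dirty_max : start + len;
}

/* _vm_unmap_aliases()-like path: account the range once, then reset it. */
static int collect_flush_range(struct block *b, unsigned long *s, unsigned long *e)
{
	if (!b->dirty_max)
		return 0;		/* nothing left to flush */

	*s = b->dirty_min;
	*e = b->dirty_max;

	/* Prevent the same range from being flushed again. */
	b->dirty_min = NBITS;
	b->dirty_max = 0;
	return 1;
}

int main(void)
{
	struct block b = { .dirty_min = NBITS, .dirty_max = 0 };
	unsigned long s, e;

	mark_dirty(&b, 4, 8);
	mark_dirty(&b, 64, 16);

	/* First invocation picks up [4, 80) and resets the range ... */
	if (collect_flush_range(&b, &s, &e))
		printf("flush [%lu, %lu)\n", s, e);

	/* ... so a second invocation finds nothing and does not re-flush. */
	if (!collect_flush_range(&b, &s, &e))
		printf("nothing to flush\n");

	return 0;
}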
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index eaef5e0400db..ad9a1d9e314f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2226,7 +2226,7 @@ static void vb_free(unsigned long addr, unsigned long size)
 	spin_lock(&vb->lock);
 
-	/* Expand dirty range */
+	/* Expand the not yet TLB flushed dirty range */
 	vb->dirty_min = min(vb->dirty_min, offset);
 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
@@ -2264,7 +2264,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
 		 * space to be flushed.
 		 */
 		if (!purge_fragmented_block(vb, vbq, &purge_list) &&
-		    vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
+		    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
 			unsigned long va_start = vb->va->va_start;
 			unsigned long s, e;
@@ -2274,6 +2274,10 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
 			start = min(s, start);
 			end = max(e, end);
 
+			/* Prevent that this is flushed again */
+			vb->dirty_min = VMAP_BBMAP_BITS;
+			vb->dirty_max = 0;
+
 			flush = 1;
 		}
 		spin_unlock(&vb->lock);