author	Helge Deller <deller@gmx.de>	2023-08-22 16:27:49 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-24 16:20:31 -0700
commit	7db15418d390cf878d7c77ae08a4ad39f1534bc5 (patch)
tree	89c0d1cdfd00afdb004e131d62b03fd86480d310 /arch/nios2
parent	8cfd014efd93e9450fcd4892bbfe8b10f41e53c3 (diff)
nios2: fix flush_dcache_page() for usage from irq context
Since at least kernel 6.1, flush_dcache_page() is called with IRQs disabled, e.g. from aio_complete(). But the current implementation for flush_dcache_page() on NIOS2 unintentionally re-enables IRQs, which may lead to deadlocks. Fix it by using xa_lock_irqsave() and xa_unlock_irqrestore() for the flush_dcache_mmap_*lock() macros instead.

Link: https://lkml.kernel.org/r/ZOTF5WWURQNH9+iw@p100
Signed-off-by: Helge Deller <deller@gmx.de>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
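
To make the failure mode concrete, here is a minimal, self-contained userspace model of the difference between the _irq and _irqsave locking variants. All names here (irqs_enabled, lock_irq(), lock_irqsave(), and so on) are hypothetical stand-ins for the kernel primitives, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	static bool irqs_enabled = true;  /* models the CPU interrupt-enable flag */

	static void local_irq_disable(void) { irqs_enabled = false; }
	static void local_irq_enable(void)  { irqs_enabled = true; }

	/* _irq variants: disable on lock, unconditionally re-enable on unlock */
	static void lock_irq(void)   { local_irq_disable(); /* take lock */ }
	static void unlock_irq(void) { /* drop lock */ local_irq_enable(); }

	/* _irqsave variants: remember the caller's state, restore exactly that */
	static void lock_irqsave(bool *flags)
	{
		*flags = irqs_enabled;
		local_irq_disable();  /* take lock */
	}
	static void unlock_irqrestore(bool flags)
	{
		/* drop lock */
		if (flags)
			local_irq_enable();
	}

	int main(void)
	{
		bool flags;

		/* caller already runs with IRQs off, e.g. aio_complete() */
		local_irq_disable();
		lock_irq();
		unlock_irq();
		printf("after _irq variant:     irqs_enabled=%d (wrong, re-enabled)\n",
		       irqs_enabled);

		local_irq_disable();
		lock_irqsave(&flags);
		unlock_irqrestore(flags);
		printf("after _irqsave variant: irqs_enabled=%d (correct, preserved)\n",
		       irqs_enabled);
		return 0;
	}

With the _irq variant, the unlock path unconditionally re-enables interrupts and clobbers the caller's state; the _irqsave variant restores exactly what the caller had, which is why this patch switches flush_aliases() to the irqsave/irqrestore pair.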
Diffstat (limited to 'arch/nios2')
-rw-r--r--	arch/nios2/include/asm/cacheflush.h	4
-rw-r--r--	arch/nios2/mm/cacheflush.c	5
2 files changed, 7 insertions, 2 deletions
diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
index 7c48c5213fb7..348cea097792 100644
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -52,5 +52,9 @@ extern void invalidate_dcache_range(unsigned long start, unsigned long end);
 
 #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
+	xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
+	xa_unlock_irqrestore(&mapping->i_pages, flags)
 
 #endif /* _ASM_NIOS2_CACHEFLUSH_H */
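
For context, xa_lock_irqsave() and xa_unlock_irqrestore() are thin wrappers (defined in include/linux/xarray.h) around spin_lock_irqsave()/spin_unlock_irqrestore() on the xarray's internal spinlock, so the new macros expand roughly as sketched below. This is an illustration of the expansion, not a literal copy of the header:

	unsigned long flags;

	/* flush_dcache_mmap_lock_irqsave(mapping, flags):
	 * save the current IRQ state into flags, then disable IRQs and lock */
	spin_lock_irqsave(&mapping->i_pages.xa_lock, flags);

	/* ... walk mapping->i_mmap under the lock ... */

	/* flush_dcache_mmap_unlock_irqrestore(mapping, flags):
	 * unlock and restore exactly the saved IRQ state */
	spin_unlock_irqrestore(&mapping->i_pages.xa_lock, flags);

Unlike xa_lock_irq(), whose xa_unlock_irq() counterpart unconditionally re-enables IRQs, the saved flags let the caller's IRQ state survive the critical section.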
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 28b805f465a8..0ee9c5f02e08 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -75,12 +75,13 @@ static void flush_aliases(struct address_space *mapping, struct folio *folio)
 {
 	struct mm_struct *mm = current->active_mm;
 	struct vm_area_struct *vma;
+	unsigned long flags;
 	pgoff_t pgoff;
 	unsigned long nr = folio_nr_pages(folio);
 
 	pgoff = folio->index;
 
-	flush_dcache_mmap_lock(mapping);
+	flush_dcache_mmap_lock_irqsave(mapping, flags);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
 			pgoff + nr - 1) {
 		unsigned long start;
@@ -92,7 +93,7 @@ static void flush_aliases(struct address_space *mapping, struct folio *folio)
 		start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 		flush_cache_range(vma, start, start + nr * PAGE_SIZE);
 	}
-	flush_dcache_mmap_unlock(mapping);
+	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
 
 void flush_cache_all(void)