From 876397837d582ce72f977ac3e635ce74eebcecc9 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 2 Aug 2023 16:13:41 +0100
Subject: ia64: implement the new page table range API

Add PFN_PTE_SHIFT, update_mmu_cache_range() and flush_dcache_folio().
Change the PG_arch_1 (aka PG_dcache_clean) flag from being per-page to
per-folio, which makes arch_dma_mark_clean() and mark_clean() a little
more exciting.

[willy@infradead.org: fix folio_size() handling]
  Link: https://lkml.kernel.org/r/ZNPlOCe8F+nrzPxr@casper.infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-14-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Mike Rapoport (IBM)
Signed-off-by: Andrew Morton
---
 arch/ia64/mm/init.c | 32 +++++++++++++++++++++++---------
 1 file changed, 23 insertions(+), 9 deletions(-)

(limited to 'arch/ia64/mm')

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 7f5353e28516..05b0f2f0c073 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -50,30 +50,44 @@ void
 __ia64_sync_icache_dcache (pte_t pte)
 {
 	unsigned long addr;
-	struct page *page;
+	struct folio *folio;
 
-	page = pte_page(pte);
-	addr = (unsigned long) page_address(page);
+	folio = page_folio(pte_page(pte));
+	addr = (unsigned long)folio_address(folio);
 
-	if (test_bit(PG_arch_1, &page->flags))
+	if (test_bit(PG_arch_1, &folio->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + page_size(page));
-	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
+	flush_icache_range(addr, addr + folio_size(folio));
+	set_bit(PG_arch_1, &folio->flags);	/* mark page as clean */
 }
 
 /*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * Since DMA is i-cache coherent, any (complete) folios that were written via
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
 void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
 {
 	unsigned long pfn = PHYS_PFN(paddr);
+	struct folio *folio = page_folio(pfn_to_page(pfn));
+	ssize_t left = size;
+	size_t offset = offset_in_folio(folio, paddr);
 
-	do {
+	if (offset) {
+		left -= folio_size(folio) - offset;
+		if (left <= 0)
+			return;
+		folio = folio_next(folio);
+	}
+
+	while (left >= (ssize_t)folio_size(folio)) {
+		left -= folio_size(folio);
 		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
-	} while (++pfn <= PHYS_PFN(paddr + size - 1));
+		if (!left)
+			break;
+		folio = folio_next(folio);
+	}
 }
 
 inline void
--
cgit
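
For context (not part of the commit above): a rough user-space sketch of the
folio walk that the reworked arch_dma_mark_clean() performs.  The struct
folio, folio_size()/folio_next() helpers and mark_clean_range() below are
simplified stand-ins for the kernel API, and the folio sizes are arbitrary;
the point is only to show that a partially covered leading folio is skipped,
every fully covered folio is marked clean, and a partially covered trailing
folio is left alone.

#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

struct folio { size_t size; int clean; };

static size_t folio_size(const struct folio *f) { return f->size; }
static struct folio *folio_next(struct folio *f) { return f + 1; }

static void mark_clean_range(struct folio *folio, size_t offset, size_t size)
{
	ssize_t left = size;

	if (offset) {			/* range starts mid-folio: skip that folio */
		left -= folio_size(folio) - offset;
		if (left <= 0)
			return;
		folio = folio_next(folio);
	}

	while (left >= (ssize_t)folio_size(folio)) {
		left -= folio_size(folio);
		folio->clean = 1;	/* stands in for set_bit(PG_arch_1, ...) */
		if (!left)
			break;
		folio = folio_next(folio);
	}
}

int main(void)
{
	/* three "folios"; the DMA range covers the tail of [0] and all of [1] */
	struct folio folios[3] = { { 8192, 0 }, { 4096, 0 }, { 4096, 0 } };

	mark_clean_range(&folios[0], 4096, 4096 + 4096);

	for (int i = 0; i < 3; i++)
		printf("folio %d: %s\n", i, folios[i].clean ? "clean" : "not marked");
	return 0;
}

With the sizes above, only folios[1] ends up marked clean: the 4096 bytes
that land in folios[0] do not cover that whole 8192-byte folio, so it is
skipped, and nothing of folios[2] is touched.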