Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                        1
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c         30
-rw-r--r--  arch/ia64/include/asm/acpi.h             6
-rw-r--r--  arch/ia64/include/asm/cacheflush.h      14
-rw-r--r--  arch/ia64/include/asm/io.h              13
-rw-r--r--  arch/ia64/include/asm/pgtable.h          4
-rw-r--r--  arch/ia64/include/asm/processor.h        3
-rw-r--r--  arch/ia64/kernel/syscalls/syscall.tbl    1
-rw-r--r--  arch/ia64/mm/init.c                     32
-rw-r--r--  arch/ia64/mm/ioremap.c                  41
10 files changed, 70 insertions, 75 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 88382f105301..53faa122b0f4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -47,6 +47,7 @@ config IA64
select GENERIC_IRQ_LEGACY
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
+ select GENERIC_IOREMAP
select GENERIC_SMP_IDLE_THREAD
select ARCH_TASK_STRUCT_ON_STACK
select ARCH_TASK_STRUCT_ALLOCATOR
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 8ad6946521d8..c4d477e8bcd4 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -798,22 +798,30 @@ sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
#endif
#ifdef ENABLE_MARK_CLEAN
-/**
+/*
* Since DMA is i-cache coherent, any (complete) pages that were written via
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
-static void
-mark_clean (void *addr, size_t size)
+static void mark_clean(void *addr, size_t size)
{
- unsigned long pg_addr, end;
-
- pg_addr = PAGE_ALIGN((unsigned long) addr);
- end = (unsigned long) addr + size;
- while (pg_addr + PAGE_SIZE <= end) {
- struct page *page = virt_to_page((void *)pg_addr);
- set_bit(PG_arch_1, &page->flags);
- pg_addr += PAGE_SIZE;
+ struct folio *folio = virt_to_folio(addr);
+ ssize_t left = size;
+ size_t offset = offset_in_folio(folio, addr);
+
+ if (offset) {
+ left -= folio_size(folio) - offset;
+ if (left <= 0)
+ return;
+ folio = folio_next(folio);
+ }
+
+ while (left >= (ssize_t)folio_size(folio)) {
+ left -= folio_size(folio);
+ set_bit(PG_arch_1, &folio->flags);
+ if (!left)
+ break;
+ folio = folio_next(folio);
}
}
#endif
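
The new mark_clean() walks folios instead of fixed PAGE_SIZE steps, and only folios lying entirely inside [addr, addr + size) are marked: the offset adjustment skips a partial first folio, and a partial last folio fails the left >= folio_size() test. A minimal userspace model of the walk, with fixed 16-byte "folios" standing in for real variable-sized folios (all names here are illustrative, not kernel API):

    #include <stdio.h>
    #include <sys/types.h>	/* ssize_t */

    #define FOLIO_SIZE 16	/* stand-in: real folios vary in size */

    /* Model of mark_clean(): mark each folio fully inside [addr, addr+size). */
    static void mark_clean_model(size_t addr, size_t size)
    {
    	ssize_t left = size;
    	size_t offset = addr % FOLIO_SIZE;
    	size_t folio = addr - offset;		/* folio containing addr */

    	if (offset) {				/* skip the partial first folio */
    		left -= FOLIO_SIZE - offset;
    		if (left <= 0)
    			return;
    		folio += FOLIO_SIZE;
    	}

    	while (left >= (ssize_t)FOLIO_SIZE) {	/* complete folios only */
    		left -= FOLIO_SIZE;
    		printf("mark folio at %zu\n", folio);
    		if (!left)
    			break;
    		folio += FOLIO_SIZE;
    	}
    }

    int main(void)
    {
    	/* [20, 64): folio 16 is only partly covered and is skipped;
    	 * folios 32 and 48 are fully covered and get marked. */
    	mark_clean_model(20, 44);
    	return 0;
    }
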
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 87927eb824cc..58500a964238 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -11,7 +11,7 @@
#ifdef __KERNEL__
-#include <acpi/pdc_intel.h>
+#include <acpi/proc_cap_intel.h>
#include <linux/init.h>
#include <linux/numa.h>
@@ -69,9 +69,9 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
static inline bool arch_has_acpi_pdc(void) { return true; }
-static inline void arch_acpi_set_pdc_bits(u32 *buf)
+static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
{
- buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+ *cap |= ACPI_PROC_CAP_EST_CAPABILITY_SMP;
}
#ifdef CONFIG_ACPI_NUMA
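
The _PDC-to-_OSC conversion also changes the hook's contract: instead of patching dword 2 of the 3-dword _PDC buffer, the arch now ORs its bits into a single capability word. A hedged sketch of a caller, assuming kernel context (only the two functions in the hunk above are from this diff; the caller below is invented for illustration):

    /* Illustrative only: build the _OSC processor capability word that
     * replaces the old _PDC buffer's capabilities dword (buf[2]). */
    static u32 build_proc_cap(void)
    {
    	u32 cap = 0;

    	if (arch_has_acpi_pdc())	/* always true on ia64 */
    		arch_acpi_set_proc_cap_bits(&cap);
    	/* cap now has ACPI_PROC_CAP_EST_CAPABILITY_SMP set */
    	return cap;
    }
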
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index 708c0fa5d975..eac493fa9e0d 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -13,10 +13,16 @@
#include <asm/page.h>
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-#define flush_dcache_page(page) \
-do { \
- clear_bit(PG_arch_1, &(page)->flags); \
-} while (0)
+static inline void flush_dcache_folio(struct folio *folio)
+{
+ clear_bit(PG_arch_1, &folio->flags);
+}
+#define flush_dcache_folio flush_dcache_folio
+
+static inline void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range flush_icache_range
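
PG_arch_1 acts as an "i-cache is coherent" flag on ia64: flush_dcache_folio() merely clears it, and the real flush is deferred until the folio is first mapped executable, in __ia64_sync_icache_dcache() (converted to folios in the mm/init.c hunk below). A runnable userspace model of that protocol, with one bool standing in for the per-folio flag (names invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    static bool folio_clean;	/* models PG_arch_1: i-cache coherent */

    static void model_flush_dcache_folio(void)
    {
    	folio_clean = false;	/* data written; i-cache may be stale */
    }

    static void model_sync_icache_dcache(void)	/* on executable mapping */
    {
    	if (folio_clean)
    		return;			/* already coherent, nothing to do */
    	puts("flush_icache_range()");	/* the deferred flush */
    	folio_clean = true;		/* mark clean */
    }

    int main(void)
    {
    	model_flush_dcache_folio();	/* kernel wrote to the folio */
    	model_sync_icache_dcache();	/* first exec mapping: flushes */
    	model_sync_icache_dcache();	/* still clean: no second flush */
    	return 0;
    }
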
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 83a492c8d298..eedc0afa8cad 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -243,15 +243,12 @@ static inline void outsl(unsigned long port, const void *src,
# ifdef __KERNEL__
-extern void __iomem * ioremap(unsigned long offset, unsigned long size);
+#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL)
+
extern void __iomem * ioremap_uc(unsigned long offset, unsigned long size);
-extern void iounmap (volatile void __iomem *addr);
-static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
-{
- return ioremap(phys_addr, size);
-}
-#define ioremap ioremap
-#define ioremap_cache ioremap_cache
+
+#define ioremap_prot ioremap_prot
+#define ioremap_cache ioremap
#define ioremap_uc ioremap_uc
#define iounmap iounmap
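
With GENERIC_IOREMAP selected in the Kconfig hunk above, plain ioremap() needs no ia64 definition any more: asm-generic/io.h synthesizes it from ioremap_prot() and _PAGE_IOREMAP, roughly as below (paraphrased from include/asm-generic/io.h, not part of this diff):

    /* Paraphrase of the asm-generic fallback that GENERIC_IOREMAP enables. */
    static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
    {
    	return ioremap_prot(addr, size, _PAGE_IOREMAP);
    }

Note that _PAGE_IOREMAP is PAGE_KERNEL (cacheable) here, which is why ioremap_cache can simply alias ioremap: ia64's ioremap_prot() itself downgrades to __ioremap_uc() whenever EFI says the range is not write-back capable (see the mm/ioremap.c hunk below).
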
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 21c97e31a28a..4e5dd800ce1f 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -206,6 +206,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
#define RGN_MAP_LIMIT ((1UL << RGN_MAP_SHIFT) - PAGE_SIZE) /* per region addr limit */
+#define PFN_PTE_SHIFT PAGE_SHIFT
/*
* Conversion functions: convert page frame number (pfn) and a protection value to a page
* table entry (pte).
@@ -303,8 +304,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
*ptep = pteval;
}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
/*
* Make page protection values cacheable, uncacheable, or write-
* combining. Note that "protection" is really a misnomer here as the
@@ -396,6 +395,7 @@ pte_same (pte_t a, pte_t b)
return pte_val(a) == pte_val(b);
}
+#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)
#define update_mmu_cache(vma, address, ptep) do { } while (0)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
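
Dropping set_pte_at() works because the generic page table range API now provides it via set_ptes(), and PFN_PTE_SHIFT tells the generic code how to advance the PFN between consecutive PTEs. Roughly, paraphrasing the generic helper in include/linux/pgtable.h (lazy-MMU and page-table-check hooks omitted):

    static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
    			    pte_t *ptep, pte_t pte, unsigned int nr)
    {
    	for (;;) {
    		set_pte(ptep, pte);
    		if (--nr == 0)
    			break;
    		ptep++;
    		/* step to the next page frame */
    		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
    	}
    }
    /* set_pte_at(mm, addr, ptep, pte) is then set_ptes(mm, addr, ptep, pte, 1) */
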
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index d1978e004054..47e3801b526a 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -634,7 +634,6 @@ ia64_imva (void *addr)
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE L1_CACHE_BYTES
static inline void
@@ -649,8 +648,6 @@ prefetchw (const void *x)
ia64_lfetch_excl(ia64_lfhint_none, x);
}
-#define spin_lock_prefetch(x) prefetchw(x)
-
extern unsigned long boot_option_idle_override;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index f8c74ffeeefb..83d8609aec03 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -372,3 +372,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
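
fchmodat2() is fchmodat() with the flags argument the original syscall lacked (AT_SYMLINK_NOFOLLOW, AT_EMPTY_PATH). Note the table index is not the ia64 syscall number: ia64 numbers are offset by 1024, so entry 452 should end up as __NR_fchmodat2 = 1476 in the generated uapi header. A hedged userspace sketch using the raw syscall (glibc had no wrapper when this landed; __NR_fchmodat2 requires Linux 6.6+ headers):

    #define _GNU_SOURCE
    #include <fcntl.h>		/* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
    #include <sys/syscall.h>	/* __NR_fchmodat2 */
    #include <sys/types.h>
    #include <unistd.h>

    /* Thin wrapper; assumes __NR_fchmodat2 exists in the headers in use. */
    static int my_fchmodat2(int dfd, const char *path, mode_t mode,
    			unsigned int flags)
    {
    	return (int)syscall(__NR_fchmodat2, dfd, path, mode, flags);
    }

    int main(void)
    {
    	/* Unlike fchmodat(2), a nonzero flags argument is honored: */
    	return my_fchmodat2(AT_FDCWD, "somefile", 0600, AT_SYMLINK_NOFOLLOW);
    }
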
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 7f5353e28516..05b0f2f0c073 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -50,30 +50,44 @@ void
__ia64_sync_icache_dcache (pte_t pte)
{
unsigned long addr;
- struct page *page;
+ struct folio *folio;
- page = pte_page(pte);
- addr = (unsigned long) page_address(page);
+ folio = page_folio(pte_page(pte));
+ addr = (unsigned long)folio_address(folio);
- if (test_bit(PG_arch_1, &page->flags))
+ if (test_bit(PG_arch_1, &folio->flags))
return; /* i-cache is already coherent with d-cache */
- flush_icache_range(addr, addr + page_size(page));
- set_bit(PG_arch_1, &page->flags); /* mark page as clean */
+ flush_icache_range(addr, addr + folio_size(folio));
+ set_bit(PG_arch_1, &folio->flags); /* mark page as clean */
}
/*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * Since DMA is i-cache coherent, any (complete) folios that were written via
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
unsigned long pfn = PHYS_PFN(paddr);
+ struct folio *folio = page_folio(pfn_to_page(pfn));
+ ssize_t left = size;
+ size_t offset = offset_in_folio(folio, paddr);
- do {
+ if (offset) {
+ left -= folio_size(folio) - offset;
+ if (left <= 0)
+ return;
+ folio = folio_next(folio);
+ }
+
+ while (left >= (ssize_t)folio_size(folio)) {
+ left -= folio_size(folio);
- set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
- } while (++pfn <= PHYS_PFN(paddr + size - 1));
+ set_bit(PG_arch_1, &folio->flags);
+ if (!left)
+ break;
+ folio = folio_next(folio);
+ }
}
inline void
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 92b81bc91397..711b6abc822e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -29,13 +29,9 @@ early_ioremap (unsigned long phys_addr, unsigned long size)
return __ioremap_uc(phys_addr);
}
-void __iomem *
-ioremap (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long flags)
{
- void __iomem *addr;
- struct vm_struct *area;
- unsigned long offset;
- pgprot_t prot;
u64 attr;
unsigned long gran_base, gran_size;
unsigned long page_base;
@@ -68,36 +64,12 @@ ioremap (unsigned long phys_addr, unsigned long size)
*/
page_base = phys_addr & PAGE_MASK;
size = PAGE_ALIGN(phys_addr + size) - page_base;
- if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
- prot = PAGE_KERNEL;
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
-
- area->phys_addr = phys_addr;
- addr = (void __iomem *) area->addr;
- if (ioremap_page_range((unsigned long) addr,
- (unsigned long) addr + size, phys_addr, prot)) {
- vunmap((void __force *) addr);
- return NULL;
- }
-
- return (void __iomem *) (offset + (char __iomem *)addr);
- }
+ if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB)
+ return generic_ioremap_prot(phys_addr, size, __pgprot(flags));
return __ioremap_uc(phys_addr);
}
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_prot);
void __iomem *
ioremap_uc(unsigned long phys_addr, unsigned long size)
@@ -114,8 +86,7 @@ early_iounmap (volatile void __iomem *addr, unsigned long size)
{
}
-void
-iounmap (volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
{
if (REGION_NUMBER(addr) == RGN_GATE)
vunmap((void *) ((unsigned long) addr & PAGE_MASK));