Diffstat (limited to 'arch/parisc/kernel')
45 files changed, 1045 insertions, 878 deletions
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 2d1478fc4aa5..9157bc8bdf41 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -3,10 +3,10 @@ # Makefile for arch/parisc/kernel # -extra-y := vmlinux.lds +always-$(KBUILD_BUILTIN) := vmlinux.lds obj-y := head.o cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \ - pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ + syscall.o entry.o sys_parisc.o firmware.o \ ptrace.o hardware.o inventory.o drivers.o alternative.o \ signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \ process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \ @@ -38,6 +38,7 @@ obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += topology.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 94652e13c260..3de4b5933b10 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -13,6 +13,7 @@ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org> */ +#define COMPILE_OFFSETS #include <linux/types.h> #include <linux/sched.h> @@ -257,6 +258,8 @@ int main(void) BLANK(); DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); + DEFINE(TIF_32BIT_PA_BIT, 31-TIF_32BIT); + BLANK(); DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); @@ -275,6 +278,8 @@ int main(void) * and kernel data on physical huge pages */ #ifdef CONFIG_HUGETLB_PAGE DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT); +#elif !defined(CONFIG_64BIT) + DEFINE(HUGEPAGE_SIZE, 4*1024*1024); #else DEFINE(HUGEPAGE_SIZE, PAGE_SIZE); #endif diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index b55b35c89d6a..4c5240d3a3c7 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -20,6 +20,7 @@ #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/syscalls.h> +#include <linux/vmalloc.h> #include <asm/pdc.h> #include <asm/cache.h> #include <asm/cacheflush.h> @@ -31,20 +32,31 @@ #include <asm/mmu_context.h> #include <asm/cachectl.h> +#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE) + +/* + * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number + * of page flushes done flush_cache_page_if_present. There are some + * pros and cons in using this option. It may increase the risk of + * random segmentation faults. 
+ */ +#define CONFIG_FLUSH_PAGE_ACCESSED 0 + int split_tlb __ro_after_init; int dcache_stride __ro_after_init; int icache_stride __ro_after_init; EXPORT_SYMBOL(dcache_stride); +/* Internal implementation in arch/parisc/kernel/pacache.S */ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); EXPORT_SYMBOL(flush_dcache_page_asm); void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); - -/* Internal implementation in arch/parisc/kernel/pacache.S */ void flush_data_cache_local(void *); /* flushes local data-cache only */ void flush_instruction_cache_local(void); /* flushes local code-cache only */ +static void flush_kernel_dcache_page_addr(const void *addr); + /* On some machines (i.e., ones with the Merced bus), there can be * only a single PxTLB broadcast at a time; this must be guaranteed * by software. We need a spinlock around all TLB flushes to ensure @@ -58,7 +70,7 @@ int pa_serialize_tlb_flushes __ro_after_init; struct pdc_cache_info cache_info __ro_after_init; #ifndef CONFIG_PA20 -static struct pdc_btlb_info btlb_info __ro_after_init; +struct pdc_btlb_info btlb_info; #endif DEFINE_STATIC_KEY_TRUE(parisc_has_cache); @@ -94,11 +106,11 @@ static inline void flush_data_cache(void) /* Kernel virtual address of pfn. */ #define pfn_va(pfn) __va(PFN_PHYS(pfn)) -void -__update_cache(pte_t pte) +void __update_cache(pte_t pte) { unsigned long pfn = pte_pfn(pte); - struct page *page; + struct folio *folio; + unsigned int nr; /* We don't have pte special. As a result, we can be called with an invalid pfn and we don't need to flush the kernel dcache page. @@ -106,13 +118,17 @@ __update_cache(pte_t pte) if (!pfn_valid(pfn)) return; - page = pfn_to_page(pfn); - if (page_mapping_file(page) && - test_bit(PG_dcache_dirty, &page->flags)) { - flush_kernel_dcache_page_addr(pfn_va(pfn)); - clear_bit(PG_dcache_dirty, &page->flags); + folio = page_folio(pfn_to_page(pfn)); + pfn = folio_pfn(folio); + nr = folio_nr_pages(folio); + if (folio_flush_mapping(folio) && + test_bit(PG_dcache_dirty, &folio->flags.f)) { + while (nr--) + flush_kernel_dcache_page_addr(pfn_va(pfn + nr)); + clear_bit(PG_dcache_dirty, &folio->flags.f); } else if (parisc_requires_coherency()) - flush_kernel_dcache_page_addr(pfn_va(pfn)); + while (nr--) + flush_kernel_dcache_page_addr(pfn_va(pfn + nr)); } void @@ -260,11 +276,9 @@ parisc_cache_init(void) icache_stride = CAFL_STRIDE(cache_info.ic_conf); #undef CAFL_STRIDE -#ifndef CONFIG_PA20 - if (pdc_btlb_info(&btlb_info) < 0) { - memset(&btlb_info, 0, sizeof btlb_info); - } -#endif + /* stride needs to be non-zero, otherwise cache flushes will not work */ + WARN_ON(cache_info.dc_size && dcache_stride == 0); + WARN_ON(cache_info.ic_size && icache_stride == 0); if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) == PDC_MODEL_NVA_UNSUPPORTED) { @@ -319,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, { if (!static_branch_likely(&parisc_has_cache)) return; + + /* + * The TLB is the engine of coherence on parisc. The CPU is + * entitled to speculate any page with a TLB mapping, so here + * we kill the mapping then flush the page along a special flush + * only alias mapping. This guarantees that the page is no-longer + * in the cache for any process and nor may it be speculatively + * read in (until the user or kernel specifically accesses it, + * of course). 
+ */ + flush_tlb_page(vma, vmaddr); + preempt_disable(); flush_dcache_page_asm(physaddr, vmaddr); if (vma->vm_flags & VM_EXEC) @@ -326,46 +352,61 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, preempt_enable(); } -static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr) +static void flush_kernel_dcache_page_addr(const void *addr) { - unsigned long flags, space, pgd, prot; -#ifdef CONFIG_TLB_PTLOCK - unsigned long pgd_lock; -#endif + unsigned long vaddr = (unsigned long)addr; + unsigned long flags; - vmaddr &= PAGE_MASK; + /* Purge TLB entry to remove translation on all CPUs */ + purge_tlb_start(flags); + pdtlb(SR_KERNEL, addr); + purge_tlb_end(flags); + /* Use tmpalias flush to prevent data cache move-in */ preempt_disable(); + flush_dcache_page_asm(__pa(vaddr), vaddr); + preempt_enable(); +} - /* Set context for flush */ - local_irq_save(flags); - prot = mfctl(8); - space = mfsp(SR_USER); - pgd = mfctl(25); -#ifdef CONFIG_TLB_PTLOCK - pgd_lock = mfctl(28); -#endif - switch_mm_irqs_off(NULL, vma->vm_mm, NULL); - local_irq_restore(flags); - - flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE); - flush_tlb_page(vma, vmaddr); +static void flush_kernel_icache_page_addr(const void *addr) +{ + unsigned long vaddr = (unsigned long)addr; + unsigned long flags; - /* Restore previous context */ - local_irq_save(flags); -#ifdef CONFIG_TLB_PTLOCK - mtctl(pgd_lock, 28); -#endif - mtctl(pgd, 25); - mtsp(space, SR_USER); - mtctl(prot, 8); - local_irq_restore(flags); + /* Purge TLB entry to remove translation on all CPUs */ + purge_tlb_start(flags); + pdtlb(SR_KERNEL, addr); + purge_tlb_end(flags); + /* Use tmpalias flush to prevent instruction cache move-in */ + preempt_disable(); + flush_icache_page_asm(__pa(vaddr), vaddr); preempt_enable(); } +void kunmap_flush_on_unmap(const void *addr) +{ + flush_kernel_dcache_page_addr(addr); +} +EXPORT_SYMBOL(kunmap_flush_on_unmap); + +void flush_icache_pages(struct vm_area_struct *vma, struct page *page, + unsigned int nr) +{ + void *kaddr = page_address(page); + + for (;;) { + flush_kernel_dcache_page_addr(kaddr); + flush_kernel_icache_page_addr(kaddr); + if (--nr == 0) + break; + kaddr += PAGE_SIZE; + } +} + +/* + * Walk page directory for MM to find PTEP pointer for address ADDR. + */ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) { pte_t *ptep = NULL; @@ -388,33 +429,71 @@ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) return ptep; } -static inline bool pte_needs_flush(pte_t pte) +static inline bool pte_needs_cache_flush(pte_t pte) { return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)) == (_PAGE_PRESENT | _PAGE_ACCESSED); } -void flush_dcache_page(struct page *page) +/* + * Return user physical address. Returns 0 if page is not present. 
+ */ +static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr) +{ + unsigned long flags, space, pgd, prot, pa; +#ifdef CONFIG_TLB_PTLOCK + unsigned long pgd_lock; +#endif + + /* Save context */ + local_irq_save(flags); + prot = mfctl(8); + space = mfsp(SR_USER); + pgd = mfctl(25); +#ifdef CONFIG_TLB_PTLOCK + pgd_lock = mfctl(28); +#endif + + /* Set context for lpa_user */ + switch_mm_irqs_off(NULL, mm, NULL); + pa = lpa_user(addr); + + /* Restore previous context */ +#ifdef CONFIG_TLB_PTLOCK + mtctl(pgd_lock, 28); +#endif + mtctl(pgd, 25); + mtsp(space, SR_USER); + mtctl(prot, 8); + local_irq_restore(flags); + + return pa; +} + +void flush_dcache_folio(struct folio *folio) { - struct address_space *mapping = page_mapping_file(page); - struct vm_area_struct *mpnt; - unsigned long offset; + struct address_space *mapping = folio_flush_mapping(folio); + struct vm_area_struct *vma; unsigned long addr, old_addr = 0; + void *kaddr; unsigned long count = 0; - unsigned long flags; + unsigned long i, nr, flags; pgoff_t pgoff; if (mapping && !mapping_mapped(mapping)) { - set_bit(PG_dcache_dirty, &page->flags); + set_bit(PG_dcache_dirty, &folio->flags.f); return; } - flush_kernel_dcache_page_addr(page_address(page)); + nr = folio_nr_pages(folio); + kaddr = folio_address(folio); + for (i = 0; i < nr; i++) + flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE); if (!mapping) return; - pgoff = page->index; + pgoff = folio->index; /* * We have carefully arranged in arch_get_unmapped_area() that @@ -424,53 +503,44 @@ void flush_dcache_page(struct page *page) * on machines that support equivalent aliasing */ flush_dcache_mmap_lock_irqsave(mapping, flags); - vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { - offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; - addr = mpnt->vm_start + offset; - if (parisc_requires_coherency()) { - bool needs_flush = false; - pte_t *ptep; - - ptep = get_ptep(mpnt->vm_mm, addr); - if (ptep) { - needs_flush = pte_needs_flush(*ptep); - pte_unmap(ptep); - } - if (needs_flush) - flush_user_cache_page(mpnt, addr); + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) { + unsigned long offset = pgoff - vma->vm_pgoff; + unsigned long pfn = folio_pfn(folio); + + addr = vma->vm_start; + nr = folio_nr_pages(folio); + if (offset > -nr) { + pfn -= offset; + nr += offset; } else { + addr += offset * PAGE_SIZE; + } + if (addr + nr * PAGE_SIZE > vma->vm_end) + nr = (vma->vm_end - addr) / PAGE_SIZE; + + if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) + != (addr & (SHM_COLOUR - 1))) { + for (i = 0; i < nr; i++) + __flush_cache_page(vma, + addr + i * PAGE_SIZE, + (pfn + i) * PAGE_SIZE); /* - * The TLB is the engine of coherence on parisc: - * The CPU is entitled to speculate any page - * with a TLB mapping, so here we kill the - * mapping then flush the page along a special - * flush only alias mapping. This guarantees that - * the page is no-longer in the cache for any - * process and nor may it be speculatively read - * in (until the user or kernel specifically - * accesses it, of course) + * Software is allowed to have any number + * of private mappings to a page. */ - flush_tlb_page(mpnt, addr); - if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) - != (addr & (SHM_COLOUR - 1))) { - __flush_cache_page(mpnt, addr, page_to_phys(page)); - /* - * Software is allowed to have any number - * of private mappings to a page. 
- */ - if (!(mpnt->vm_flags & VM_SHARED)) - continue; - if (old_addr) - pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", - old_addr, addr, mpnt->vm_file); + if (!(vma->vm_flags & VM_SHARED)) + continue; + if (old_addr) + pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", + old_addr, addr, vma->vm_file); + if (nr == folio_nr_pages(folio)) old_addr = addr; - } } WARN_ON(++count == 4096); } flush_dcache_mmap_unlock_irqrestore(mapping, flags); } -EXPORT_SYMBOL(flush_dcache_page); +EXPORT_SYMBOL(flush_dcache_folio); /* Defined in arch/parisc/kernel/pacache.S */ EXPORT_SYMBOL(flush_kernel_dcache_range_asm); @@ -541,11 +611,7 @@ void __init parisc_setup_cache_timing(void) threshold/1024); set_tlb_threshold: - if (threshold > FLUSH_TLB_THRESHOLD) - parisc_tlb_flush_threshold = threshold; - else - parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; - + parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD); printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", parisc_tlb_flush_threshold/1024); } @@ -554,35 +620,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long); extern void clear_user_page_asm(void *, unsigned long); extern void copy_user_page_asm(void *, void *, unsigned long); -void flush_kernel_dcache_page_addr(const void *addr) -{ - unsigned long flags; - - flush_kernel_dcache_page_asm(addr); - purge_tlb_start(flags); - pdtlb(SR_KERNEL, addr); - purge_tlb_end(flags); -} -EXPORT_SYMBOL(flush_kernel_dcache_page_addr); - static void flush_cache_page_if_present(struct vm_area_struct *vma, - unsigned long vmaddr, unsigned long pfn) + unsigned long vmaddr) { +#if CONFIG_FLUSH_PAGE_ACCESSED bool needs_flush = false; - pte_t *ptep; + pte_t *ptep, pte; - /* - * The pte check is racy and sometimes the flush will trigger - * a non-access TLB miss. Hopefully, the page has already been - * flushed. 
- */ ptep = get_ptep(vma->vm_mm, vmaddr); if (ptep) { - needs_flush = pte_needs_flush(*ptep); + pte = ptep_get(ptep); + needs_flush = pte_needs_cache_flush(pte); pte_unmap(ptep); } if (needs_flush) - flush_cache_page(vma, vmaddr, pfn); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte))); +#else + struct mm_struct *mm = vma->vm_mm; + unsigned long physaddr = get_upa(mm, vmaddr); + + if (physaddr) + __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr)); +#endif } void copy_user_highpage(struct page *to, struct page *from, @@ -592,7 +651,7 @@ void copy_user_highpage(struct page *to, struct page *from, kfrom = kmap_local_page(from); kto = kmap_local_page(to); - flush_cache_page_if_present(vma, vaddr, page_to_pfn(from)); + __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from))); copy_page_asm(kto, kfrom); kunmap_local(kto); kunmap_local(kfrom); @@ -601,16 +660,17 @@ void copy_user_highpage(struct page *to, struct page *from, void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long user_vaddr, void *dst, void *src, int len) { - flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page)); + __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page))); memcpy(dst, src, len); - flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); + flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst)); } void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long user_vaddr, void *dst, void *src, int len) { - flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page)); + __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page))); memcpy(dst, src, len); + flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src)); } /* __flush_tlb_range() @@ -644,32 +704,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - unsigned long addr, pfn; - pte_t *ptep; - - for (addr = start; addr < end; addr += PAGE_SIZE) { - bool needs_flush = false; - /* - * The vma can contain pages that aren't present. Although - * the pte search is expensive, we need the pte to find the - * page pfn and to check whether the page should be flushed. 
- */ - ptep = get_ptep(vma->vm_mm, addr); - if (ptep) { - needs_flush = pte_needs_flush(*ptep); - pfn = pte_pfn(*ptep); - pte_unmap(ptep); - } - if (needs_flush) { - if (parisc_requires_coherency()) { - flush_user_cache_page(vma, addr); - } else { - if (WARN_ON(!pfn_valid(pfn))) - return; - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); - } - } - } + unsigned long addr; + + for (addr = start; addr < end; addr += PAGE_SIZE) + flush_cache_page_if_present(vma, addr); } static inline unsigned long mm_total_size(struct mm_struct *mm) @@ -720,21 +758,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) return; flush_tlb_range(vma, start, end); - flush_cache_all(); + if (vma->vm_flags & VM_EXEC) + flush_cache_all(); + else + flush_data_cache(); return; } - flush_cache_pages(vma, start, end); + flush_cache_pages(vma, start & PAGE_MASK, end); } void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { - if (WARN_ON(!pfn_valid(pfn))) - return; - if (parisc_requires_coherency()) - flush_user_cache_page(vma, vmaddr); - else - __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) @@ -742,34 +778,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon if (!PageAnon(page)) return; - if (parisc_requires_coherency()) { - if (vma->vm_flags & VM_SHARED) - flush_data_cache(); - else - flush_user_cache_page(vma, vmaddr); + __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page))); +} + +int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pte_t pte = ptep_get(ptep); + + if (!pte_young(pte)) + return 0; + set_pte(ptep, pte_mkold(pte)); +#if CONFIG_FLUSH_PAGE_ACCESSED + __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte))); +#endif + return 1; +} + +/* + * After a PTE is cleared, we have no way to flush the cache for + * the physical page. On PA8800 and PA8900 processors, these lines + * can cause random cache corruption. Thus, we must flush the cache + * as well as the TLB when clearing a PTE that's valid. + */ +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + struct mm_struct *mm = (vma)->vm_mm; + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + unsigned long pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + else if (pte_accessible(mm, pte)) + flush_tlb_page(vma, addr); + + return pte; +} + +/* + * The physical address for pages in the ioremap case can be obtained + * from the vm_struct struct. I wasn't able to successfully handle the + * vmalloc and vmap cases. We have an array of struct page pointers in + * the uninitialized vmalloc case but the flush failed using page_to_pfn. 
+ */ +void flush_cache_vmap(unsigned long start, unsigned long end) +{ + unsigned long addr, physaddr; + struct vm_struct *vm; + + /* Prevent cache move-in */ + flush_tlb_kernel_range(start, end); + + if (end - start >= parisc_cache_flush_threshold) { + flush_cache_all(); return; } - flush_tlb_page(vma, vmaddr); - preempt_disable(); - flush_dcache_page_asm(page_to_phys(page), vmaddr); - preempt_enable(); + if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) { + flush_cache_all(); + return; + } + + vm = find_vm_area((void *)start); + if (!vm) { + flush_cache_all(); + return; + } + + /* The physical addresses of IOREMAP regions are contiguous */ + if (vm->flags & VM_IOREMAP) { + physaddr = vm->phys_addr; + for (addr = start; addr < end; addr += PAGE_SIZE) { + preempt_disable(); + flush_dcache_page_asm(physaddr, start); + flush_icache_page_asm(physaddr, start); + preempt_enable(); + physaddr += PAGE_SIZE; + } + return; + } + + flush_cache_all(); +} +EXPORT_SYMBOL(flush_cache_vmap); + +/* + * The vm_struct has been retired and the page table is set up. The + * last page in the range is a guard page. Its physical address can't + * be determined using lpa, so there is no way to flush the range + * using flush_dcache_page_asm. + */ +void flush_cache_vunmap(unsigned long start, unsigned long end) +{ + /* Prevent cache move-in */ + flush_tlb_kernel_range(start, end); + flush_data_cache(); } +EXPORT_SYMBOL(flush_cache_vunmap); +/* + * On systems with PA8800/PA8900 processors, there is no way to flush + * a vmap range other than using the architected loop to flush the + * entire cache. The page directory is not set up, so we can't use + * fdc, etc. FDCE/FICE don't work to flush a portion of the cache. + * L2 is physically indexed but FDCE/FICE instructions in virtual + * mode output their virtual address on the core bus, not their + * real address. As a result, the L2 cache index formed from the + * virtual address will most likely not be the same as the L2 index + * formed from the real address. 
+ */ void flush_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - (unsigned long)size >= parisc_cache_flush_threshold) { - flush_tlb_kernel_range(start, end); - flush_data_cache(); + flush_tlb_kernel_range(start, end); + + if (!static_branch_likely(&parisc_has_dcache)) + return; + + /* If interrupts are disabled, we can only do local flush */ + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) { + flush_data_cache_local(NULL); return; } - flush_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + flush_data_cache(); } EXPORT_SYMBOL(flush_kernel_vmap_range); @@ -781,15 +916,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size) /* Ensure DMA is complete */ asm_syncdma(); - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - (unsigned long)size >= parisc_cache_flush_threshold) { - flush_tlb_kernel_range(start, end); - flush_data_cache(); + flush_tlb_kernel_range(start, end); + + if (!static_branch_likely(&parisc_has_dcache)) + return; + + /* If interrupts are disabled, we can only do local flush */ + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) { + flush_data_cache_local(NULL); return; } - purge_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + flush_data_cache(); } EXPORT_SYMBOL(invalidate_kernel_vmap_range); @@ -817,7 +955,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes, #endif " fic,m %3(%4,%0)\n" "2: sync\n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1") : "+r" (start), "+r" (error) : "r" (end), "r" (dcache_stride), "i" (SR_USER)); } @@ -832,7 +970,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes, #endif " fdc,m %3(%4,%0)\n" "2: sync\n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1") : "+r" (start), "+r" (error) : "r" (end), "r" (icache_stride), "i" (SR_USER)); } diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 8f4b77648491..8d23fe42b0ce 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -97,7 +97,7 @@ static int for_each_padev(int (*fn)(struct device *, void *), void * data) * @driver: the PA-RISC driver to try * @dev: the PA-RISC device to try */ -static int match_device(struct parisc_driver *driver, struct parisc_device *dev) +static int match_device(const struct parisc_driver *driver, struct parisc_device *dev) { const struct parisc_device_id *ids; @@ -548,7 +548,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) return dev; } -static int parisc_generic_match(struct device *dev, struct device_driver *drv) +static int parisc_generic_match(struct device *dev, const struct device_driver *drv) { return match_device(to_parisc_driver(drv), to_parisc_device(dev)); } @@ -618,7 +618,7 @@ static struct attribute *parisc_device_attrs[] = { }; ATTRIBUTE_GROUPS(parisc_device); -struct bus_type parisc_bus_type = { +const struct bus_type parisc_bus_type = { .name = "parisc", .match = parisc_generic_match, .uevent = parisc_uevent, @@ -742,7 +742,7 @@ parse_tree_node(struct device *parent, int index, struct hardware_path *modpath) }; if (device_for_each_child(parent, &recurse_data, descend_children)) - { /* nothing */ }; + { /* nothing */ } return d.dev; } @@ -925,10 +925,10 @@ static __init void qemu_header(void) pr_info("#define PARISC_MODEL \"%s\"\n\n", 
boot_cpu_data.pdc.sys_model_name); - pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, " - "0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n", #define p ((unsigned long *)&boot_cpu_data.pdc.model) - p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]); + pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, " + "0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9]); #undef p pr_info("#define PARISC_PDC_VERSION 0x%04lx\n\n", @@ -995,6 +995,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) struct pdc_system_map_mod_info pdc_mod_info; struct pdc_module_path mod_path; + memset(&iodc_data, 0, sizeof(iodc_data)); status = pdc_iodc_read(&count, hpa, 0, &iodc_data, sizeof(iodc_data)); if (status != PDC_OK) { @@ -1004,11 +1005,19 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) pr_info("\n"); + /* Prevent hung task messages when printing on serial console */ + cond_resched(); + pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n", hpa, parisc_hardware_description(&dev->id)); mod_index = 0; do { + /* initialize device path for old machines */ + memset(&mod_path, 0xff, sizeof(mod_path)); + get_node_path(dev->dev.parent, &mod_path.path); + mod_path.path.mod = dev->hw_path; + memset(&pdc_mod_info, 0, sizeof(pdc_mod_info)); status = pdc_system_map_find_mods(&pdc_mod_info, &mod_path, mod_index++); } while (status == PDC_OK && pdc_mod_info.mod_addr != hpa); @@ -1034,11 +1043,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) (unsigned char)mod_path.path.bc[3], (unsigned char)mod_path.path.bc[4], (unsigned char)mod_path.path.bc[5]); - pr_cont(".mod = 0x%x ", mod_path.path.mod); - pr_cont(" },\n"); - pr_cont("\t.layers = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n", - mod_path.layers[0], mod_path.layers[1], mod_path.layers[2], - mod_path.layers[3], mod_path.layers[4], mod_path.layers[5]); + pr_cont(".mod = 0x%x }\n", mod_path.path.mod); pr_cont("};\n"); pr_info("static struct pdc_iodc iodc_data_hpa_%08lx = {\n", hpa); @@ -1058,8 +1063,6 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) DO(checksum); DO(length); #undef DO - pr_cont("\t/* pad: 0x%04x, 0x%04x */\n", - iodc_data.pad[0], iodc_data.pad[1]); pr_cont("};\n"); pr_info("#define HPA_%08lx_num_addr %d\n", hpa, dev->num_addrs); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 0e5ebfe8d9d2..e04c5d806c10 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -25,6 +25,7 @@ #include <asm/traps.h> #include <asm/thread_info.h> #include <asm/alternative.h> +#include <asm/spinlock_types.h> #include <linux/linkage.h> #include <linux/pgtable.h> @@ -35,6 +36,24 @@ .level 2.0 #endif +/* + * We need seven instructions after a TLB insert for it to take effect. + * The PA8800/PA8900 processors are an exception and need 12 instructions. + * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one. 
+ */ +#ifdef CONFIG_64BIT +#define NUM_PIPELINE_INSNS 12 +#else +#define NUM_PIPELINE_INSNS 7 +#endif + + /* Insert num nops */ + .macro insert_nops num + .rept \num + nop + .endr + .endm + /* Get aligned page_table_lock address for this mm from cr28/tr4 */ .macro get_ptl reg mfctl %cr28,\reg @@ -406,7 +425,7 @@ LDREG 0(\ptp),\pte bb,<,n \pte,_PAGE_PRESENT_BIT,3f b \fault - stw \spc,0(\tmp) + stw \tmp1,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif 2: LDREG 0(\ptp),\pte @@ -414,26 +433,20 @@ 3: .endm - /* Release page_table_lock without reloading lock address. - Note that the values in the register spc are limited to - NR_SPACE_IDS (262144). Thus, the stw instruction always - stores a nonzero value even when register spc is 64 bits. - We use an ordered store to ensure all prior accesses are - performed prior to releasing the lock. */ - .macro ptl_unlock0 spc,tmp -#ifdef CONFIG_TLB_PTLOCK -98: or,COND(=) %r0,\spc,%r0 - stw,ma \spc,0(\tmp) -99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) -#endif - .endm - - /* Release page_table_lock. */ - .macro ptl_unlock1 spc,tmp + /* Release page_table_lock if for user space. We use an ordered + store to ensure all prior accesses are performed prior to + releasing the lock. Note stw may not be executed, so we + provide one extra nop when CONFIG_TLB_PTLOCK is defined. */ + .macro ptl_unlock spc,tmp,tmp2 #ifdef CONFIG_TLB_PTLOCK 98: get_ptl \tmp - ptl_unlock0 \spc,\tmp + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2 + or,COND(=) %r0,\spc,%r0 + stw,ma \tmp2,0(\tmp) 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) + insert_nops NUM_PIPELINE_INSNS - 4 +#else + insert_nops NUM_PIPELINE_INSNS - 1 #endif .endm @@ -462,13 +475,13 @@ * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12) + #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT) /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ .macro convert_for_tlb_insert20 pte,tmp #ifdef CONFIG_HUGETLB_PAGE copy \pte,\tmp - extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ - 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte + extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ (63-58)+PAGE_ADD_SHIFT,\pte @@ -476,8 +489,7 @@ depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\ (63-58)+PAGE_ADD_HUGE_SHIFT,\pte #else /* Huge pages disabled */ - extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ - 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte + extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ (63-58)+PAGE_ADD_SHIFT,\pte #endif @@ -487,6 +499,12 @@ * this happens is quite subtle, read below */ .macro make_insert_tlb spc,pte,prot,tmp space_to_prot \spc \prot /* create prot id from space */ + +#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT + /* need to drop DMB bit, as it's used as SPECIAL flag */ + depi 0,_PAGE_SPECIAL_BIT,1,\pte +#endif + /* The following is the real subtlety. 
This is depositing * T <-> _PAGE_REFTRAP * D <-> _PAGE_DIRTY @@ -499,17 +517,18 @@ * Finally, _PAGE_READ goes in the top bit of PL1 (so we * trigger an access rights trap in user space if the user * tries to read an unreadable page */ -#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT - /* need to drop DMB bit, as it's used as SPECIAL flag */ - depi 0,_PAGE_SPECIAL_BIT,1,\pte -#endif depd \pte,8,7,\prot /* PAGE_USER indicates the page can be read with user privileges, * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 - * contains _PAGE_READ) */ + * contains _PAGE_READ). While the kernel can't directly write + * user pages which have _PAGE_WRITE zero, it can read pages + * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel + * exception fault handler doesn't trigger when reading pages + * that aren't user read accessible */ extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 depdi 7,11,3,\prot + /* If we're a gateway page, drop PL2 back to zero for promotion * to kernel privilege (so we can execute the page as kernel). * Any privilege promotion page always denys read and write */ @@ -1039,23 +1058,26 @@ ENTRY_CFI(intr_save) /* for os_hpmc */ STREG %r16, PT_ISR(%r29) STREG %r17, PT_IOR(%r29) -#if 0 && defined(CONFIG_64BIT) - /* Revisit when we have 64-bit code above 4Gb */ - b,n intr_save2 - +#if defined(CONFIG_64BIT) skip_save_ior: /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we * need to adjust iasq/iaoq here in the same way we adjusted isr/ior * above. */ - extrd,u,* %r8,PSW_W_BIT,1,%r1 - cmpib,COND(=),n 1,%r1,intr_save2 + bb,COND(>=),n %r8,PSW_W_BIT,intr_save2 LDREG PT_IASQ0(%r29), %r16 LDREG PT_IAOQ0(%r29), %r17 - /* adjust iasq/iaoq */ + /* adjust iasq0/iaoq0 */ space_adjust %r16,%r17,%r1 STREG %r16, PT_IASQ0(%r29) STREG %r17, PT_IAOQ0(%r29) + + LDREG PT_IASQ1(%r29), %r16 + LDREG PT_IAOQ1(%r29), %r17 + /* adjust iasq1/iaoq1 */ + space_adjust %r16,%r17,%r1 + STREG %r16, PT_IASQ1(%r29) + STREG %r17, PT_IAOQ1(%r29) #else skip_save_ior: #endif @@ -1125,7 +1147,7 @@ dtlb_miss_20w: idtlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1134,6 +1156,7 @@ dtlb_check_alias_20w: idtlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1151,7 +1174,7 @@ nadtlb_miss_20w: idtlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1160,6 +1183,7 @@ nadtlb_check_alias_20w: idtlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1185,7 +1209,7 @@ dtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1195,6 +1219,7 @@ dtlb_check_alias_11: idtlba pte,(va) idtlbp prot,(va) + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1218,7 +1243,7 @@ nadtlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1228,6 +1253,7 @@ nadtlb_check_alias_11: idtlba pte,(va) idtlbp prot,(va) + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1247,7 +1273,7 @@ dtlb_miss_20: idtlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1256,6 +1282,7 @@ dtlb_check_alias_20: idtlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1275,7 +1302,7 @@ nadtlb_miss_20: idtlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1284,6 +1311,7 @@ nadtlb_check_alias_20: idtlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1320,7 +1348,7 @@ itlb_miss_20w: iitlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1344,7 +1372,7 @@ naitlb_miss_20w: iitlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ 
-1353,6 +1381,7 @@ naitlb_check_alias_20w: iitlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1378,7 +1407,7 @@ itlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1402,7 +1431,7 @@ naitlb_miss_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1412,6 +1441,7 @@ naitlb_check_alias_11: iitlba pte,(%sr0, va) iitlbp prot,(%sr0, va) + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1432,7 +1462,7 @@ itlb_miss_20: iitlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1452,7 +1482,7 @@ naitlb_miss_20: iitlbt pte,prot - ptl_unlock1 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1461,6 +1491,7 @@ naitlb_check_alias_20: iitlbt pte,prot + insert_nops NUM_PIPELINE_INSNS - 1 rfir nop @@ -1482,7 +1513,7 @@ dbit_trap_20w: idtlbt pte,prot - ptl_unlock0 spc,t0 + ptl_unlock spc,t0,t1 rfir nop #else @@ -1508,7 +1539,7 @@ dbit_trap_11: mtsp t1, %sr1 /* Restore sr1 */ - ptl_unlock0 spc,t0 + ptl_unlock spc,t0,t1 rfir nop @@ -1528,7 +1559,7 @@ dbit_trap_20: idtlbt pte,prot - ptl_unlock0 spc,t0 + ptl_unlock spc,t0,t1 rfir nop #endif @@ -1815,6 +1846,10 @@ syscall_restore_rfi: extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 depi -1,7,1,%r20 /* T bit */ +#ifdef CONFIG_64BIT + extru,<> %r19,TIF_32BIT_PA_BIT,1,%r0 + depi -1,4,1,%r20 /* W bit */ +#endif STREG %r20,TASK_PT_PSW(%r1) /* Always store space registers, since sr3 can be changed (e.g. fork) */ @@ -1828,7 +1863,6 @@ syscall_restore_rfi: STREG %r25,TASK_PT_IASQ0(%r1) STREG %r25,TASK_PT_IASQ1(%r1) - /* XXX W bit??? */ /* Now if old D bit is clear, it means we didn't save all registers * on syscall entry, so do that now. This only happens on TRACEME * calls, or if someone attached to us while we were on a syscall. diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 6d1c781eb1db..042343492a28 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -74,16 +74,16 @@ static DEFINE_SPINLOCK(pdc_lock); #endif -unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8); -unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8); +static unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8); +static unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8); #ifdef CONFIG_64BIT -#define WIDE_FIRMWARE 0x1 -#define NARROW_FIRMWARE 0x2 +#define WIDE_FIRMWARE PDC_MODEL_OS64 +#define NARROW_FIRMWARE PDC_MODEL_OS32 -/* Firmware needs to be initially set to narrow to determine the +/* Firmware needs to be initially set to narrow to determine the * actual firmware width. */ -int parisc_narrow_firmware __ro_after_init = 2; +int parisc_narrow_firmware __ro_after_init = NARROW_FIRMWARE; #endif /* On most currently-supported platforms, IODC I/O calls are 32-bit calls @@ -123,10 +123,10 @@ static unsigned long f_extend(unsigned long address) #ifdef CONFIG_64BIT if(unlikely(parisc_narrow_firmware)) { if((address & 0xff000000) == 0xf0000000) - return 0xf0f0f0f000000000UL | (u32)address; + return (0xfffffff0UL << 32) | (u32)address; if((address & 0xf0000000) == 0xf0000000) - return 0xffffffff00000000UL | (u32)address; + return (0xffffffffUL << 32) | (u32)address; } #endif return address; @@ -166,10 +166,10 @@ void set_firmware_width_unlocked(void) if (pdc_result[0] != NARROW_FIRMWARE) parisc_narrow_firmware = 0; } - + /** * set_firmware_width - Determine if the firmware is wide or narrow. - * + * * This function must be called before any pdc_* function that uses the * convert_to_wide function. 
*/ @@ -178,7 +178,7 @@ void set_firmware_width(void) unsigned long flags; /* already initialized? */ - if (parisc_narrow_firmware != 2) + if (parisc_narrow_firmware != NARROW_FIRMWARE) return; spin_lock_irqsave(&pdc_lock, flags); @@ -334,7 +334,7 @@ int __pdc_cpu_rendezvous(void) /** * pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state */ -void pdc_cpu_rendezvous_lock(void) +void pdc_cpu_rendezvous_lock(void) __acquires(&pdc_lock) { spin_lock(&pdc_lock); } @@ -342,7 +342,7 @@ void pdc_cpu_rendezvous_lock(void) /** * pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state */ -void pdc_cpu_rendezvous_unlock(void) +void pdc_cpu_rendezvous_unlock(void) __releases(&pdc_lock) { spin_unlock(&pdc_lock); } @@ -464,7 +464,8 @@ int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info, unsigned long flags; spin_lock_irqsave(&pdc_lock, flags); - retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result), + memcpy(pdc_result2, mod_path, sizeof(*mod_path)); + retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result), __pa(pdc_result2), mod_index); convert_to_wide(pdc_result); memcpy(pdc_mod_info, pdc_result, sizeof(*pdc_mod_info)); @@ -687,7 +688,6 @@ int pdc_spaceid_bits(unsigned long *space_bits) return retval; } -#ifndef CONFIG_PA20 /** * pdc_btlb_info - Return block TLB information. * @btlb: The return buffer. @@ -696,18 +696,51 @@ int pdc_spaceid_bits(unsigned long *space_bits) */ int pdc_btlb_info(struct pdc_btlb_info *btlb) { - int retval; + int retval; unsigned long flags; - spin_lock_irqsave(&pdc_lock, flags); - retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0); - memcpy(btlb, pdc_result, sizeof(*btlb)); - spin_unlock_irqrestore(&pdc_lock, flags); + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; - if(retval < 0) { - btlb->max_size = 0; - } - return retval; + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0); + memcpy(btlb, pdc_result, sizeof(*btlb)); + spin_unlock_irqrestore(&pdc_lock, flags); + + if(retval < 0) { + btlb->max_size = 0; + } + return retval; +} + +int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len, + unsigned long entry_info, unsigned long slot) +{ + int retval; + unsigned long flags; + + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32), + (unsigned long) vpage, physpage, len, entry_info, slot); + spin_unlock_irqrestore(&pdc_lock, flags); + return retval; +} + +int pdc_btlb_purge_all(void) +{ + int retval; + unsigned long flags; + + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL); + spin_unlock_irqrestore(&pdc_lock, flags); + return retval; } /** @@ -728,6 +761,9 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address, int retval; unsigned long flags; + if (IS_ENABLED(CONFIG_PA20)) + return PDC_BAD_PROC; + spin_lock_irqsave(&pdc_lock, flags); memcpy(pdc_result2, mod_path, sizeof(*mod_path)); retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result), @@ -737,7 +773,6 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address, return retval; } -#endif /* !CONFIG_PA20 */ /** * pdc_lan_station_id - Get the LAN address. 
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 4d392e4ed358..10fd5b3e63e7 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -20,7 +20,7 @@ #include <asm/assembly.h> #include <asm/sections.h> #include <asm/ftrace.h> -#include <asm/patch.h> +#include <asm/text-patching.h> #define __hot __section(".text.hot") @@ -53,7 +53,7 @@ static void __hot prepare_ftrace_return(unsigned long *parent, static ftrace_func_t ftrace_func; -void notrace __hot ftrace_function_trampoline(unsigned long parent, +asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent, unsigned long self_addr, unsigned long org_sp_gr3, struct ftrace_regs *fregs) @@ -78,7 +78,7 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent, #endif } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER) int ftrace_enable_ftrace_graph_caller(void) { static_key_enable(&ftrace_graph_enable.key); @@ -87,7 +87,7 @@ int ftrace_enable_ftrace_graph_caller(void) int ftrace_disable_ftrace_graph_caller(void) { - static_key_enable(&ftrace_graph_enable.key); + static_key_disable(&ftrace_graph_enable.key); return 0; } #endif @@ -206,6 +206,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct kprobe *p; int bit; + if (unlikely(kprobe_ftrace_disabled)) + return; + bit = ftrace_test_recursion_trylock(ip, parent_ip); if (bit < 0) return; diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index fd15fd4bbb61..96e0264ac961 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -70,9 +70,8 @@ $bss_loop: stw,ma %arg2,4(%r1) stw,ma %arg3,4(%r1) -#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20) - /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU - * and halt kernel if we detect a PA1.x CPU. */ +#if defined(CONFIG_PA20) + /* check for 64-bit capable CPU as required by current kernel */ ldi 32,%r10 mtctl %r10,%cr11 .level 2.0 @@ -180,10 +179,10 @@ $pgt_fill_loop: std %dp,0x18(%r10) #endif -#ifdef CONFIG_64BIT - /* Get PDCE_PROC for monarch CPU. */ #define MEM_PDC_LO 0x388 #define MEM_PDC_HI 0x35C +#ifdef CONFIG_64BIT + /* Get PDCE_PROC for monarch CPU. 
*/ ldw MEM_PDC_LO(%r0),%r3 ldw MEM_PDC_HI(%r0),%r10 depd %r10, 31, 32, %r3 /* move to upper word */ @@ -269,7 +268,17 @@ stext_pdc_ret: tovirt_r1 %r6 mtctl %r6,%cr30 /* restore task thread info */ #endif - + +#ifndef CONFIG_64BIT + /* clear all BTLBs */ + ldi PDC_BLOCK_TLB,%arg0 + load32 PA(stext_pdc_btlb_ret), %rp + ldw MEM_PDC_LO(%r0),%r3 + bv (%r3) + ldi PDC_BTLB_PURGE_ALL,%arg1 +stext_pdc_btlb_ret: +#endif + /* PARANOID: clear user scratch/user space SR's */ mtsp %r0,%sr0 mtsp %r0,%sr1 diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 12c4d4104ade..dff66be65d29 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -365,7 +365,7 @@ union irq_stack_union { volatile unsigned int lock[1]; }; -DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { +static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { .slock = { 1,1,1,1 }, }; #endif @@ -498,7 +498,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs) old_regs = set_irq_regs(regs); local_irq_disable(); - irq_enter(); + irq_enter_rcu(); eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu); if (!eirr_val) @@ -533,7 +533,7 @@ asmlinkage void do_cpu_irq_mask(struct pt_regs *regs) #endif /* CONFIG_IRQSTACKS */ out: - irq_exit(); + irq_exit_rcu(); set_irq_regs(old_regs); return; diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c index e253b134500d..ea51f15bf0e6 100644 --- a/arch/parisc/kernel/jump_label.c +++ b/arch/parisc/kernel/jump_label.c @@ -8,7 +8,7 @@ #include <linux/jump_label.h> #include <linux/bug.h> #include <asm/alternative.h> -#include <asm/patch.h> +#include <asm/text-patching.h> static inline int reassemble_17(int as17) { diff --git a/arch/parisc/kernel/kexec_file.c b/arch/parisc/kernel/kexec_file.c index 8c534204f0fd..3fc82130b6c3 100644 --- a/arch/parisc/kernel/kexec_file.c +++ b/arch/parisc/kernel/kexec_file.c @@ -38,8 +38,8 @@ static void *elf_load(struct kimage *image, char *kernel_buf, for (i = 0; i < image->nr_segments; i++) image->segment[i].mem = __pa(image->segment[i].mem); - pr_debug("Loaded the kernel at 0x%lx, entry at 0x%lx\n", - kernel_load_addr, image->start); + kexec_dprintk("Loaded the kernel at 0x%lx, entry at 0x%lx\n", + kernel_load_addr, image->start); if (initrd != NULL) { kbuf.buffer = initrd; @@ -51,7 +51,7 @@ static void *elf_load(struct kimage *image, char *kernel_buf, if (ret) goto out; - pr_debug("Loaded initrd at 0x%lx\n", kbuf.mem); + kexec_dprintk("Loaded initrd at 0x%lx\n", kbuf.mem); image->arch.initrd_start = kbuf.mem; image->arch.initrd_end = kbuf.mem + initrd_len; } @@ -68,7 +68,7 @@ static void *elf_load(struct kimage *image, char *kernel_buf, if (ret) goto out; - pr_debug("Loaded cmdline at 0x%lx\n", kbuf.mem); + kexec_dprintk("Loaded cmdline at 0x%lx\n", kbuf.mem); image->arch.cmdline = kbuf.mem; } out: diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c index b16fa9bac5f4..fee81f877525 100644 --- a/arch/parisc/kernel/kgdb.c +++ b/arch/parisc/kernel/kgdb.c @@ -16,7 +16,7 @@ #include <asm/ptrace.h> #include <asm/traps.h> #include <asm/processor.h> -#include <asm/patch.h> +#include <asm/text-patching.h> #include <asm/cacheflush.h> const struct kgdb_arch arch_kgdb_ops = { diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c index 6e0b86652f30..9255adba67a3 100644 --- a/arch/parisc/kernel/kprobes.c +++ b/arch/parisc/kernel/kprobes.c @@ -12,7 +12,7 @@ #include <linux/kprobes.h> #include <linux/slab.h> #include <asm/cacheflush.h> -#include <asm/patch.h> +#include 
<asm/text-patching.h> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index d214bbe3c2af..4e5d991b2b65 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -41,7 +41,6 @@ #include <linux/moduleloader.h> #include <linux/elf.h> -#include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/ftrace.h> #include <linux/string.h> @@ -173,17 +172,6 @@ static inline int reassemble_22(int as22) ((as22 & 0x0003ff) << 3)); } -void *module_alloc(unsigned long size) -{ - /* using RWX means less protection for modules, but it's - * easier than trying to map the text, data, init_text and - * init_data correctly */ - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, - GFP_KERNEL, - PAGE_KERNEL_RWX, 0, NUMA_NO_NODE, - __builtin_return_address(0)); -} - #ifndef CONFIG_64BIT static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n) { diff --git a/arch/parisc/kernel/pa7300lc.c b/arch/parisc/kernel/pa7300lc.c deleted file mode 100644 index 0d770ac83f70..000000000000 --- a/arch/parisc/kernel/pa7300lc.c +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/parisc/kernel/pa7300lc.c - * - PA7300LC-specific functions - * - * Copyright (C) 2000 Philipp Rumpf */ - -#include <linux/sched.h> -#include <linux/sched/debug.h> -#include <linux/smp.h> -#include <linux/kernel.h> -#include <asm/io.h> -#include <asm/ptrace.h> -#include <asm/machdep.h> - -/* CPU register indices */ - -#define MIOC_STATUS 0xf040 -#define MIOC_CONTROL 0xf080 -#define MDERRADD 0xf0e0 -#define DMAERR 0xf0e8 -#define DIOERR 0xf0ec -#define HIDMAMEM 0xf0f4 - -/* this returns the HPA of the CPU it was called on */ -static u32 cpu_hpa(void) -{ - return 0xfffb0000; -} - -static void pa7300lc_lpmc(int code, struct pt_regs *regs) -{ - u32 hpa; - printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id()); - - show_regs(regs); - - hpa = cpu_hpa(); - printk(KERN_WARNING - "MIOC_CONTROL %08x\n" "MIOC_STATUS %08x\n" - "MDERRADD %08x\n" "DMAERR %08x\n" - "DIOERR %08x\n" "HIDMAMEM %08x\n", - gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS), - gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR), - gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM)); -} - -void pa7300lc_init(void) -{ - cpu_lpmc = pa7300lc_lpmc; -} diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 00297e8e1c88..509146a52725 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/syscalls.h> +#include <linux/libgcc.h> #include <linux/string.h> EXPORT_SYMBOL(memset); @@ -21,6 +22,8 @@ EXPORT_SYMBOL(memset); #include <linux/atomic.h> EXPORT_SYMBOL(__xchg8); EXPORT_SYMBOL(__xchg32); +EXPORT_SYMBOL(__cmpxchg_u8); +EXPORT_SYMBOL(__cmpxchg_u16); EXPORT_SYMBOL(__cmpxchg_u32); EXPORT_SYMBOL(__cmpxchg_u64); #ifdef CONFIG_SMP @@ -40,9 +43,6 @@ EXPORT_SYMBOL($global$); #endif #include <asm/io.h> -EXPORT_SYMBOL(memcpy_toio); -EXPORT_SYMBOL(memcpy_fromio); -EXPORT_SYMBOL(memset_io); extern void $$divI(void); extern void $$divU(void); @@ -92,12 +92,6 @@ EXPORT_SYMBOL($$divI_12); EXPORT_SYMBOL($$divI_14); EXPORT_SYMBOL($$divI_15); -extern void __ashrdi3(void); -extern void __ashldi3(void); -extern void __lshrdi3(void); -extern void __muldi3(void); -extern void __ucmpdi2(void); - EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); 
EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c index e59574f65e64..35dd764b871e 100644 --- a/arch/parisc/kernel/patch.c +++ b/arch/parisc/kernel/patch.c @@ -13,7 +13,7 @@ #include <asm/cacheflush.h> #include <asm/fixmap.h> -#include <asm/patch.h> +#include <asm/text-patching.h> struct patch { void *addr; diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index d818ece23b4a..bf9f192c826e 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -39,7 +39,7 @@ static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; static unsigned long pcxl_used_bytes __read_mostly; static unsigned long pcxl_used_pages __read_mostly; -extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */ +unsigned long pcxl_dma_start __ro_after_init; /* pcxl dma mapping area start */ static DEFINE_SPINLOCK(pcxl_res_lock); static char *pcxl_res_map; static int pcxl_res_hint; @@ -381,7 +381,7 @@ pcxl_dma_init(void) pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL, get_order(pcxl_res_size)); memset(pcxl_res_map, 0, pcxl_res_size); - proc_gsc_root = proc_mkdir("gsc", NULL); + proc_gsc_root = proc_mkdir("bus/gsc", NULL); if (!proc_gsc_root) printk(KERN_WARNING "pcxl_dma_init: Unable to create gsc /proc dir entry\n"); @@ -417,14 +417,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, map_uncached_pages(vaddr, size, paddr); *dma_handle = (dma_addr_t) paddr; -#if 0 -/* This probably isn't needed to support EISA cards. -** ISA cards will certainly only support 24-bit DMA addressing. -** Not clear if we can, want, or need to support ISA. -*/ - if (!dev || *dev->coherent_dma_mask < 0xffffffff) - gfp |= GFP_DMA; -#endif return (void *)vaddr; } diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c index 0a9d7008ef2a..d477d0177c2f 100644 --- a/arch/parisc/kernel/pdc_chassis.c +++ b/arch/parisc/kernel/pdc_chassis.c @@ -31,6 +31,7 @@ #include <asm/processor.h> #include <asm/pdc.h> #include <asm/pdcpat.h> +#include <asm/led.h> #define PDC_CHASSIS_VER "0.05" @@ -234,6 +235,11 @@ int pdc_chassis_send_status(int message) } else retval = -1; #endif /* CONFIG_64BIT */ } /* if (pdc_chassis_enabled) */ + + /* if system has LCD display, update current string */ + if (retval != -1 && IS_ENABLED(CONFIG_CHASSIS_LCD_LED)) + lcd_print(NULL); + #endif /* CONFIG_PDC_CHASSIS */ return retval; } diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 0d24735bd918..b70b67adb855 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -63,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss; #define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 
2UL : 0UL) #define PDT_ADDR_SINGLE_ERR 1UL +#ifdef CONFIG_PROC_FS /* report PDT entries via /proc/meminfo */ void arch_report_meminfo(struct seq_file *m) { @@ -74,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m) seq_printf(m, "PDT_cur_entries: %7lu\n", pdt_status.pdt_entries); } +#endif static int get_info_pat_new(void) { @@ -354,10 +356,8 @@ static int __init pdt_initcall(void) return -ENODEV; kpdtd_task = kthread_run(pdt_mainloop, NULL, "kpdtd"); - if (IS_ERR(kpdtd_task)) - return PTR_ERR(kpdtd_task); - return 0; + return PTR_ERR_OR_ZERO(kpdtd_task); } late_initcall(pdt_initcall); diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index 90b04d8af212..5e10f98ce7b5 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -57,7 +57,7 @@ struct rdr_tbl_ent { static int perf_processor_interface __read_mostly = UNKNOWN_INTF; static int perf_enabled __read_mostly; static DEFINE_SPINLOCK(perf_lock); -struct parisc_device *cpu_device __read_mostly; +static struct parisc_device *cpu_device __read_mostly; /* RDRs to write for PCX-W */ static const int perf_rdrs_W[] = @@ -466,7 +466,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } static const struct file_operations perf_fops = { - .llseek = no_llseek, .read = perf_read, .write = perf_write, .unlocked_ioctl = perf_ioctl, @@ -476,9 +475,9 @@ static const struct file_operations perf_fops = { }; static struct miscdevice perf_dev = { - MISC_DYNAMIC_MINOR, - PA_PERF_DEV, - &perf_fops + .minor = MISC_DYNAMIC_MINOR, + .name = PA_PERF_DEV, + .fops = &perf_fops, }; /* diff --git a/arch/parisc/kernel/perf_event.c b/arch/parisc/kernel/perf_event.c new file mode 100644 index 000000000000..f90b83886ab4 --- /dev/null +++ b/arch/parisc/kernel/perf_event.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance event support for parisc + * + * Copyright (C) 2025 by Helge Deller <deller@gmx.de> + */ + +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <asm/unwind.h> + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + + struct unwind_frame_info info; + + unwind_frame_init_task(&info, current, NULL); + while (1) { + if (unwind_once(&info) < 0 || info.ip == 0) + break; + + if (!__kernel_text_address(info.ip) || + perf_callchain_store(entry, info.ip)) + return; + } +} diff --git a/arch/parisc/kernel/perf_regs.c b/arch/parisc/kernel/perf_regs.c new file mode 100644 index 000000000000..10a1a5f06a18 --- /dev/null +++ b/arch/parisc/kernel/perf_regs.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2025 by Helge Deller <deller@gmx.de> */ + +#include <linux/perf_event.h> +#include <linux/perf_regs.h> +#include <asm/ptrace.h> + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + switch (idx) { + case PERF_REG_PARISC_R0 ... PERF_REG_PARISC_R31: + return regs->gr[idx - PERF_REG_PARISC_R0]; + case PERF_REG_PARISC_SR0 ... PERF_REG_PARISC_SR7: + return regs->sr[idx - PERF_REG_PARISC_SR0]; + case PERF_REG_PARISC_IASQ0 ... PERF_REG_PARISC_IASQ1: + return regs->iasq[idx - PERF_REG_PARISC_IASQ0]; + case PERF_REG_PARISC_IAOQ0 ... 
PERF_REG_PARISC_IAOQ1: + return regs->iaoq[idx - PERF_REG_PARISC_IAOQ0]; + case PERF_REG_PARISC_SAR: /* CR11 */ + return regs->sar; + case PERF_REG_PARISC_IIR: /* CR19 */ + return regs->iir; + case PERF_REG_PARISC_ISR: /* CR20 */ + return regs->isr; + case PERF_REG_PARISC_IOR: /* CR21 */ + return regs->ior; + case PERF_REG_PARISC_IPSW: /* CR22 */ + return regs->ipsw; + } + WARN_ON_ONCE((u32)idx >= PERF_REG_PARISC_MAX); + return 0; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_PARISC_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + if (!IS_ENABLED(CONFIG_64BIT)) + return PERF_SAMPLE_REGS_ABI_32; + + if (test_tsk_thread_flag(task, TIF_32BIT)) + return PERF_SAMPLE_REGS_ABI_32; + + return PERF_SAMPLE_REGS_ABI_64; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index abdbf038d643..e64ab5d2a40d 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -97,18 +97,12 @@ void machine_restart(char *cmd) } -void (*chassis_power_off)(void); - /* * This routine is called from sys_reboot to actually turn off the * machine */ void machine_power_off(void) { - /* If there is a registered power off handler, call it. */ - if (chassis_power_off) - chassis_power_off(); - /* Put the soft power button back under hardware control. * If the user had already pressed the power button, the * following call will immediately power off. */ @@ -207,7 +201,7 @@ arch_initcall(parisc_idle_init); int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { - unsigned long clone_flags = args->flags; + u64 clone_flags = args->flags; unsigned long usp = args->stack; unsigned long tls = args->tls; struct pt_regs *cregs = &(p->thread.regs); @@ -284,17 +278,3 @@ __get_wchan(struct task_struct *p) } while (count++ < MAX_UNWIND_ENTRIES); return 0; } - -static inline unsigned long brk_rnd(void) -{ - return (get_random_u32() & BRK_RND_MASK) << PAGE_SHIFT; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - return ret; -} diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 00b0df97afb1..bf73562706b2 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -26,6 +26,7 @@ #include <asm/processor.h> #include <asm/page.h> #include <asm/pdc.h> +#include <asm/smp.h> #include <asm/pdcpat.h> #include <asm/irq.h> /* for struct irq_region */ #include <asm/parisc-device.h> @@ -171,7 +172,6 @@ static int __init processor_probe(struct parisc_device *dev) p->cpu_num = cpu_info.cpu_num; p->cpu_loc = cpu_info.cpu_loc; - set_cpu_possible(cpuid, true); store_cpu_topology(cpuid); #ifdef CONFIG_SMP @@ -241,9 +241,9 @@ void __init collect_boot_cpu_data(void) /* get CPU-Model Information... 
*/ #define p ((unsigned long *)&boot_cpu_data.pdc.model) if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) { - printk(KERN_INFO - "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", - p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]); + printk(KERN_INFO + "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], p[9]); add_device_randomness(&boot_cpu_data.pdc.model, sizeof(boot_cpu_data.pdc.model)); @@ -367,6 +367,8 @@ int init_per_cpu(int cpunum) /* FUTURE: Enable Performance Monitor : ccr bit 0x20 */ init_percpu_prof(cpunum); + btlb_init_per_cpu(); + return ret; } @@ -377,10 +379,18 @@ int show_cpuinfo (struct seq_file *m, void *v) { unsigned long cpu; + char cpu_name[60], *p; + + /* strip PA path from CPU name to not confuse lscpu */ + strscpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name)); + p = strrchr(cpu_name, '['); + if (p) + *(--p) = 0; for_each_online_cpu(cpu) { - const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); #ifdef CONFIG_SMP + const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); + if (0 == cpuinfo->hpa) continue; #endif @@ -425,8 +435,7 @@ show_cpuinfo (struct seq_file *m, void *v) seq_printf(m, "model\t\t: %s - %s\n", boot_cpu_data.pdc.sys_model_name, - cpuinfo->dev ? - cpuinfo->dev->name : "Unknown"); + cpu_name); seq_printf(m, "hversion\t: 0x%08x\n" "sversion\t: 0x%08x\n", @@ -464,13 +473,6 @@ static struct parisc_driver cpu_driver __refdata = { */ void __init processor_init(void) { - unsigned int cpu; - reset_cpu_topology(); - - /* reset possible mask. We will mark those which are possible. */ - for_each_possible_cpu(cpu) - set_cpu_possible(cpu, false); - register_parisc_driver(&cpu_driver); } diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index ceb45f51d52e..8a17ab7e6e0b 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -562,12 +562,12 @@ static int gpr_set(struct task_struct *target, static const struct user_regset native_regsets[] = { [REGSET_GENERAL] = { - .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, + USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG, .size = sizeof(long), .align = sizeof(long), .regset_get = gpr_get, .set = gpr_set }, [REGSET_FP] = { - .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, + USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG, .size = sizeof(__u64), .align = sizeof(__u64), .regset_get = fpr_get, .set = fpr_set } @@ -629,12 +629,12 @@ static int gpr32_set(struct task_struct *target, */ static const struct user_regset compat_regsets[] = { [REGSET_GENERAL] = { - .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, + USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG, .size = sizeof(compat_long_t), .align = sizeof(compat_long_t), .regset_get = gpr32_get, .set = gpr32_set }, [REGSET_FP] = { - .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, + USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG, .size = sizeof(__u64), .align = sizeof(__u64), .regset_get = fpr_get, .set = fpr_set } diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 573f8303e2b0..ace483b6f19a 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -31,7 +31,6 @@ #include <asm/sections.h> #include <asm/pdc.h> #include <asm/led.h> -#include <asm/machdep.h> /* for pa7300lc_init() proto */ #include <asm/pdc_chassis.h> #include <asm/io.h> #include <asm/setup.h> @@ -40,11 +39,6 @@ static char __initdata command_line[COMMAND_LINE_SIZE]; -/* Intended for ccio/sba/cpu statistics under 
/proc/bus/{runway|gsc} */ -struct proc_dir_entry * proc_runway_root __read_mostly = NULL; -struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; -struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; - static void __init setup_cmdline(char **cmdline_p) { extern unsigned int boot_args[]; @@ -98,8 +92,6 @@ static void __init dma_ops_init(void) "the PA-RISC 1.1 or 2.0 architecture specification.\n"); case pcxl2: - pa7300lc_init(); - break; default: break; } @@ -108,9 +100,6 @@ static void __init dma_ops_init(void) void __init setup_arch(char **cmdline_p) { -#ifdef CONFIG_64BIT - extern int parisc_narrow_firmware; -#endif unwind_init(); init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */ @@ -151,11 +140,6 @@ void __init setup_arch(char **cmdline_p) parisc_cache_init(); paging_init(); -#ifdef CONFIG_CHASSIS_LCD_LED - /* initialize the LCD/LED after boot_cpu_data is available ! */ - led_init(); /* LCD/LED initialization */ -#endif - #ifdef CONFIG_PA11 dma_ops_init(); #endif @@ -196,48 +180,6 @@ const struct seq_operations cpuinfo_op = { .show = show_cpuinfo }; -static void __init parisc_proc_mkdir(void) -{ - /* - ** Can't call proc_mkdir() until after proc_root_init() has been - ** called by start_kernel(). In other words, this code can't - ** live in arch/.../setup.c because start_parisc() calls - ** start_kernel(). - */ - switch (boot_cpu_data.cpu_type) { - case pcxl: - case pcxl2: - if (NULL == proc_gsc_root) - { - proc_gsc_root = proc_mkdir("bus/gsc", NULL); - } - break; - case pcxt_: - case pcxu: - case pcxu_: - case pcxw: - case pcxw_: - case pcxw2: - if (NULL == proc_runway_root) - { - proc_runway_root = proc_mkdir("bus/runway", NULL); - } - break; - case mako: - case mako2: - if (NULL == proc_mckinley_root) - { - proc_mckinley_root = proc_mkdir("bus/mckinley", NULL); - } - break; - default: - /* FIXME: this was added to prevent the compiler - * complaining about missing pcx, pcxs and pcxt - * I'm assuming they have neither gsc nor runway */ - break; - } -} - static struct resource central_bus = { .name = "Central Bus", .start = F_EXTEND(0xfff80000), @@ -294,7 +236,6 @@ static int __init parisc_init(void) { u32 osid = (OS_ID_LINUX << 16); - parisc_proc_mkdir(); parisc_init_resources(); do_device_inventory(); /* probe for hardware */ @@ -329,47 +270,6 @@ static int __init parisc_init(void) apply_alternatives_all(); parisc_setup_cache_timing(); - - /* These are in a non-obvious order, will fix when we have an iotree */ -#if defined(CONFIG_IOSAPIC) - iosapic_init(); -#endif -#if defined(CONFIG_IOMMU_SBA) - sba_init(); -#endif -#if defined(CONFIG_PCI_LBA) - lba_init(); -#endif - - /* CCIO before any potential subdevices */ -#if defined(CONFIG_IOMMU_CCIO) - ccio_init(); -#endif - - /* - * Need to register Asp & Wax before the EISA adapters for the IRQ - * regions. EISA must come before PCI to be sure it gets IRQ region - * 0. 
- */ -#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX) - gsc_init(); -#endif -#ifdef CONFIG_EISA - parisc_eisa_init(); -#endif - -#if defined(CONFIG_HPPB) - hppb_init(); -#endif - -#if defined(CONFIG_GSC_DINO) - dino_init(); -#endif - -#ifdef CONFIG_CHASSIS_LCD_LED - register_led_regions(); /* register LED port info in procfs */ -#endif - return 0; } arch_initcall(parisc_init); diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index f886ff0c75df..e8d27def6c52 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -423,7 +423,7 @@ static void check_syscallno_in_delay_branch(struct pt_regs *regs) regs->gr[31] -= 8; /* delayed branching */ /* Get assembler opcode of code in delay branch */ - uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4); + uaddr = (u32 __user *) ((regs->gr[31] & ~3) + 4); err = get_user(opcode, uaddr); if (err) return; diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 4098f9a0964b..b2d12ab728b1 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -297,7 +297,7 @@ smp_cpu_init(int cpunum) enter_lazy_tlb(&init_mm, current); init_IRQ(); /* make sure no IRQs are enabled or pending */ - start_cpu_itimer(); + parisc_clockevent_init(); } @@ -344,7 +344,7 @@ static int smp_boot_one_cpu(int cpuid, struct task_struct *idle) struct irq_desc *desc = irq_to_desc(i); if (desc && desc->kstat_irqs) - *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0; + *per_cpu_ptr(desc->kstat_irqs, cpuid) = (struct irqstat) { }; } #endif @@ -404,13 +404,7 @@ alive: void __init smp_prepare_boot_cpu(void) { - int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; - - /* Setup BSP mappings */ - printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor); - - set_cpu_online(bootstrap_processor, true); - set_cpu_present(bootstrap_processor, true); + pr_info("SMP: bootstrap CPU ID is 0\n"); } @@ -440,7 +434,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) if (cpu_online(cpu)) return 0; - if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle)) + if (num_online_cpus() < nr_cpu_ids && + num_online_cpus() < setup_max_cpus && + smp_boot_one_cpu(cpu, tidle)) return -EIO; return cpu_online(cpu) ? 0 : -EIO; diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index ca2d537e25b1..b2cdbb8a12b1 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -27,17 +27,12 @@ #include <linux/elf-randomize.h> /* - * Construct an artificial page offset for the mapping based on the virtual + * Construct an artificial page offset for the mapping based on the physical * address of the kernel file mapping variable. - * If filp is zero the calculated pgoff value aliases the memory of the given - * address. This is useful for io_uring where the mapping shall alias a kernel - * address and a userspace adress where both the kernel and the userspace - * access the same memory region. */ -#define GET_FILP_PGOFF(filp, addr) \ - ((filp ? (((unsigned long) filp->f_mapping) >> 8) \ - & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL) \ - + (addr >> PAGE_SHIFT)) +#define GET_FILP_PGOFF(filp) \ + (filp ? (((unsigned long) filp->f_mapping) >> 8) \ + & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL) static unsigned long shared_align_offset(unsigned long filp_pgoff, unsigned long pgoff) @@ -82,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max) * indicating that "current" should be used instead of a passed-in * value from the exec bprm as done with arch_pick_mmap_layout(). 
*/ -static unsigned long mmap_upper_limit(struct rlimit *rlim_stack) +unsigned long mmap_upper_limit(const struct rlimit *rlim_stack) { unsigned long stack_base; @@ -109,7 +104,9 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, struct vm_area_struct *vma, *prev; unsigned long filp_pgoff; int do_color_align; - struct vm_unmapped_area_info info; + struct vm_unmapped_area_info info = { + .length = len + }; if (unlikely(len > TASK_SIZE)) return -ENOMEM; @@ -117,7 +114,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; - filp_pgoff = GET_FILP_PGOFF(filp, addr); + filp_pgoff = GET_FILP_PGOFF(filp); if (flags & MAP_FIXED) { /* Even MAP_FIXED mappings must reside within TASK_SIZE */ @@ -144,7 +141,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, return addr; } - info.length = len; info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; info.align_offset = shared_align_offset(filp_pgoff, pgoff); @@ -165,14 +161,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, */ } - info.flags = 0; - info.low_limit = mm->mmap_legacy_base; + info.low_limit = mm->mmap_base; info.high_limit = mmap_upper_limit(NULL); return vm_unmapped_area(&info); } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr, len, pgoff, flags, UP); @@ -180,64 +176,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) + unsigned long flags, vm_flags_t vm_flags) { return arch_get_unmapped_area_common(filp, addr, len, pgoff, flags, DOWN); } -static int mmap_is_legacy(void) -{ - if (current->personality & ADDR_COMPAT_LAYOUT) - return 1; - - /* parisc stack always grows up - so a unlimited stack should - * not be an indicator to use the legacy memory layout. 
- * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) - * return 1; - */ - - return sysctl_legacy_va_layout; -} - -static unsigned long mmap_rnd(void) -{ - unsigned long rnd = 0; - - if (current->flags & PF_RANDOMIZE) - rnd = get_random_u32() & MMAP_RND_MASK; - - return rnd << PAGE_SHIFT; -} - -unsigned long arch_mmap_rnd(void) -{ - return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT; -} - -static unsigned long mmap_legacy_base(void) -{ - return TASK_UNMAPPED_BASE + mmap_rnd(); -} - -/* - * This function, called very early during the creation of a new - * process VM image, sets up which VM layout function to use: - */ -void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) -{ - mm->mmap_legacy_base = mmap_legacy_base(); - mm->mmap_base = mmap_upper_limit(rlim_stack); - - if (mmap_is_legacy()) { - mm->mmap_base = mm->mmap_legacy_base; - mm->get_unmapped_area = arch_get_unmapped_area; - } else { - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - } -} - - asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 2a12a547b447..826c8e51b585 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, current->comm, current->pid, r20); return -ENOSYS; } - -asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags, - compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd, - const char __user * pathname) -{ - return sys_fanotify_mark(fanotify_fd, flags, - ((__u64)mask1 << 32) | mask0, - dfd, pathname); -} diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 1373e5129868..f58c4bccfbce 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -39,6 +39,7 @@ registers). #include <asm/assembly.h> #include <asm/processor.h> #include <asm/cache.h> +#include <asm/spinlock_types.h> #include <linux/linkage.h> @@ -66,6 +67,16 @@ registers). 
stw \reg1, 0(%sr2,\reg2) .endm + /* raise exception if spinlock content is not zero or + * __ARCH_SPIN_LOCK_UNLOCKED_VAL */ + .macro spinlock_check spin_val,tmpreg +#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg + andcm,= \spin_val, \tmpreg, %r0 + .word SPINLOCK_BREAK_INSN +#endif + .endm + .text .import syscall_exit,code @@ -232,10 +243,10 @@ linux_gateway_entry: #ifdef CONFIG_64BIT ldil L%sys_call_table, %r1 - or,= %r2,%r2,%r2 - addil L%(sys_call_table64-sys_call_table), %r1 + or,ev %r2,%r2,%r2 + ldil L%sys_call_table64, %r1 ldo R%sys_call_table(%r1), %r19 - or,= %r2,%r2,%r2 + or,ev %r2,%r2,%r2 ldo R%sys_call_table64(%r1), %r19 #else load32 sys_call_table, %r19 @@ -368,10 +379,10 @@ tracesys_next: extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */ ldil L%sys_call_table, %r1 - or,= %r2,%r2,%r2 - addil L%(sys_call_table64-sys_call_table), %r1 + or,ev %r2,%r2,%r2 + ldil L%sys_call_table64, %r1 ldo R%sys_call_table(%r1), %r19 - or,= %r2,%r2,%r2 + or,ev %r2,%r2,%r2 ldo R%sys_call_table64(%r1), %r19 #else load32 sys_call_table, %r19 @@ -508,7 +519,8 @@ lws_start: lws_exit_noerror: lws_pagefault_enable %r1,%r21 - stw,ma %r20, 0(%sr2,%r20) + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21 + stw,ma %r21, 0(%sr2,%r20) ssm PSW_SM_I, %r0 b lws_exit copy %r0, %r21 @@ -521,7 +533,8 @@ lws_wouldblock: lws_pagefault: lws_pagefault_enable %r1,%r21 - stw,ma %r20, 0(%sr2,%r20) + ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21 + stw,ma %r21, 0(%sr2,%r20) ssm PSW_SM_I, %r0 ldo 3(%r0),%r28 b lws_exit @@ -600,6 +613,9 @@ lws_compare_and_swap32: lws_compare_and_swap: /* Trigger memory reference interruptions without writing to memory */ 1: ldw 0(%r26), %r28 + proberi (%r26), PRIV_USER, %r28 + comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */ + nop 2: stbys,e %r0, 0(%r26) /* Calculate 8-bit hash index from virtual address */ @@ -619,6 +635,7 @@ lws_compare_and_swap: /* Try to acquire the lock */ LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 comclr,<> %r0, %r28, %r0 b,n lws_wouldblock @@ -753,6 +770,9 @@ cas2_lock_start: copy %r26, %r28 depi_safe 0, 31, 2, %r28 10: ldw 0(%r28), %r1 + proberi (%r28), PRIV_USER, %r1 + comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */ + nop 11: stbys,e %r0, 0(%r28) /* Calculate 8-bit hash index from virtual address */ @@ -772,6 +792,7 @@ cas2_lock_start: /* Try to acquire the lock */ LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 comclr,<> %r0, %r28, %r0 b,n lws_wouldblock @@ -936,41 +957,47 @@ atomic_xchg_begin: /* 8-bit exchange */ 1: ldb 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 b atomic_xchg_start 2: stbys,e %r0, 0(%r20) - nop - nop - nop /* 16-bit exchange */ 3: ldh 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 b atomic_xchg_start 4: stbys,e %r0, 0(%r20) - nop - nop - nop /* 32-bit exchange */ 5: ldw 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop b atomic_xchg_start 6: stbys,e %r0, 0(%r23) nop nop - nop - nop - nop /* 64-bit exchange */ #ifdef CONFIG_64BIT 7: ldd 0(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ + nop 8: stdby,e %r0, 0(%r23) #else 7: ldw 0(%r24), %r20 8: ldw 4(%r24), %r20 + proberi (%r24), PRIV_USER, %r20 + comb,=,n %r20, %r0, lws_fault 
/* backwards, likely not taken */ + nop copy %r23, %r20 depi_safe 0, 31, 2, %r20 9: stbys,e %r0, 0(%r20) @@ -1001,6 +1028,7 @@ atomic_xchg_start: /* Try to acquire the lock */ LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 comclr,<> %r0, %r28, %r0 b,n lws_wouldblock @@ -1199,6 +1227,7 @@ atomic_store_start: /* Try to acquire the lock */ LDCW 0(%sr2,%r20), %r28 + spinlock_check %r28, %r21 comclr,<> %r0, %r28, %r0 b,n lws_wouldblock @@ -1310,6 +1339,8 @@ ENTRY(sys_call_table) END(sys_call_table) #ifdef CONFIG_64BIT +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) .align 8 ENTRY(sys_call_table64) #include <asm/syscall_table_64.h> /* 64-bit syscalls */ @@ -1330,7 +1361,7 @@ ENTRY(lws_lock_start) /* lws locks */ .rept 256 /* Keep locks aligned at 16-bytes */ - .word 1 + .word __ARCH_SPIN_LOCK_UNLOCKED_VAL .word 0 .word 0 .word 0 diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index a0a9145b6dd4..39bdacaa530b 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -108,7 +108,7 @@ 95 common fchown sys_fchown 96 common getpriority sys_getpriority 97 common setpriority sys_setpriority -98 common recv sys_recv +98 common recv sys_recv compat_sys_recv 99 common statfs sys_statfs compat_sys_statfs 100 common fstatfs sys_fstatfs compat_sys_fstatfs 101 common stat64 sys_stat64 @@ -135,7 +135,7 @@ 120 common clone sys_clone_wrapper 121 common setdomainname sys_setdomainname 122 common sendfile sys_sendfile compat_sys_sendfile -123 common recvfrom sys_recvfrom +123 common recvfrom sys_recvfrom compat_sys_recvfrom 124 32 adjtimex sys_adjtimex_time32 124 64 adjtimex sys_adjtimex 125 common mprotect sys_mprotect @@ -245,7 +245,7 @@ # 220 was alloc_hugepages # 221 was free_hugepages 222 common exit_group sys_exit_group -223 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +223 common lookup_dcookie sys_ni_syscall 224 common epoll_create sys_epoll_create 225 common epoll_ctl sys_epoll_ctl 226 common epoll_wait sys_epoll_wait @@ -364,7 +364,7 @@ 320 common accept4 sys_accept4 321 common prlimit64 sys_prlimit64 322 common fanotify_init sys_fanotify_init -323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark +323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 324 32 clock_adjtime sys_clock_adjtime32 324 64 clock_adjtime sys_clock_adjtime 325 common name_to_handle_at sys_name_to_handle_at @@ -450,3 +450,22 @@ 449 common futex_waitv sys_futex_waitv 450 common set_mempolicy_home_node sys_set_mempolicy_home_node 451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 common map_shadow_stack sys_map_shadow_stack +454 common futex_wake sys_futex_wake +455 common futex_wait sys_futex_wait +456 common futex_requeue sys_futex_requeue +457 common statmount sys_statmount +458 common listmount sys_listmount +459 common lsm_get_self_attr sys_lsm_get_self_attr +460 common lsm_set_self_attr sys_lsm_set_self_attr +461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal +463 common setxattrat sys_setxattrat +464 common getxattrat sys_getxattrat +465 common listxattrat sys_listxattrat +466 common removexattrat sys_removexattrat +467 common open_tree_attr sys_open_tree_attr +468 common file_getattr sys_file_getattr +469 common file_setattr sys_file_setattr +470 common listns sys_listns diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 9714fbd7c42d..c17e2249115f 100644 --- 
a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -1,166 +1,119 @@ // SPDX-License-Identifier: GPL-2.0 /* - * linux/arch/parisc/kernel/time.c + * Common time service routines for parisc machines. + * based on arch/loongarch/kernel/time.c * - * Copyright (C) 1991, 1992, 1995 Linus Torvalds - * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King - * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org) - * - * 1994-07-02 Alan Modra - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime - * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills + * Copyright (C) 2024 Helge Deller <deller@gmx.de> */ -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/rtc.h> -#include <linux/sched.h> -#include <linux/sched/clock.h> -#include <linux/sched_clock.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/time.h> +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/export.h> #include <linux/init.h> -#include <linux/smp.h> -#include <linux/profile.h> -#include <linux/clocksource.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/sched_clock.h> +#include <linux/spinlock.h> +#include <linux/rtc.h> #include <linux/platform_device.h> -#include <linux/ftrace.h> +#include <asm/processor.h> -#include <linux/uaccess.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/param.h> -#include <asm/pdc.h> -#include <asm/led.h> +static u64 cr16_clock_freq; +static unsigned long clocktick; -#include <linux/timex.h> +int time_keeper_id; /* CPU used for timekeeping */ -int time_keeper_id __read_mostly; /* CPU used for timekeeping. */ +static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device); -static unsigned long clocktick __ro_after_init; /* timer cycles per tick */ +static void parisc_event_handler(struct clock_event_device *dev) +{ +} -/* - * We keep time on PA-RISC Linux by using the Interval Timer which is - * a pair of registers; one is read-only and one is write-only; both - * accessed through CR16. The read-only register is 32 or 64 bits wide, - * and increments by 1 every CPU clock tick. The architecture only - * guarantees us a rate between 0.5 and 2, but all implementations use a - * rate of 1. The write-only register is 32-bits wide. When the lowest - * 32 bits of the read-only register compare equal to the write-only - * register, it raises a maskable external interrupt. Each processor has - * an Interval Timer of its own and they are not synchronised. - * - * We want to generate an interrupt every 1/HZ seconds. So we program - * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data - * is programmed with the intended time of the next tick. We can be - * held off for an arbitrarily long period of time by interrupts being - * disabled, so we may miss one or more ticks. - */ -irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) +static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt) { - unsigned long now; - unsigned long next_tick; - unsigned long ticks_elapsed = 0; - unsigned int cpu = smp_processor_id(); - struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); - - /* gcc can optimize for "read-only" case with a local clocktick */ - unsigned long cpt = clocktick; - - /* Initialize next_tick to the old expected tick time. 
*/ - next_tick = cpuinfo->it_value; - - /* Calculate how many ticks have elapsed. */ - now = mfctl(16); - do { - ++ticks_elapsed; - next_tick += cpt; - } while (next_tick - now > cpt); - - /* Store (in CR16 cycles) up to when we are accounting right now. */ - cpuinfo->it_value = next_tick; - - /* Go do system house keeping. */ - if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id)) - ticks_elapsed = 0; - legacy_timer_tick(ticks_elapsed); - - /* Skip clockticks on purpose if we know we would miss those. - * The new CR16 must be "later" than current CR16 otherwise - * itimer would not fire until CR16 wrapped - e.g 4 seconds - * later on a 1Ghz processor. We'll account for the missed - * ticks on the next timer interrupt. - * We want IT to fire modulo clocktick even if we miss/skip some. - * But those interrupts don't in fact get delivered that regularly. - * - * "next_tick - now" will always give the difference regardless - * if one or the other wrapped. If "now" is "bigger" we'll end up - * with a very large unsigned number. - */ - now = mfctl(16); - while (next_tick - now > cpt) - next_tick += cpt; - - /* Program the IT when to deliver the next interrupt. - * Only bottom 32-bits of next_tick are writable in CR16! - * Timer interrupt will be delivered at least a few hundred cycles - * after the IT fires, so if we are too close (<= 8000 cycles) to the - * next cycle, simply skip it. - */ - if (next_tick - now <= 8000) - next_tick += cpt; - mtctl(next_tick, 16); + unsigned long new_cr16; - return IRQ_HANDLED; -} + new_cr16 = mfctl(16) + delta; + mtctl(new_cr16, 16); + return 0; +} -unsigned long profile_pc(struct pt_regs *regs) +irqreturn_t timer_interrupt(int irq, void *data) { - unsigned long pc = instruction_pointer(regs); + struct clock_event_device *cd; + int cpu = smp_processor_id(); - if (regs->gr[0] & PSW_N) - pc -= 4; + cd = &per_cpu(parisc_clockevent_device, cpu); -#ifdef CONFIG_SMP - if (in_lock_functions(pc)) - pc = regs->gr[2]; -#endif + if (clockevent_state_periodic(cd)) + parisc_timer_next_event(clocktick, cd); - return pc; + if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd)) + cd->event_handler(cd); + + return IRQ_HANDLED; } -EXPORT_SYMBOL(profile_pc); +static int parisc_set_state_oneshot(struct clock_event_device *evt) +{ + parisc_timer_next_event(clocktick, evt); -/* clock source code */ + return 0; +} -static u64 notrace read_cr16(struct clocksource *cs) +static int parisc_set_state_periodic(struct clock_event_device *evt) { - return get_cycles(); + parisc_timer_next_event(clocktick, evt); + + return 0; } -static struct clocksource clocksource_cr16 = { - .name = "cr16", - .rating = 300, - .read = read_cr16, - .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, -}; +static int parisc_set_state_shutdown(struct clock_event_device *evt) +{ + return 0; +} -void start_cpu_itimer(void) +void parisc_clockevent_init(void) { unsigned int cpu = smp_processor_id(); - unsigned long next_tick = mfctl(16) + clocktick; + unsigned long min_delta = 0x600; /* XXX */ + unsigned long max_delta = (1UL << (BITS_PER_LONG - 1)); + struct clock_event_device *cd; + + cd = &per_cpu(parisc_clockevent_device, cpu); + + cd->name = "cr16_clockevent"; + cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_PERCPU; + + cd->irq = TIMER_IRQ; + cd->rating = 320; + cd->cpumask = cpumask_of(cpu); + cd->set_state_oneshot = parisc_set_state_oneshot; + cd->set_state_oneshot_stopped = parisc_set_state_shutdown; + cd->set_state_periodic = 
parisc_set_state_periodic; + cd->set_state_shutdown = parisc_set_state_shutdown; + cd->set_next_event = parisc_timer_next_event; + cd->event_handler = parisc_event_handler; + + clockevents_config_and_register(cd, cr16_clock_freq, min_delta, max_delta); +} + +unsigned long notrace profile_pc(struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); - mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ + if (regs->gr[0] & PSW_N) + pc -= 4; + +#ifdef CONFIG_SMP + if (in_lock_functions(pc)) + pc = regs->gr[2]; +#endif - per_cpu(cpu_data, cpu).it_value = next_tick; + return pc; } +EXPORT_SYMBOL(profile_pc); #if IS_ENABLED(CONFIG_RTC_DRV_GENERIC) static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm) @@ -224,12 +177,27 @@ void read_persistent_clock64(struct timespec64 *ts) } } - static u64 notrace read_cr16_sched_clock(void) { return get_cycles(); } +static u64 notrace read_cr16(struct clocksource *cs) +{ + return get_cycles(); +} + +static struct clocksource clocksource_cr16 = { + .name = "cr16", + .rating = 300, + .read = read_cr16, + .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), + .flags = CLOCK_SOURCE_IS_CONTINUOUS | + CLOCK_SOURCE_VALID_FOR_HRES | + CLOCK_SOURCE_MUST_VERIFY | + CLOCK_SOURCE_VERIFY_PERCPU, +}; + /* * timer interrupt and sched_clock() initialization @@ -237,33 +205,14 @@ static u64 notrace read_cr16_sched_clock(void) void __init time_init(void) { - unsigned long cr16_hz; - - clocktick = (100 * PAGE0->mem_10msec) / HZ; - start_cpu_itimer(); /* get CPU 0 started */ - - cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ + cr16_clock_freq = 100 * PAGE0->mem_10msec; /* Hz */ + clocktick = cr16_clock_freq / HZ; /* register as sched_clock source */ - sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); -} + sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq); -static int __init init_cr16_clocksource(void) -{ - /* - * The cr16 interval timers are not synchronized across CPUs. 
- */ - if (num_online_cpus() > 1 && !running_on_qemu) { - clocksource_cr16.name = "cr16_unstable"; - clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; - clocksource_cr16.rating = 0; - } + parisc_clockevent_init(); /* register at clocksource framework */ - clocksource_register_hz(&clocksource_cr16, - 100 * PAGE0->mem_10msec); - - return 0; + clocksource_register_hz(&clocksource_cr16, cr16_clock_freq); } - -device_initcall(init_cr16_clocksource); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 3b97944c7291..4c7c5df80bd0 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -31,12 +31,13 @@ #include <linux/uaccess.h> #include <linux/kdebug.h> #include <linux/kfence.h> +#include <linux/perf_event.h> #include <asm/assembly.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/traps.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/atomic.h> #include <asm/smp.h> #include <asm/pdc.h> @@ -47,6 +48,8 @@ #include <linux/kgdb.h> #include <linux/kprobes.h> +#include "unaligned.h" + #if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) #include <asm/spinlock.h> #endif @@ -335,9 +338,6 @@ static void default_trap(int code, struct pt_regs *regs) show_regs(regs); } -void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap; - - static void transfer_pim_to_trap_frame(struct pt_regs *regs) { register int i; @@ -507,7 +507,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (((unsigned long)regs->iaoq[0] & 3) && ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { /* Kill the user process later */ - regs->iaoq[0] = 0 | 3; + regs->iaoq[0] = 0 | PRIV_USER; regs->iaoq[1] = regs->iaoq[0] + 4; regs->iasq[0] = regs->iasq[1] = regs->sr[7]; regs->gr[0] &= ~PSW_B; @@ -557,7 +557,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) flush_cache_all(); flush_tlb_all(); - cpu_lpmc(5, regs); + default_trap(code, regs); return; case PARISC_ITLB_TRAP: @@ -634,6 +634,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) /* Assist Exception Trap, i.e. floating point exception. */ die_if_kernel("Floating point exception", regs, 0); /* quiet */ __inc_irq_stat(irq_fpassist_count); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); handle_fpe(regs); return; diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 033b9e50b44a..fb64d9ce0b17 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -11,9 +11,12 @@ #include <linux/signal.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> -#include <asm/unaligned.h> +#include <linux/sysctl.h> +#include <linux/unaligned.h> +#include <linux/perf_event.h> #include <asm/hardirq.h> #include <asm/traps.h> +#include "unaligned.h" /* #define DEBUG_UNALIGNED 1 */ @@ -23,7 +26,7 @@ #define DPRINTF(fmt, args...) 
#endif -#define RFMT "%#08lx" +#define RFMT "0x%08lx" /* 1111 1100 0000 0000 0001 0011 1100 0000 */ #define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6) @@ -103,6 +106,7 @@ #define ERR_NOTHANDLED -1 int unaligned_enabled __read_mostly = 1; +int no_unaligned_warning __read_mostly; static int emulate_ldh(struct pt_regs *regs, int toreg) { @@ -119,8 +123,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg) "2: ldbs 1(%%sr1,%3), %0\n" " depw %2, 23, 24, %0\n" "3: \n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") : "+r" (val), "+r" (ret), "=&r" (temp1) : "r" (saddr), "r" (regs->isr) ); @@ -151,8 +155,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) " mtctl %2,11\n" " vshd %0,%3,%0\n" "3: \n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") : "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2) : "r" (saddr), "r" (regs->isr) ); @@ -168,6 +172,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) { unsigned long saddr = regs->ior; + unsigned long shift, temp1; __u64 val = 0; ASM_EXCEPTIONTABLE_VAR(ret); @@ -179,25 +184,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) #ifdef CONFIG_64BIT __asm__ __volatile__ ( -" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */ -" mtsp %4, %%sr1\n" -" depd %%r0,63,3,%3\n" -"1: ldd 0(%%sr1,%3),%0\n" -"2: ldd 8(%%sr1,%3),%%r20\n" -" subi 64,%%r19,%%r19\n" -" mtsar %%r19\n" -" shrpd %0,%%r20,%%sar,%0\n" +" depd,z %2,60,3,%3\n" /* shift=(ofs&7)*8 */ +" mtsp %5, %%sr1\n" +" depd %%r0,63,3,%2\n" +"1: ldd 0(%%sr1,%2),%0\n" +"2: ldd 8(%%sr1,%2),%4\n" +" subi 64,%3,%3\n" +" mtsar %3\n" +" shrpd %0,%4,%%sar,%0\n" "3: \n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) - : "=r" (val), "+r" (ret) - : "0" (val), "r" (saddr), "r" (regs->isr) - : "r19", "r20" ); + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") + : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) + : "r" (regs->isr) ); #else - { - unsigned long shift, temp1; __asm__ __volatile__ ( -" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */ +" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */ " mtsp %5, %%sr1\n" " dep %%r0,31,2,%2\n" "1: ldw 0(%%sr1,%2),%0\n" @@ -208,12 +210,11 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) " vshd %0,%R0,%0\n" " vshd %R0,%4,%R0\n" "4: \n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1") : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) : "r" (regs->isr) ); - } #endif DPRINTF("val = 0x%llx\n", val); @@ -243,8 +244,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg) "1: stb %1, 0(%%sr1, %3)\n" "2: stb %2, 1(%%sr1, %3)\n" "3: \n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0") : "+r" (ret), "=&r" (temp1) : "r" (val), "r" (regs->ior), "r" (regs->isr) ); @@ -284,8 +285,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int 
flop) " stw %%r20,0(%%sr1,%2)\n" " stw %%r21,4(%%sr1,%2)\n" "3: \n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0") : "+r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r22", "r1" ); @@ -328,42 +329,41 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) "3: std %%r20,0(%%sr1,%2)\n" "4: std %%r21,8(%%sr1,%2)\n" "5: \n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0") : "+r" (ret) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r22", "r1" ); #else { - unsigned long valh=(val>>32),vall=(val&0xffffffffl); __asm__ __volatile__ ( -" mtsp %4, %%sr1\n" -" zdep %2, 29, 2, %%r19\n" -" dep %%r0, 31, 2, %3\n" +" mtsp %3, %%sr1\n" +" zdep %R1, 29, 2, %%r19\n" +" dep %%r0, 31, 2, %2\n" " mtsar %%r19\n" " zvdepi -2, 32, %%r19\n" -"1: ldw 0(%%sr1,%3),%%r20\n" -"2: ldw 8(%%sr1,%3),%%r21\n" -" vshd %1, %2, %%r1\n" +"1: ldw 0(%%sr1,%2),%%r20\n" +"2: ldw 8(%%sr1,%2),%%r21\n" +" vshd %1, %R1, %%r1\n" " vshd %%r0, %1, %1\n" -" vshd %2, %%r0, %2\n" +" vshd %R1, %%r0, %R1\n" " and %%r20, %%r19, %%r20\n" " andcm %%r21, %%r19, %%r21\n" " or %1, %%r20, %1\n" -" or %2, %%r21, %2\n" -"3: stw %1,0(%%sr1,%3)\n" -"4: stw %%r1,4(%%sr1,%3)\n" -"5: stw %2,8(%%sr1,%3)\n" +" or %R1, %%r21, %R1\n" +"3: stw %1,0(%%sr1,%2)\n" +"4: stw %%r1,4(%%sr1,%2)\n" +"5: stw %R1,8(%%sr1,%2)\n" "6: \n" - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b) - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b) + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0") + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0") : "+r" (ret) - : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr) + : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r1" ); } #endif @@ -379,6 +379,7 @@ void handle_unaligned(struct pt_regs *regs) int ret = ERR_NOTHANDLED; __inc_irq_stat(irq_unaligned_count); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->ior); /* log a message with pacing */ if (user_mode(regs)) { @@ -399,6 +400,14 @@ void handle_unaligned(struct pt_regs *regs) if (!unaligned_enabled) goto force_sigbus; + } else { + static DEFINE_RATELIMIT_STATE(kernel_ratelimit, 5 * HZ, 5); + if (!(current->thread.flags & PARISC_UAC_NOPRINT) && + !no_unaligned_warning && + __ratelimit(&kernel_ratelimit)) + pr_warn("Kernel: unaligned access to " RFMT " in %pS " + "(iir " RFMT ")\n", + regs->ior, (void *)regs->iaoq[0], regs->iir); } /* handle modification - OK, it's ugly, see the instruction manual */ @@ -473,7 +482,7 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_LDWA_I: case OPCODE_LDW_S: case OPCODE_LDWA_S: - ret = emulate_ldw(regs, R3(regs->iir),0); + ret = emulate_ldw(regs, R3(regs->iir), 0); break; case OPCODE_STH: @@ -482,7 +491,7 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_STW: case OPCODE_STWA: - ret = emulate_stw(regs, R2(regs->iir),0); + ret = emulate_stw(regs, 
R2(regs->iir), 0); break; #ifdef CONFIG_64BIT @@ -490,12 +499,12 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_LDDA_I: case OPCODE_LDD_S: case OPCODE_LDDA_S: - ret = emulate_ldd(regs, R3(regs->iir),0); + ret = emulate_ldd(regs, R3(regs->iir), 0); break; case OPCODE_STD: case OPCODE_STDA: - ret = emulate_std(regs, R2(regs->iir),0); + ret = emulate_std(regs, R2(regs->iir), 0); break; #endif @@ -503,24 +512,24 @@ void handle_unaligned(struct pt_regs *regs) case OPCODE_FLDWS: case OPCODE_FLDWXR: case OPCODE_FLDWSR: - ret = emulate_ldw(regs,FR3(regs->iir),1); + ret = emulate_ldw(regs, FR3(regs->iir), 1); break; case OPCODE_FLDDX: case OPCODE_FLDDS: - ret = emulate_ldd(regs,R3(regs->iir),1); + ret = emulate_ldd(regs, R3(regs->iir), 1); break; case OPCODE_FSTWX: case OPCODE_FSTWS: case OPCODE_FSTWXR: case OPCODE_FSTWSR: - ret = emulate_stw(regs,FR3(regs->iir),1); + ret = emulate_stw(regs, FR3(regs->iir), 1); break; case OPCODE_FSTDX: case OPCODE_FSTDS: - ret = emulate_std(regs,R3(regs->iir),1); + ret = emulate_std(regs, R3(regs->iir), 1); break; case OPCODE_LDCD_I: diff --git a/arch/parisc/kernel/unaligned.h b/arch/parisc/kernel/unaligned.h new file mode 100644 index 000000000000..c1aa4b12e284 --- /dev/null +++ b/arch/parisc/kernel/unaligned.h @@ -0,0 +1,3 @@ +struct pt_regs; +void handle_unaligned(struct pt_regs *regs); +int check_unaligned(struct pt_regs *regs); diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 27ae40a443b8..7ac88ff13d3c 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -35,6 +35,8 @@ #define KERNEL_START (KERNEL_BINARY_TEXT_START) +#define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0) + extern struct unwind_table_entry __start___unwind[]; extern struct unwind_table_entry __stop___unwind[]; @@ -228,10 +230,8 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int #ifdef CONFIG_IRQSTACKS extern void * const _call_on_stack; #endif /* CONFIG_IRQSTACKS */ - void *ptr; - ptr = dereference_kernel_function_descriptor(&handle_interruption); - if (pc_is_kernel_fn(pc, ptr)) { + if (pc_is_kernel_fn(pc, handle_interruption)) { struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN); dbg("Unwinding through handle_interruption()\n"); info->prev_sp = regs->gr[30]; @@ -239,13 +239,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int return 1; } - if (pc_is_kernel_fn(pc, ret_from_kernel_thread) || - pc_is_kernel_fn(pc, syscall_exit)) { + if (pc == (unsigned long)&ret_from_kernel_thread || + pc == (unsigned long)&syscall_exit) { info->prev_sp = info->prev_ip = 0; return 1; } - if (pc_is_kernel_fn(pc, intr_return)) { + if (pc == (unsigned long)&intr_return) { struct pt_regs *regs; dbg("Found intr_return()\n"); @@ -257,14 +257,17 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int } if (pc_is_kernel_fn(pc, _switch_to) || - pc_is_kernel_fn(pc, _switch_to_ret)) { + pc == (unsigned long)&_switch_to_ret) { info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE; - info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); + if (ALIGNMENT_OK(info->prev_sp, long)) + info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET); + else + info->prev_ip = info->prev_sp = 0; return 1; } #ifdef CONFIG_IRQSTACKS - if (pc_is_kernel_fn(pc, _call_on_stack)) { + if (pc == (unsigned long)&_call_on_stack && ALIGNMENT_OK(info->sp, long)) { info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ); info->prev_ip = 
*(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET); return 1; @@ -372,8 +375,10 @@ static void unwind_frame_regs(struct unwind_frame_info *info) info->prev_sp = info->sp - frame_size; if (e->Millicode) info->rp = info->r31; - else if (rpoffset) + else if (rpoffset && ALIGNMENT_OK(info->prev_sp, long)) info->rp = *(unsigned long *)(info->prev_sp - rpoffset); + else + info->rp = 0; info->prev_ip = info->rp; info->rp = 0; } diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile index 4459a48d2303..4ee8d17da229 100644 --- a/arch/parisc/kernel/vdso32/Makefile +++ b/arch/parisc/kernel/vdso32/Makefile @@ -1,11 +1,25 @@ -# List of files in the vdso, has to be asm only for now +# Include the generic Makefile to check the built vdso. +include $(srctree)/lib/vdso/Makefile.include + +KCOV_INSTRUMENT := n + +# Disable gcov profiling, ubsan and kasan for VDSO code +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n +KCSAN_SANITIZE := n obj-vdso32 = note.o sigtramp.o restart_syscall.o +obj-cvdso32 = vdso32_generic.o # Build rules -targets := $(obj-vdso32) vdso32.so +targets := $(obj-vdso32) $(obj-cvdso32) vdso32.so obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) +obj-cvdso32 := $(addprefix $(obj)/, $(obj-cvdso32)) + +VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_vdso32_generic.o = $(VDSO_CFLAGS_REMOVE) ccflags-y := -shared -fno-common -fbuiltin -mno-fast-indirect-calls -O2 -mno-long-calls # -march=1.1 -mschedule=7100LC @@ -19,20 +33,19 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING VDSO_LIBGCC := $(shell $(CROSS32CC) -print-libgcc-file-name) obj-y += vdso32_wrapper.o -extra-y += vdso32.lds +targets += vdso32.lds CPPFLAGS_vdso32.lds += -P -C # -U$(ARCH) $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE # Force dependency (incbin is bad) # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE +$(obj)/vdso32.so: $(obj)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE $(call if_changed,vdso32ld) # assembly rules for the .S files $(obj-vdso32): %.o: %.S FORCE $(call if_changed_dep,vdso32as) - $(obj-cvdso32): %.o: %.c FORCE $(call if_changed_dep,vdso32cc) @@ -42,10 +55,10 @@ quiet_cmd_vdso32ld = VDSO32L $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< quiet_cmd_vdso32cc = VDSO32C $@ - cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -fPIC -mno-fast-indirect-calls -o $@ $< + cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -o $@ $< # Generate VDSO offsets using helper script -gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +gen-vdsosym := $(src)/gen_vdso_offsets.sh quiet_cmd_vdsosym = VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ diff --git a/arch/parisc/kernel/vdso32/vdso32.lds.S b/arch/parisc/kernel/vdso32/vdso32.lds.S index d4aff3af5262..4273baa26b65 100644 --- a/arch/parisc/kernel/vdso32/vdso32.lds.S +++ b/arch/parisc/kernel/vdso32/vdso32.lds.S @@ -106,6 +106,9 @@ VERSION global: __kernel_sigtramp_rt32; __kernel_restart_syscall32; + __vdso_gettimeofday; + __vdso_clock_gettime; + __vdso_clock_gettime64; local: *; }; } diff --git a/arch/parisc/kernel/vdso32/vdso32_generic.c b/arch/parisc/kernel/vdso32/vdso32_generic.c new file mode 100644 index 000000000000..8d5bd59e8646 --- /dev/null +++ b/arch/parisc/kernel/vdso32/vdso32_generic.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "asm/unistd.h" +#include <linux/types.h> +#include <uapi/asm/unistd_32.h> + +struct timezone; 
+struct old_timespec32; +struct __kernel_timespec; +struct __kernel_old_timeval; + +/* forward declarations */ +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts); +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts); + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return syscall2(__NR_gettimeofday, (long)tv, (long)tz); +} + +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts) +{ + return syscall2(__NR_clock_gettime, (long)clock, (long)ts); +} + +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts) +{ + return syscall2(__NR_clock_gettime64, (long)clock, (long)ts); +} diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile index f3d6045793f4..c63f4069170f 100644 --- a/arch/parisc/kernel/vdso64/Makefile +++ b/arch/parisc/kernel/vdso64/Makefile @@ -1,12 +1,25 @@ -# List of files in the vdso, has to be asm only for now +# Include the generic Makefile to check the built vdso. +include $(srctree)/lib/vdso/Makefile.include + +KCOV_INSTRUMENT := n + +# Disable gcov profiling, ubsan and kasan for VDSO code +GCOV_PROFILE := n +UBSAN_SANITIZE := n +KASAN_SANITIZE := n +KCSAN_SANITIZE := n obj-vdso64 = note.o sigtramp.o restart_syscall.o +obj-cvdso64 = vdso64_generic.o # Build rules -targets := $(obj-vdso64) vdso64.so -obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) +targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so +obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) +obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64)) +VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE) ccflags-y := -shared -fno-common -fno-builtin ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ @@ -19,28 +32,32 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING VDSO_LIBGCC := $(shell $(CC) -print-libgcc-file-name) obj-y += vdso64_wrapper.o -extra-y += vdso64.lds +targets += vdso64.lds CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE # Force dependency (incbin is bad) # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE +$(obj)/vdso64.so: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) $(VDSO_LIBGCC) FORCE $(call if_changed,vdso64ld) # assembly rules for the .S files $(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) +$(obj-cvdso64): %.o: %.c FORCE + $(call if_changed_dep,vdso64cc) # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< +quiet_cmd_vdso64cc = VDSO64C $@ + cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $< # Generate VDSO offsets using helper script -gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +gen-vdsosym := $(src)/gen_vdso_offsets.sh quiet_cmd_vdsosym = VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ diff --git a/arch/parisc/kernel/vdso64/vdso64.lds.S b/arch/parisc/kernel/vdso64/vdso64.lds.S index de1fb4b19286..10f25e4e1554 100644 --- a/arch/parisc/kernel/vdso64/vdso64.lds.S +++ b/arch/parisc/kernel/vdso64/vdso64.lds.S @@ -104,6 +104,8 @@ VERSION global: __kernel_sigtramp_rt64; __kernel_restart_syscall64; + __vdso_gettimeofday; + __vdso_clock_gettime; local: *; }; } diff --git a/arch/parisc/kernel/vdso64/vdso64_generic.c 
b/arch/parisc/kernel/vdso64/vdso64_generic.c new file mode 100644 index 000000000000..fc6836a0075b --- /dev/null +++ b/arch/parisc/kernel/vdso64/vdso64_generic.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "asm/unistd.h" +#include <linux/types.h> + +struct timezone; +struct __kernel_timespec; +struct __kernel_old_timeval; + +/* forward declarations */ +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, + struct timezone *tz) +{ + return syscall2(__NR_gettimeofday, (long)tv, (long)tz); +} + +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +{ + return syscall2(__NR_clock_gettime, (long)clock, (long)ts); +} diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 1aaa2ca09800..b445e47903cf 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -127,9 +127,10 @@ SECTIONS } #endif - RO_DATA(8) + RO_DATA(PAGE_SIZE) /* unwind info */ + . = ALIGN(4); .PARISC.unwind : { __start___unwind = .; *(.PARISC.unwind) @@ -154,6 +155,7 @@ SECTIONS } /* End of data section */ + . = ALIGN(PAGE_SIZE); _edata = .; /* BSS */
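On the new perf_regs.c: perf_reg_validate() accepts any non-empty sample mask whose bits all fall below PERF_REG_PARISC_MAX. A minimal userspace model of that check, assuming PERF_REG_PARISC_MAX is 49 (32 GRs + 8 SRs + 2 IASQs + 2 IAOQs + SAR/IIR/ISR/IOR/IPSW, as the switch enumerates; the authoritative value lives in the uapi perf_regs.h):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the uapi enum; the real value comes from
 * arch/parisc/include/uapi/asm/perf_regs.h. */
#define PERF_REG_PARISC_MAX	49

#define REG_RESERVED (~((1ULL << PERF_REG_PARISC_MAX) - 1))

static int perf_reg_validate(uint64_t mask)
{
	if (!mask || (mask & REG_RESERVED))
		return -1;			/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d\n", perf_reg_validate(0));		/* reject: empty mask */
	printf("%d\n", perf_reg_validate(1ULL << 2));	/* ok: sample one GR */
	printf("%d\n", perf_reg_validate(1ULL << 60));	/* reject: reserved bit */
	return 0;
}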
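With perf_event.c and perf_regs.c wired up, userspace can ask for register samples through the standard interface. A sketch using perf_event_open(2); the bits chosen for sample_regs_user are illustrative only and must match the parisc uapi register numbering:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
	attr.sample_regs_user = (1ULL << 2) | (1ULL << 30);	/* illustrative */

	/* measure the calling task on any CPU */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* mmap the ring buffer and parse PERF_RECORD_SAMPLE records here */
	close(fd);
	return 0;
}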
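The sys_parisc.c hunk drops the addr-based aliasing from GET_FILP_PGOFF(); the cache colour is again derived only from the file's f_mapping pointer plus the caller's pgoff. A toy model of the alignment math, with SHM_COLOUR assumed to be 4 MB as in asm/shmparam.h:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define SHM_COLOUR	0x00400000UL	/* assumed; see asm/shmparam.h */

static unsigned long shared_align_offset(unsigned long filp_pgoff,
					 unsigned long pgoff)
{
	return (filp_pgoff + pgoff) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long align_mask = PAGE_MASK & (SHM_COLOUR - 1);

	printf("align_mask=%#lx\n", align_mask);
	printf("colour offset=%#lx\n",
	       shared_align_offset(3, 5) & (SHM_COLOUR - 1));
	return 0;
}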
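The syscall.S changes harden the light-weight-syscall (LWS) atomics: proberi rejects non-user addresses before any store, spinlock_check (under CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK) traps on a corrupt lock word, and unlock now stores __ARCH_SPIN_LOCK_UNLOCKED_VAL instead of the lock address. A conceptual C model of the 32-bit CAS path, with an illustrative hash (the real code runs on the gateway page with interrupts disabled):

#include <stdint.h>

#define NR_LWS_LOCKS	256

/* 1 == __ARCH_SPIN_LOCK_UNLOCKED_VAL is assumed here, matching the new
 * lws_lock_start initializer. */
static volatile uint32_t lws_lock[NR_LWS_LOCKS] = {
	[0 ... NR_LWS_LOCKS - 1] = 1
};

/* Illustrative 8-bit hash; the assembly derives it from other bits of
 * the virtual address. */
static unsigned int lws_hash(uintptr_t addr)
{
	return (addr >> 2) & (NR_LWS_LOCKS - 1);
}

static int lws_cas32(uint32_t *uaddr, uint32_t old, uint32_t new)
{
	unsigned int h = lws_hash((uintptr_t)uaddr);
	int ret;

	/* ldcw: atomically read the lock word and zero it; reading zero
	 * means somebody else holds the lock, so spin. */
	while (__sync_lock_test_and_set(&lws_lock[h], 0) == 0)
		;
	ret = (*uaddr == old) ? (*uaddr = new, 0) : 1;
	lws_lock[h] = 1;	/* store the unlocked value back */
	return ret;
}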
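On the time.c rewrite: the cr16 interval timer ticks at 100 * PAGE0->mem_10msec Hz, and the clockevent is programmed by writing mfctl(16) + delta back to CR16. Rough numbers, assuming a 250 MHz cr16 and HZ == 250 (both values made up for illustration; firmware reports the real rate):

#include <stdio.h>

int main(void)
{
	unsigned long mem_10msec = 2500000;	/* assumed: 250 MHz cr16 */
	unsigned long freq = 100 * mem_10msec;	/* as in time_init() */
	unsigned long clocktick = freq / 250;	/* cycles per tick, HZ == 250 */

	printf("freq=%lu Hz, clocktick=%lu cycles, min_delta=%.2f us\n",
	       freq, clocktick, 0x600 * 1e6 / freq);
	return 0;
}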
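In unaligned.c, emulate_ldh() assembles the halfword from two byte loads: a sign-extending ldbs of the first byte is deposited above the second byte (the "depw %2, 23, 24, %0" in the inline asm). The same operation in portable C:

#include <stdint.h>
#include <stdio.h>

static int32_t ldh_unaligned(const uint8_t *p)
{
	int32_t hi = (int8_t)p[0];	/* ldbs: sign-extending byte load */
	int32_t lo = p[1];		/* second byte */

	/* depw: deposit the sign-extended high byte above the low byte */
	return (int32_t)(((uint32_t)hi << 8) | (uint32_t)lo);
}

int main(void)
{
	uint8_t buf[] = { 0x12, 0xff, 0x80 };

	printf("%x\n", ldh_unaligned(buf + 1));	/* prints ffffff80 */
	return 0;
}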
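unwind.c now guards every dereference of a saved stack slot with ALIGNMENT_OK(), so a corrupt frame pointer terminates the unwind instead of triggering a recursive unaligned trap. The predicate is a plain power-of-two mask test:

#include <stdint.h>
#include <stdio.h>

/* Same test as the new macro in unwind.c */
#define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0)

int main(void)
{
	uintptr_t sp = 0xdeadbeef;		/* misaligned: low bits set */

	printf("%d %d\n",
	       (int)ALIGNMENT_OK(sp, long),		/* 0: stop unwinding */
	       (int)ALIGNMENT_OK(sp & ~7UL, long));	/* 1: safe to load  */
	return 0;
}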
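The new vdso32/vdso64 generic objects export __vdso_gettimeofday and __vdso_clock_gettime{,64}, though for now each simply funnels into syscall2() (the trap-based helper from asm/unistd.h), so the gain is a stable vDSO entry point rather than a syscall-free fast path. Callers never name these symbols directly; libc resolves them, as in:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* glibc routes this through the vDSO when the symbol exists;
	 * with this patch the parisc entry point is present, though it
	 * still traps into the kernel underneath. */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}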
