Diffstat (limited to 'arch/sparc')
-rw-r--r-- | arch/sparc/Kconfig                   |  2
-rw-r--r-- | arch/sparc/include/asm/Kbuild        |  1
-rw-r--r-- | arch/sparc/include/asm/agp.h         | 17
-rw-r--r-- | arch/sparc/include/asm/dma-mapping.h |  2
-rw-r--r-- | arch/sparc/include/asm/page_32.h     |  1
-rw-r--r-- | arch/sparc/include/asm/pgtable_32.h  | 26
-rw-r--r-- | arch/sparc/include/asm/pgtable_64.h  | 37
-rw-r--r-- | arch/sparc/include/asm/pgtsrmmu.h    | 14
-rw-r--r-- | arch/sparc/include/asm/vio.h         |  5
-rw-r--r-- | arch/sparc/kernel/leon_pmc.c         |  4
-rw-r--r-- | arch/sparc/kernel/process_32.c       |  1
-rw-r--r-- | arch/sparc/kernel/process_64.c       |  3
-rw-r--r-- | arch/sparc/kernel/vio.c              |  2
-rw-r--r-- | arch/sparc/kernel/vmlinux.lds.S      |  1
-rw-r--r-- | arch/sparc/mm/fault_32.c             |  5
-rw-r--r-- | arch/sparc/mm/fault_64.c             |  7
16 files changed, 83 insertions, 45 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 4d3d1af90d52..84437a4c6545 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -283,7 +283,7 @@ config ARCH_FORCE_MAX_ORDER
 	  This config option is actually maximum order plus one. For example,
 	  a value of 13 means that the largest free memory block is 2^12 pages.
 
-if SPARC64
+if SPARC64 || COMPILE_TEST
 source "kernel/power/Kconfig"
 endif
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 0b9d98ced34a..595ca0be286b 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 generated-y += syscall_table_32.h
 generated-y += syscall_table_64.h
+generic-y += agp.h
 generic-y += export.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
diff --git a/arch/sparc/include/asm/agp.h b/arch/sparc/include/asm/agp.h
deleted file mode 100644
index 2d0ff84cee3f..000000000000
--- a/arch/sparc/include/asm/agp.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AGP_H
-#define AGP_H 1
-
-/* dummy for now */
-
-#define map_page_into_agp(page)	do { } while (0)
-#define unmap_page_from_agp(page)	do { } while (0)
-#define flush_agp_cache()	mb()
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order)	\
-	((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order)	\
-	free_pages((unsigned long)(table), (order))
-
-#endif
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 2f051343612e..55c12fc2ba63 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -4,7 +4,7 @@
 
 extern const struct dma_map_ops *dma_ops;
 
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+static inline const struct dma_map_ops *get_arch_dma_ops(void)
 {
 	/* sparc32 uses per-device dma_ops */
 	return IS_ENABLED(CONFIG_SPARC64) ? dma_ops : NULL;
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index fff8861df107..6be6f683f98f 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -130,7 +130,6 @@ extern unsigned long pfn_base;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
-#define pfn_valid(pfn)		(((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
 #define virt_addr_valid(kaddr)	((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 5acc05b572e6..d4330e3c57a6 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -323,7 +323,16 @@ void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
 			     unsigned long xva, unsigned int len);
 void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);
 
-/* Encode and de-code a swap entry */
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *   <-------------- offset ---------------> < type -> E 0 0 0 0 0 0
+ */
 static inline unsigned long __swp_type(swp_entry_t entry)
 {
 	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
@@ -344,6 +353,21 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
+static inline int pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & SRMMU_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	return __pte(pte_val(pte) | SRMMU_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~SRMMU_SWP_EXCLUSIVE);
+}
+
 static inline unsigned long
 __get_phys (unsigned long addr)
 {
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 3bc9736bddb1..2dc8d4641734 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -187,6 +187,9 @@ bool kern_addr_valid(unsigned long addr);
 #define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
 #define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
 
+/* We borrow bit 20 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE	_AC(0x0000000000100000, UL)
+
 #ifndef __ASSEMBLY__
 
 pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
@@ -961,18 +964,46 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pgtable);
 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #endif
 
-/* Encode and de-code a swap entry */
-#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
+ *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
+ *   <--------------------------- offset ---------------------------
+ *
+ *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *   --------------------> E <-- type ---> <------- zeroes -------->
+ */
+#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0x7fUL)
 #define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
 #define __swp_entry(type, offset)	\
 	( (swp_entry_t) \
 	  { \
-		(((long)(type) << PAGE_SHIFT) | \
+		((((long)(type) & 0x7fUL) << PAGE_SHIFT) | \
 		((long)(offset) << (PAGE_SHIFT + 8UL))) \
 	  } )
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
+static inline int pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
+}
+
 int page_in_phys_avail(unsigned long paddr);
 
 /*
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 6067925972d9..18e68d43f036 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -53,21 +53,13 @@
 
 #define SRMMU_CHG_MASK    (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
 
-/* SRMMU swap entry encoding
- *
- * We use 5 bits for the type and 19 for the offset. This gives us
- * 32 swapfiles of 4GB each. Encoding looks like:
- *
- * oooooooooooooooooootttttRRRRRRRR
- * fedcba9876543210fedcba9876543210
- *
- * The bottom 7 bits are reserved for protection and status bits, especially
- * PRESENT.
- */
+/* SRMMU swap entry encoding */
 #define SRMMU_SWP_TYPE_MASK	0x1f
 #define SRMMU_SWP_TYPE_SHIFT	7
 #define SRMMU_SWP_OFF_MASK	0xfffff
 #define SRMMU_SWP_OFF_SHIFT	(SRMMU_SWP_TYPE_SHIFT + 5)
+/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
+#define SRMMU_SWP_EXCLUSIVE	SRMMU_DIRTY
 
 /* Some day I will implement true fine grained access bits for
  * user pages because the SRMMU gives us the capabilities to
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 2d7bdf665fd3..8a0c3c11c9ce 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -488,10 +488,7 @@ static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
 	return container_of(drv, struct vio_driver, driver);
 }
 
-static inline struct vio_dev *to_vio_dev(struct device *dev)
-{
-	return container_of(dev, struct vio_dev, dev);
-}
+#define to_vio_dev(__dev)	container_of_const(__dev, struct vio_dev, dev)
 
 int vio_ldc_send(struct vio_driver_state *vio, void *data, int len);
 void vio_link_state_change(struct vio_driver_state *vio, int event);
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 396f46bca52e..6c00cbad7fb5 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -57,6 +57,8 @@ static void pmc_leon_idle_fixup(void)
 		"lda	[%0] %1, %%g0\n"
 		:
 		: "r"(address), "i"(ASI_LEON_BYPASS));
+
+	raw_local_irq_disable();
 }
 
 /*
@@ -70,6 +72,8 @@ static void pmc_leon_idle(void)
 
 	/* For systems without power-down, this will be no-op */
 	__asm__ __volatile__ ("wr	%g0, %asr19\n\t");
+
+	raw_local_irq_disable();
 }
 
 /* Install LEON Power Down function */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 33b0215a4182..9c7c662cb565 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -71,7 +71,6 @@ void arch_cpu_idle(void)
 {
 	if (sparc_idle)
 		(*sparc_idle)();
-	raw_local_irq_enable();
 }
 
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 6335b698a4b4..91c2b8124527 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -59,7 +59,6 @@ void arch_cpu_idle(void)
 {
 	if (tlb_type != hypervisor) {
 		touch_nmi_watchdog();
-		raw_local_irq_enable();
 	} else {
 		unsigned long pstate;
 
@@ -90,6 +89,8 @@ void arch_cpu_idle(void)
 			"wrpr %0, %%g0, %%pstate"
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));
+
+		raw_local_irq_disable();
 	}
 }
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 01122a208f94..b78df3a15a72 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -46,7 +46,7 @@ static const struct vio_device_id *vio_match_device(
 	return NULL;
 }
 
-static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
+static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env)
 {
 	const struct vio_dev *vio_dev = to_vio_dev(dev);
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index d55ae65a07ad..d317a843f7ea 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -50,7 +50,6 @@ SECTIONS
 		HEAD_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
-		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
 		IRQENTRY_TEXT
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 91259f291c54..179295b14664 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -187,8 +187,11 @@ good_area:
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!from_user)
+			goto no_context;
 		return;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4acc12eafbf5..d91305de694c 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -424,8 +424,13 @@ good_area:
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (regs->tstate & TSTATE_PRIV) {
+			insn = get_fault_insn(regs, insn);
+			goto handle_kernel_fault;
+		}
 		goto exit_exception;
+	}
 
 	/* The fault is fully completed (including releasing mmap lock) */
 	if (fault & VM_FAULT_COMPLETED)
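Note on the sparc64 swap-PTE hunks above: the type field shrinks from 8 bits (0xffUL) to 7 bits (0x7fUL) so that bit 20 can hold the new _PAGE_SWP_EXCLUSIVE marker between the type bits (13-19) and the offset bits (21 and up). The following is a minimal stand-alone sketch of that layout, not kernel code: it assumes an LP64 target and PAGE_SHIFT of 13 (sparc64's 8K base pages), and the helpers swp_entry()/swp_type()/swp_offset() are local stand-ins that mirror the __swp_* macros from the diff.

/* Stand-alone sketch (not kernel code): sparc64 swap-PTE bit layout. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT		13UL			/* assumed: sparc64 8K pages */
#define _PAGE_SWP_EXCLUSIVE	0x0000000000100000UL	/* bit 20, as in the diff */

/* Stand-ins for the kernel's __swp_entry()/__swp_type()/__swp_offset(). */
static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
	return ((type & 0x7fUL) << PAGE_SHIFT) |
	       (offset << (PAGE_SHIFT + 8UL));
}

static unsigned long swp_type(unsigned long val)
{
	return (val >> PAGE_SHIFT) & 0x7fUL;
}

static unsigned long swp_offset(unsigned long val)
{
	return val >> (PAGE_SHIFT + 8UL);
}

int main(void)
{
	unsigned long pte = swp_entry(3, 0x12345);

	/* Marking the entry exclusive must not disturb type or offset. */
	pte |= _PAGE_SWP_EXCLUSIVE;
	assert(swp_type(pte) == 3);
	assert(swp_offset(pte) == 0x12345);

	/* Clearing the marker restores the original encoding exactly. */
	pte &= ~_PAGE_SWP_EXCLUSIVE;
	assert(pte == swp_entry(3, 0x12345));

	printf("exclusive-marker round trip OK\n");
	return 0;
}

Because the marker bit lies outside both fields, pte_swp_mkexclusive() and pte_swp_clear_exclusive() can flip it on a live swap PTE without re-encoding the type or offset.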