Diffstat (limited to 'arch/arc/include/asm')
-rw-r--r--  arch/arc/include/asm/Kbuild  51
-rw-r--r--  arch/arc/include/asm/arcregs.h  389
-rw-r--r--  arch/arc/include/asm/asm-offsets.h  5
-rw-r--r--  arch/arc/include/asm/asserts.h  34
-rw-r--r--  arch/arc/include/asm/atomic-llsc.h  97
-rw-r--r--  arch/arc/include/asm/atomic-spinlock.h  111
-rw-r--r--  arch/arc/include/asm/atomic.h  216
-rw-r--r--  arch/arc/include/asm/atomic64-arcv2.h  230
-rw-r--r--  arch/arc/include/asm/barrier.h  60
-rw-r--r--  arch/arc/include/asm/bitops.h  481
-rw-r--r--  arch/arc/include/asm/bug.h  20
-rw-r--r--  arch/arc/include/asm/cache.h  100
-rw-r--r--  arch/arc/include/asm/cacheflush.h  73
-rw-r--r--  arch/arc/include/asm/cachetype.h  8
-rw-r--r--  arch/arc/include/asm/checksum.h  11
-rw-r--r--  arch/arc/include/asm/clk.h  22
-rw-r--r--  arch/arc/include/asm/cmpxchg.h  228
-rw-r--r--  arch/arc/include/asm/current.h  15
-rw-r--r--  arch/arc/include/asm/delay.h  23
-rw-r--r--  arch/arc/include/asm/disasm.h  5
-rw-r--r--  arch/arc/include/asm/dma-mapping.h  221
-rw-r--r--  arch/arc/include/asm/dma.h  5
-rw-r--r--  arch/arc/include/asm/dsp-impl.h  152
-rw-r--r--  arch/arc/include/asm/dsp.h  29
-rw-r--r--  arch/arc/include/asm/dwarf.h  43
-rw-r--r--  arch/arc/include/asm/elf.h  17
-rw-r--r--  arch/arc/include/asm/entry-arcv2.h  326
-rw-r--r--  arch/arc/include/asm/entry-compact.h  387
-rw-r--r--  arch/arc/include/asm/entry.h  563
-rw-r--r--  arch/arc/include/asm/exec.h  5
-rw-r--r--  arch/arc/include/asm/fpu.h  57
-rw-r--r--  arch/arc/include/asm/futex.h  152
-rw-r--r--  arch/arc/include/asm/highmem.h  53
-rw-r--r--  arch/arc/include/asm/hugepage.h  72
-rw-r--r--  arch/arc/include/asm/io.h  154
-rw-r--r--  arch/arc/include/asm/irq.h  28
-rw-r--r--  arch/arc/include/asm/irqflags-arcv2.h  175
-rw-r--r--  arch/arc/include/asm/irqflags-compact.h  201
-rw-r--r--  arch/arc/include/asm/irqflags.h  173
-rw-r--r--  arch/arc/include/asm/jump_label.h  72
-rw-r--r--  arch/arc/include/asm/kdebug.h  5
-rw-r--r--  arch/arc/include/asm/kgdb.h  37
-rw-r--r--  arch/arc/include/asm/kprobes.h  22
-rw-r--r--  arch/arc/include/asm/linkage.h  59
-rw-r--r--  arch/arc/include/asm/mach_desc.h  37
-rw-r--r--  arch/arc/include/asm/mmu-arcv2.h  105
-rw-r--r--  arch/arc/include/asm/mmu.h  59
-rw-r--r--  arch/arc/include/asm/mmu_context.h  229
-rw-r--r--  arch/arc/include/asm/module.h  15
-rw-r--r--  arch/arc/include/asm/mutex.h  18
-rw-r--r--  arch/arc/include/asm/page.h  136
-rw-r--r--  arch/arc/include/asm/pci.h  19
-rw-r--r--  arch/arc/include/asm/perf_event.h  67
-rw-r--r--  arch/arc/include/asm/pgalloc.h  96
-rw-r--r--  arch/arc/include/asm/pgtable-bits-arcv2.h  147
-rw-r--r--  arch/arc/include/asm/pgtable-levels.h  186
-rw-r--r--  arch/arc/include/asm/pgtable.h  395
-rw-r--r--  arch/arc/include/asm/processor.h  126
-rw-r--r--  arch/arc/include/asm/prom.h  14
-rw-r--r--  arch/arc/include/asm/ptrace.h  170
-rw-r--r--  arch/arc/include/asm/sections.h  7
-rw-r--r--  arch/arc/include/asm/segment.h  24
-rw-r--r--  arch/arc/include/asm/serial.h  28
-rw-r--r--  arch/arc/include/asm/setup.h  34
-rw-r--r--  arch/arc/include/asm/shmparam.h  7
-rw-r--r--  arch/arc/include/asm/smp.h  58
-rw-r--r--  arch/arc/include/asm/spinlock.h  294
-rw-r--r--  arch/arc/include/asm/spinlock_types.h  13
-rw-r--r--  arch/arc/include/asm/stacktrace.h  34
-rw-r--r--  arch/arc/include/asm/string.h  8
-rw-r--r--  arch/arc/include/asm/switch_to.h  26
-rw-r--r--  arch/arc/include/asm/syscall.h  50
-rw-r--r--  arch/arc/include/asm/syscalls.h  11
-rw-r--r--  arch/arc/include/asm/thread_info.h  53
-rw-r--r--  arch/arc/include/asm/timex.h  5
-rw-r--r--  arch/arc/include/asm/tlb-mmu1.h  104
-rw-r--r--  arch/arc/include/asm/tlb.h  37
-rw-r--r--  arch/arc/include/asm/tlbflush.h  26
-rw-r--r--  arch/arc/include/asm/uaccess.h  163
-rw-r--r--  arch/arc/include/asm/unaligned.h  29
-rw-r--r--  arch/arc/include/asm/unistd.h  14
-rw-r--r--  arch/arc/include/asm/unwind.h  9
-rw-r--r--  arch/arc/include/asm/vermagic.h  8
-rw-r--r--  arch/arc/include/asm/vmalloc.h  4
84 files changed, 4413 insertions, 3639 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index d8dd660898b9..4c69522e0328 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -1,48 +1,9 @@
-generic-y += auxvec.h
-generic-y += bugs.h
-generic-y += bitsperlong.h
-generic-y += clkdev.h
-generic-y += cputime.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += emergency-restart.h
-generic-y += errno.h
-generic-y += fcntl.h
-generic-y += fb.h
-generic-y += ftrace.h
-generic-y += hardirq.h
-generic-y += hw_irq.h
-generic-y += ioctl.h
-generic-y += ioctls.h
-generic-y += ipcbuf.h
-generic-y += irq_regs.h
-generic-y += kmap_types.h
+# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+
+generic-y += extable.h
generic-y += kvm_para.h
-generic-y += local.h
-generic-y += local64.h
-generic-y += mman.h
-generic-y += msgbuf.h
-generic-y += param.h
+generic-y += mcs_spinlock.h
generic-y += parport.h
-generic-y += pci.h
-generic-y += percpu.h
-generic-y += poll.h
-generic-y += posix_types.h
-generic-y += resource.h
-generic-y += scatterlist.h
-generic-y += sembuf.h
-generic-y += shmbuf.h
-generic-y += siginfo.h
-generic-y += socket.h
-generic-y += sockios.h
-generic-y += stat.h
-generic-y += statfs.h
-generic-y += termbits.h
-generic-y += termios.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += types.h
-generic-y += ucontext.h
generic-y += user.h
-generic-y += vga.h
-generic-y += xor.h
+generic-y += text-patching.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 355cb470c2a4..d84908a177bd 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -1,47 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_ARCREGS_H
#define _ASM_ARC_ARCREGS_H
-#ifdef __KERNEL__
-
/* Build Configuration Registers */
-#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
+#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */
+#define ARC_REG_ERP_CTRL 0x3F /* ARCv2 Error protection control */
+#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */
#define ARC_REG_CRC_BCR 0x62
-#define ARC_REG_DVFB_BCR 0x64
-#define ARC_REG_EXTARITH_BCR 0x65
#define ARC_REG_VECBASE_BCR 0x68
#define ARC_REG_PERIBASE_BCR 0x69
-#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */
-#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */
-#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
-#define ARC_REG_TIMERS_BCR 0x75
-#define ARC_REG_ICCM_BCR 0x78
+#define ARC_REG_FP_BCR 0x6B /* ARCompact: Single-Precision FPU */
+#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
+#define ARC_REG_ERP_BUILD 0xc7 /* ARCv2 Error protection Build: ECC/Parity */
+#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */
+#define ARC_REG_SLC_BCR 0xce
+#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */
+#define ARC_REG_AP_BCR 0x76
+#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
#define ARC_REG_XY_MEM_BCR 0x79
#define ARC_REG_MAC_BCR 0x7a
-#define ARC_REG_MUL_BCR 0x7b
+#define ARC_REG_MPY_BCR 0x7b
#define ARC_REG_SWAP_BCR 0x7c
#define ARC_REG_NORM_BCR 0x7d
#define ARC_REG_MIXMAX_BCR 0x7e
#define ARC_REG_BARREL_BCR 0x7f
#define ARC_REG_D_UNCACH_BCR 0x6A
+#define ARC_REG_BPU_BCR 0xc0
+#define ARC_REG_ISA_CFG_BCR 0xc1
+#define ARC_REG_LPB_BUILD 0xE9 /* ARCv2 Loop Buffer Build */
+#define ARC_REG_RTT_BCR 0xF2
+#define ARC_REG_IRQ_BCR 0xF3
+#define ARC_REG_MICRO_ARCH_BCR 0xF9 /* ARCv2 Product revision */
+#define ARC_REG_SMART_BCR 0xFF
+#define ARC_REG_CLUSTER_BCR 0xcf
+#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */
+#define ARC_REG_LPB_CTRL 0x488 /* ARCv2 Loop Buffer control */
+#define ARC_REG_FPU_CTRL 0x300
+#define ARC_REG_FPU_STATUS 0x301
+
+/* Common for ARCompact and ARCv2 status register */
+#define ARC_REG_STATUS32 0x0A
/* status32 Bits Positions */
#define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_DE_BIT 6 /* PC is in delay slot */
#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_Z_BIT 11
#define STATUS_L_BIT 12 /* Loop inhibit */
/* These masks correspond to the status word(STATUS_32) bits */
#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_Z_MASK (1<<STATUS_Z_BIT)
#define STATUS_L_MASK (1<<STATUS_L_BIT)
/*
@@ -50,19 +65,27 @@
* [15: 8] = Exception Cause Code
* [ 7: 0] = Exception Parameters (for certain types only)
*/
-#define ECR_VEC_MASK 0xff0000
-#define ECR_CODE_MASK 0x00ff00
-#define ECR_PARAM_MASK 0x0000ff
-
-/* Exception Cause Vector Values */
+#ifdef CONFIG_ISA_ARCOMPACT
+#define ECR_V_MEM_ERR 0x01
#define ECR_V_INSN_ERR 0x02
#define ECR_V_MACH_CHK 0x20
#define ECR_V_ITLB_MISS 0x21
#define ECR_V_DTLB_MISS 0x22
#define ECR_V_PROTV 0x23
#define ECR_V_TRAP 0x25
+#else
+#define ECR_V_MEM_ERR 0x01
+#define ECR_V_INSN_ERR 0x02
+#define ECR_V_MACH_CHK 0x03
+#define ECR_V_ITLB_MISS 0x04
+#define ECR_V_DTLB_MISS 0x05
+#define ECR_V_PROTV 0x06
+#define ECR_V_TRAP 0x09
+#define ECR_V_MISALIGN 0x0d
+#endif
+
+/* DTLB Miss and Protection Violation Cause Codes */
-/* Protection Violation Exception Cause Code Values */
#define ECR_C_PROTV_INST_FETCH 0x00
#define ECR_C_PROTV_LOAD 0x01
#define ECR_C_PROTV_STORE 0x02
@@ -78,14 +101,11 @@
#define ECR_C_BIT_DTLB_LD_MISS 8
#define ECR_C_BIT_DTLB_ST_MISS 9
-/* Dummy ECR values for Interrupts */
-#define event_IRQ1 0x0031abcd
-#define event_IRQ2 0x0032abcd
-
/* Auxiliary registers */
#define AUX_IDENTITY 4
+#define AUX_EXEC_CTRL 8
#define AUX_INTR_VEC_BASE 0x25
-
+#define AUX_VOL 0x5e
/*
* Floating Pt Registers
@@ -98,107 +118,39 @@
#define ARC_AUX_DPFP_2H 0x304
#define ARC_AUX_DPFP_STAT 0x305
-#ifndef __ASSEMBLY__
-
/*
- ******************************************************************
- * Inline ASM macros to read/write AUX Regs
- * Essentially invocation of lr/sr insns from "C"
+ * DSP-related registers
+ * Registers names must correspond to dsp_callee_regs structure fields names
+ * for automatic offset calculation in DSP_AUX_SAVE_RESTORE macros.
*/
-
-#if 1
-
-#define read_aux_reg(reg) __builtin_arc_lr(reg)
-
-/* gcc builtin sr needs reg param to be long immediate */
-#define write_aux_reg(reg_immed, val) \
- __builtin_arc_sr((unsigned int)val, reg_immed)
-
-#else
-
-#define read_aux_reg(reg) \
-({ \
- unsigned int __ret; \
- __asm__ __volatile__( \
- " lr %0, [%1]" \
- : "=r"(__ret) \
- : "i"(reg)); \
- __ret; \
-})
-
-/*
- * Aux Reg address is specified as long immediate by caller
- * e.g.
- * write_aux_reg(0x69, some_val);
- * This generates tightest code.
- */
-#define write_aux_reg(reg_imm, val) \
-({ \
- __asm__ __volatile__( \
- " sr %0, [%1] \n" \
- : \
- : "ir"(val), "i"(reg_imm)); \
-})
-
-/*
- * Aux Reg address is specified in a variable
- * * e.g.
- * reg_num = 0x69
- * write_aux_reg2(reg_num, some_val);
- * This has to generate glue code to load the reg num from
- * memory to a reg hence not recommended.
- */
-#define write_aux_reg2(reg_in_var, val) \
-({ \
- unsigned int tmp; \
- \
- __asm__ __volatile__( \
- " ld %0, [%2] \n\t" \
- " sr %1, [%0] \n\t" \
- : "=&r"(tmp) \
- : "r"(val), "memory"(&reg_in_var)); \
-})
-
-#endif
-
-#define READ_BCR(reg, into) \
-{ \
- unsigned int tmp; \
- tmp = read_aux_reg(reg); \
- if (sizeof(tmp) == sizeof(into)) { \
- into = *((typeof(into) *)&tmp); \
- } else { \
- extern void bogus_undefined(void); \
- bogus_undefined(); \
- } \
-}
-
-#define WRITE_BCR(reg, into) \
-{ \
- unsigned int tmp; \
- if (sizeof(tmp) == sizeof(into)) { \
- tmp = (*(unsigned int *)(into)); \
- write_aux_reg(reg, tmp); \
- } else { \
- extern void bogus_undefined(void); \
- bogus_undefined(); \
- } \
-}
+#define ARC_AUX_DSP_BUILD 0x7A
+#define ARC_AUX_ACC0_LO 0x580
+#define ARC_AUX_ACC0_GLO 0x581
+#define ARC_AUX_ACC0_HI 0x582
+#define ARC_AUX_ACC0_GHI 0x583
+#define ARC_AUX_DSP_BFLY0 0x598
+#define ARC_AUX_DSP_CTRL 0x59F
+#define ARC_AUX_DSP_FFT_CTRL 0x59E
+
+#define ARC_AUX_AGU_BUILD 0xCC
+#define ARC_AUX_AGU_AP0 0x5C0
+#define ARC_AUX_AGU_AP1 0x5C1
+#define ARC_AUX_AGU_AP2 0x5C2
+#define ARC_AUX_AGU_AP3 0x5C3
+#define ARC_AUX_AGU_OS0 0x5D0
+#define ARC_AUX_AGU_OS1 0x5D1
+#define ARC_AUX_AGU_MOD0 0x5E0
+#define ARC_AUX_AGU_MOD1 0x5E1
+#define ARC_AUX_AGU_MOD2 0x5E2
+#define ARC_AUX_AGU_MOD3 0x5E3
+
+#ifndef __ASSEMBLER__
+
+#include <soc/arc/arc_aux.h>
/* Helpers */
#define TO_KB(bytes) ((bytes) >> 10)
#define TO_MB(bytes) (TO_KB(bytes) >> 10)
-#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
-#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
-
-#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
-/* These DPFP regs need to be saved/restored across ctx-sw */
-struct arc_fpu {
- struct {
- unsigned int l, h;
- } aux_dpfp[2];
-};
-#endif
/*
***************************************************************
@@ -212,46 +164,86 @@ struct bcr_identity {
#endif
};
-#define EXTN_SWAP_VALID 0x1
-#define EXTN_NORM_VALID 0x2
-#define EXTN_MINMAX_VALID 0x2
-#define EXTN_BARREL_VALID 0x2
+struct bcr_isa_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
+ pad1:12, ver:8;
+#else
+ unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1,
+ ldd:1, pad2:4, div_rem:4;
+#endif
+};
+
+struct bcr_uarch_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, prod:8, maj:8, min:8;
+#else
+ unsigned int min:8, maj:8, prod:8, pad:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
+ u_itlb:4, u_dtlb:4;
+#else
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
+ ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_4 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
+ n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
+#else
+ /* DTLB ITLB JES JE JA */
+ unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
+ pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
+#endif
+};
-struct bcr_extn {
+struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
- norm:2, swap:1;
+ unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
- unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
- crc:1, pad:20;
+ unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
};
-/* DSP Options Ref Manual */
-struct bcr_extn_mac_mul {
+struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:16, type:8, ver:8;
+ unsigned int pad:24, way:2, lsz:2, sz:4;
#else
- unsigned int ver:8, type:8, pad:16;
+ unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
};
-struct bcr_extn_xymem {
+struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+ unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
- unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+ unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
};
-struct bcr_perip {
+struct bcr_volatile {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int start:8, pad2:8, sz:8, pad:8;
+ unsigned int start:4, limit:4, pad:22, order:1, disable:1;
#else
- unsigned int pad:8, sz:8, pad2:8, start:8;
+ unsigned int disable:1, order:1, pad:22, limit:4, start:4;
#endif
};
-struct bcr_iccm {
+
+struct bcr_mpy {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
+#else
+ unsigned int ver:8, type:2, cycles:2, dsp:4, x1616:8, pad:8;
+#endif
+};
+
+struct bcr_iccm_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int base:16, pad:5, sz:3, ver:8;
#else
@@ -259,17 +251,15 @@ struct bcr_iccm {
#endif
};
-/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
-struct bcr_dccm_base {
+struct bcr_iccm_arcv2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int addr:24, ver:8;
+ unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8;
#else
- unsigned int ver:8, addr:24;
+ unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8;
#endif
};
-/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
-struct bcr_dccm {
+struct bcr_dccm_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int res:21, sz:3, ver:8;
#else
@@ -277,8 +267,16 @@ struct bcr_dccm {
#endif
};
-/* Both SP and DP FPU BCRs have same format */
-struct bcr_fp {
+struct bcr_dccm_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8;
+#else
+ unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12;
+#endif
+};
+
+/* ARCompact: Both SP and DP FPU BCRs have same format */
+struct bcr_fp_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int fast:1, ver:8;
#else
@@ -286,41 +284,84 @@ struct bcr_fp {
#endif
};
-/*
- *******************************************************************
- * Generic structures to hold build configuration used at runtime
- */
+struct bcr_fp_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:15, dp:1, pad1:7, sp:1, ver:8;
+#else
+ unsigned int ver:8, sp:1, pad1:7, dp:1, pad2:15;
+#endif
+};
+
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:21, min:1, num:2, ver:8;
+#else
+ unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
+#include <soc/arc/timers.h>
+
+struct bcr_bpu_arcompact {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:19, fam:1, pad:2, ent:2, ver:8;
+#else
+ unsigned int ver:8, ent:2, pad:2, fam:1, pad2:19;
+#endif
+};
+
+struct bcr_bpu_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:6, fbe:2, tqe:2, ts:4, ft:1, rse:2, pte:3, bce:3, ver:8;
+#else
+ unsigned int ver:8, bce:3, pte:3, rse:2, ft:1, ts:4, tqe:2, fbe:2, pad:6;
+#endif
+};
-struct cpuinfo_arc_mmu {
- unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+/* Error Protection Build: ECC/Parity */
+struct bcr_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8;
+#else
+ unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5;
+#endif
};
-struct cpuinfo_arc_cache {
- unsigned int sz, line_len, assoc, ver;
+/* Error Protection Control */
+struct ctl_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:27, mpd:1, pad1:2, dpd:1, dpi:1;
+#else
+ unsigned int dpi:1, dpd:1, pad1:2, mpd:1, pad2:27;
+#endif
};
-struct cpuinfo_arc_ccm {
- unsigned int base_addr, sz;
+struct bcr_lpb {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:16, entries:8, ver:8;
+#else
+ unsigned int ver:8, entries:8, pad:16;
+#endif
};
-struct cpuinfo_arc {
- struct cpuinfo_arc_cache icache, dcache;
- struct cpuinfo_arc_mmu mmu;
- struct bcr_identity core;
- unsigned int timers;
- unsigned int vec_base;
- unsigned int uncached_base;
- struct cpuinfo_arc_ccm iccm, dccm;
- struct bcr_extn extn;
- struct bcr_extn_xymem extn_xymem;
- struct bcr_extn_mac_mul extn_mac_mul;
- struct bcr_fp fp, dpfp;
+struct bcr_generic {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int info:24, ver:8;
+#else
+ unsigned int ver:8, info:24;
+#endif
};
-extern struct cpuinfo_arc cpuinfo_arc700[];
+static inline int is_isa_arcv2(void)
+{
+ return IS_ENABLED(CONFIG_ISA_ARCV2);
+}
+
+static inline int is_isa_arcompact(void)
+{
+ return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
+}
#endif /* __ASSEMBLER__ */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_ARC_ARCREGS_H */
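
As a usage sketch (illustrative only, not part of the patch): one way to probe a Build Configuration Register into the generic bitfield overlay defined above. It assumes read_aux_reg() is still available via the newly included <soc/arc/arc_aux.h>; the helper name mpy_version() and the union are purely for illustration.

	/* Illustrative sketch, not part of the patch */
	static inline unsigned int mpy_version(void)
	{
		union {
			struct bcr_generic bcr;
			unsigned int word;
		} u;

		u.word = read_aux_reg(ARC_REG_MPY_BCR);

		return u.bcr.ver;	/* a version field of 0 conventionally means the block is absent */
	}
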
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
index dad18768fe43..32a1d3d518dc 100644
--- a/arch/arc/include/asm/asm-offsets.h
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/asserts.h b/arch/arc/include/asm/asserts.h
new file mode 100644
index 000000000000..108f33be6aa5
--- /dev/null
+++ b/arch/arc/include/asm/asserts.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_ASSERTS_H
+#define __ASM_ARC_ASSERTS_H
+
+/* Helpers to sanitize config options. */
+
+void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena);
+void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena);
+
+/*
+ * Check required config option:
+ * - panic in case of OPT enabled but corresponding HW absent.
+ * - warn in case of OPT disabled but corresponding HW exists.
+*/
+#define CHK_OPT_STRICT(opt_name, hw_exists) \
+({ \
+ chk_opt_strict(#opt_name, hw_exists, IS_ENABLED(opt_name)); \
+})
+
+/*
+ * Check optional config option:
+ * - panic in case of OPT enabled but corresponding HW absent.
+*/
+#define CHK_OPT_WEAK(opt_name, hw_exists) \
+({ \
+ chk_opt_weak(#opt_name, hw_exists, IS_ENABLED(opt_name)); \
+})
+
+#endif /* __ASM_ARC_ASSERTS_H */
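
As a usage sketch for the two checkers (illustrative only, not part of the patch; the option and flag names are hypothetical), platform probe code pairs a Kconfig symbol with the matching hardware-presence flag:

	/* Hypothetical names, for illustration only */
	CHK_OPT_STRICT(CONFIG_ARC_HAS_FEATURE_X, hw_has_feature_x);	/* panic if enabled but HW absent, warn if HW present but disabled */
	CHK_OPT_WEAK(CONFIG_ARC_OPTIONAL_Y, hw_has_y);			/* only panic if enabled but HW absent */
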
diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
new file mode 100644
index 000000000000..5258cb81a16b
--- /dev/null
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_ATOMIC_LLSC_H
+#define _ASM_ARC_ATOMIC_LLSC_H
+
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#define ATOMIC_OP(op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned int val; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
+ : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
+ [i] "ir" (i) \
+ : "cc", "memory"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, asm_op) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned int val; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %[val], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[val], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r" (val) \
+ : [ctr] "r" (&v->counter), \
+ [i] "ir" (i) \
+ : "cc", "memory"); \
+ \
+ return val; \
+}
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+
+#define ATOMIC_FETCH_OP(op, asm_op) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ unsigned int val, orig; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %[orig], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[orig], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r" (val), \
+ [orig] "=&r" (orig) \
+ : [ctr] "r" (&v->counter), \
+ [i] "ir" (i) \
+ : "cc", "memory"); \
+ \
+ return orig; \
+}
+
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN(op, asm_op) \
+ ATOMIC_FETCH_OP(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_FETCH_OP(op, asm_op)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
+
+#define arch_atomic_andnot arch_atomic_andnot
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif
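
To make the macro-generated API concrete, this is what ATOMIC_OP(add, add) above expands to once the stringified asm_op is pasted into the asm template (shown for illustration only, not part of the patch):

	static inline void arch_atomic_add(int i, atomic_t *v)
	{
		unsigned int val;

		__asm__ __volatile__(
		"1:	llock   %[val], [%[ctr]]		\n"
		"	add     %[val], %[val], %[i]		\n"
		"	scond   %[val], [%[ctr]]		\n"
		"	bnz     1b				\n"
		: [val] "=&r" (val)	/* Early clobber to prevent reg reuse */
		: [ctr] "r" (&v->counter),	/* Not "m": llock only supports reg direct addr mode */
		  [i] "ir" (i)
		: "cc", "memory");
	}

The loop retries whenever SCOND (store-conditional) fails, i.e. whenever another core wrote v->counter between the LLOCK and the SCOND.
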
diff --git a/arch/arc/include/asm/atomic-spinlock.h b/arch/arc/include/asm/atomic-spinlock.h
new file mode 100644
index 000000000000..89d12a60f84c
--- /dev/null
+++ b/arch/arc/include/asm/atomic-spinlock.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
+#define _ASM_ARC_ATOMIC_SPLOCK_H
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void arch_atomic_set(atomic_t *v, int i)
+{
+ /*
+ * Independent of hardware support, all of the atomic_xxx() APIs need
+ * to follow the same locking rules to make sure that a "hardware"
+ * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+ * sequence
+ *
+ * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+ * requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ WRITE_ONCE(v->counter, i);
+ atomic_ops_unlock(flags);
+}
+
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ atomic_ops_lock(flags); \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned int temp; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ temp = v->counter; \
+ temp c_op i; \
+ v->counter = temp; \
+ atomic_ops_unlock(flags); \
+ \
+ return temp; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned int orig; \
+ \
+ /* \
+ * spin lock/unlock provides the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(flags); \
+ orig = v->counter; \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
+ \
+ return orig; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define arch_atomic_andnot arch_atomic_andnot
+
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif
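
For comparison with the LLSC flavour, this is the expansion of ATOMIC_OP_RETURN(add, +=, add) above, i.e. the spinlock-protected arch_atomic_add_return() (illustration only, not part of the patch):

	static inline int arch_atomic_add_return(int i, atomic_t *v)
	{
		unsigned long flags;
		unsigned int temp;

		/* spin lock/unlock provides the needed smp_mb() before/after */
		atomic_ops_lock(flags);
		temp = v->counter;
		temp += i;
		v->counter = temp;
		atomic_ops_unlock(flags);

		return temp;
	}
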
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 83f03ca6caf6..e615c42b93ba 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -1,17 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/compiler.h>
@@ -19,214 +14,23 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define atomic_read(v) ((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
-
-#define atomic_set(v, i) (((v)->counter) = (i))
-
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " add %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp) /* Early clobber, to prevent reg reuse */
- : "r"(&v->counter), "ir"(i)
- : "cc");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " sub %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(&v->counter), "ir"(i)
- : "cc");
-}
-
-/* add and also return the new value */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " add %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(&v->counter), "ir"(i)
- : "cc");
-
- return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " sub %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(&v->counter), "ir"(i)
- : "cc");
-
- return temp;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " bic %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(addr), "ir"(mask)
- : "cc");
-}
-
-#else /* !CONFIG_ARC_HAS_LLSC */
-
-#ifndef CONFIG_SMP
-
- /* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) (((v)->counter) = (i))
-
+#include <asm/atomic-llsc.h>
#else
-
-static inline void atomic_set(atomic_t *v, int i)
-{
- /*
- * Independent of hardware support, all of the atomic_xxx() APIs need
- * to follow the same locking rules to make sure that a "hardware"
- * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
- * sequence
- *
- * Thus atomic_set() despite being 1 insn (and seemingly atomic)
- * requires the locking.
- */
- unsigned long flags;
-
- atomic_ops_lock(flags);
- v->counter = i;
- atomic_ops_unlock(flags);
-}
+#include <asm/atomic-spinlock.h>
#endif
/*
- * Non hardware assisted Atomic-R-M-W
- * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
- */
-
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long flags;
-
- atomic_ops_lock(flags);
- v->counter += i;
- atomic_ops_unlock(flags);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long flags;
-
- atomic_ops_lock(flags);
- v->counter -= i;
- atomic_ops_unlock(flags);
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- unsigned long temp;
-
- atomic_ops_lock(flags);
- temp = v->counter;
- temp += i;
- v->counter = temp;
- atomic_ops_unlock(flags);
-
- return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- unsigned long temp;
-
- atomic_ops_lock(flags);
- temp = v->counter;
- temp -= i;
- v->counter = temp;
- atomic_ops_unlock(flags);
-
- return temp;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
-
- atomic_ops_lock(flags);
- *addr &= ~mask;
- atomic_ops_unlock(flags);
-}
-
-#endif /* !CONFIG_ARC_HAS_LLSC */
-
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v
+ * 64-bit atomics
*/
-#define __atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
- c = old; \
- c; \
-})
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-
-#define ATOMIC_INIT(i) { (i) }
-
+#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
-
+#else
+#include <asm/atomic64-arcv2.h>
#endif
-#endif
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
new file mode 100644
index 000000000000..73080a664369
--- /dev/null
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
+ * - The address HAS to be 64-bit aligned
+ */
+
+#ifndef _ASM_ARC_ATOMIC64_ARCV2_H
+#define _ASM_ARC_ATOMIC64_ARCV2_H
+
+typedef struct {
+ s64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(a) { (a) }
+
+static inline s64 arch_atomic64_read(const atomic64_t *v)
+{
+ s64 val;
+
+ __asm__ __volatile__(
+ " ldd %0, [%1] \n"
+ : "=r"(val)
+ : "r"(&v->counter));
+
+ return val;
+}
+
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
+{
+ /*
+ * This could have been a simple assignment in "C" but would need
+ * explicit volatile. Otherwise gcc optimizers could elide the store
+ * which borked atomic64 self-test
+ * In the inline asm version, memory clobber needed for exact same
+ * reason, to tell gcc about the store.
+ *
+ * This however is not needed for sibling atomic64_add() etc since both
+ * load/store are explicitly done in inline asm. As long as API is used
+ * for each access, gcc has no way to optimize away any load/store
+ */
+ __asm__ __volatile__(
+ " std %0, [%1] \n"
+ :
+ : "r"(a), "r"(&v->counter)
+ : "memory");
+}
+
+#define ATOMIC64_OP(op, op1, op2) \
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
+{ \
+ s64 val; \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%1] \n" \
+ " " #op1 " %L0, %L0, %L2 \n" \
+ " " #op2 " %H0, %H0, %H2 \n" \
+ " scondd %0, [%1] \n" \
+ " bnz 1b \n" \
+ : "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc", "memory"); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, op1, op2) \
+static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
+{ \
+ s64 val; \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%1] \n" \
+ " " #op1 " %L0, %L0, %L2 \n" \
+ " " #op2 " %H0, %H0, %H2 \n" \
+ " scondd %0, [%1] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc", "memory"); \
+ \
+ return val; \
+}
+
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+
+#define ATOMIC64_FETCH_OP(op, op1, op2) \
+static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
+{ \
+ s64 val, orig; \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%2] \n" \
+ " " #op1 " %L1, %L0, %L3 \n" \
+ " " #op2 " %H1, %H0, %H3 \n" \
+ " scondd %1, [%2] \n" \
+ " bnz 1b \n" \
+ : "=&r"(orig), "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc", "memory"); \
+ \
+ return orig; \
+}
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_OP_RETURN(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
+
+ATOMIC64_OPS(add, add.f, adc)
+ATOMIC64_OPS(sub, sub.f, sbc)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
+
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, or, or)
+ATOMIC64_OPS(xor, xor, xor)
+
+#define arch_atomic64_andnot arch_atomic64_andnot
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline u64 __arch_cmpxchg64_relaxed(volatile void *ptr, u64 old, u64 new)
+{
+ u64 prev;
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " brne %L0, %L2, 2f \n"
+ " brne %H0, %H2, 2f \n"
+ " scondd %3, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "ir"(old), "r"(new)
+ : "memory", "cc");
+
+ return prev;
+}
+#define arch_cmpxchg64_relaxed __arch_cmpxchg64_relaxed
+
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
+{
+ s64 prev;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " scondd %2, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "r"(new)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return prev;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 val;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " sub.f %L0, %L0, 1 # w0 - 1, set C on borrow\n"
+ " sub.c %H0, %H0, 1 # if C set, w1 - 1\n"
+ " brlt %H0, 0, 2f \n"
+ " scondd %0, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(val)
+ : "r"(&v->counter)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return val;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 old, temp;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%2] \n"
+ " brne %L0, %L4, 2f # continue to add since v != u \n"
+ " breq.d %H0, %H4, 3f # return since v == u \n"
+ "2: \n"
+ " add.f %L1, %L0, %L3 \n"
+ " adc %H1, %H0, %H3 \n"
+ " scondd %1, [%2] \n"
+ " bnz 1b \n"
+ "3: \n"
+ : "=&r"(old), "=&r" (temp)
+ : "r"(&v->counter), "r"(a), "r"(u)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return old;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+
+#endif
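
Since the %L/%H operand modifiers are easy to misread, here is the expansion of ATOMIC64_OP(add, add.f, adc) above (illustration only, not part of the patch): %L0/%H0 select the low/high 32-bit halves of the 64-bit register pair holding val, so the carry produced by add.f is consumed by adc.

	static inline void arch_atomic64_add(s64 a, atomic64_t *v)
	{
		s64 val;

		__asm__ __volatile__(
		"1:				\n"
		"	llockd  %0, [%1]	\n"
		"	add.f   %L0, %L0, %L2	\n"
		"	adc     %H0, %H0, %H2	\n"
		"	scondd  %0, [%1]	\n"
		"	bnz     1b		\n"
		: "=&r"(val)
		: "r"(&v->counter), "ir"(a)
		: "cc", "memory");
	}
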
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
index f6cb7c4ffb35..4637de9e02fa 100644
--- a/arch/arc/include/asm/barrier.h
+++ b/arch/arc/include/asm/barrier.h
@@ -1,42 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
*/
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
-#ifndef __ASSEMBLY__
-
-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() mb()
-#define wmb() mb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-#define read_barrier_depends() mb()
-
-/* TODO-vineetg verify the correctness of macros here */
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
+#ifdef CONFIG_ISA_ARCV2
+
+/*
+ * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
+ * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
+ *
+ * Explicit barrier provided by DMB instruction
+ * - Operand supports fine grained load/store/load+store semantics
+ * - Ensures that selected memory operation issued before it will complete
+ * before any subsequent memory operation of same type
+ * - DMB guarantees SMP as well as local barrier semantics
+ * (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
+ * UP: barrier(), SMP: smp_*mb == *mb)
+ * - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not needed
+ * in the general case. Plus it only provides full barrier.
+ */
+
+#define mb() asm volatile("dmb 3\n" : : : "memory")
+#define rmb() asm volatile("dmb 1\n" : : : "memory")
+#define wmb() asm volatile("dmb 2\n" : : : "memory")
+
#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+/*
+ * ARCompact based cores (ARC700) only have SYNC instruction which is super
+ * heavy weight as it flushes the pipeline as well.
+ * There are no real SMP implementations of such cores.
+ */
-#define smp_read_barrier_depends() do { } while (0)
+#define mb() asm volatile("sync\n" : : : "memory")
#endif
+#include <asm-generic/barrier.h>
+
#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 647a83a8e756..df894235fdbc 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_BITOPS_H
@@ -13,390 +10,12 @@
#error only <linux/bitops.h> can be included directly
#endif
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/compiler.h>
-/*
- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
- * The Kconfig glue ensures that in SMP, this is only set if the container
- * SoC/platform has cross-core coherent LLOCK/SCOND
- */
-#if defined(CONFIG_ARC_HAS_LLSC)
-
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " bset %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " bclr %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " bxor %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-}
-
-/*
- * Semantically:
- * Test the bit
- * if clear
- * set it and return 0 (old value)
- * else
- * return 1 (old value).
- *
- * Since ARC lacks a equivalent h/w primitive, the bit is set unconditionally
- * and the old value of bit is returned
- */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old, temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- __asm__ __volatile__(
- "1: llock %0, [%2] \n"
- " bset %1, %0, %3 \n"
- " scond %1, [%2] \n"
- " bnz 1b \n"
- : "=&r"(old), "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int old, temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- __asm__ __volatile__(
- "1: llock %0, [%2] \n"
- " bclr %1, %0, %3 \n"
- " scond %1, [%2] \n"
- " bnz 1b \n"
- : "=&r"(old), "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int old, temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- __asm__ __volatile__(
- "1: llock %0, [%2] \n"
- " bxor %1, %0, %3 \n"
- " scond %1, [%2] \n"
- " bnz 1b \n"
- : "=&r"(old), "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-
- return (old & (1 << nr)) != 0;
-}
-
-#else /* !CONFIG_ARC_HAS_LLSC */
-
-#include <asm/smp.h>
-
-/*
- * Non hardware assisted Atomic-R-M-W
- * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
- *
- * There's "significant" micro-optimization in writing our own variants of
- * bitops (over generic variants)
- *
- * (1) The generic APIs have "signed" @nr while we have it "unsigned"
- * This avoids extra code to be generated for pointer arithmatic, since
- * is "not sure" that index is NOT -ve
- * (2) Utilize the fact that ARCompact bit fidding insn (BSET/BCLR/ASL) etc
- * only consider bottom 5 bits of @nr, so NO need to mask them off.
- * (GCC Quirk: however for constant @nr we still need to do the masking
- * at compile time)
- */
-
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- temp = *m;
- *m = temp | (1UL << nr);
-
- bitops_unlock(flags);
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- temp = *m;
- *m = temp & ~(1UL << nr);
-
- bitops_unlock(flags);
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- temp = *m;
- *m = temp ^ (1UL << nr);
-
- bitops_unlock(flags);
-}
-
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- old = *m;
- *m = old | (1 << nr);
-
- bitops_unlock(flags);
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- old = *m;
- *m = old & ~(1 << nr);
-
- bitops_unlock(flags);
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- old = *m;
- *m = old ^ (1 << nr);
-
- bitops_unlock(flags);
-
- return (old & (1 << nr)) != 0;
-}
-
-#endif /* CONFIG_ARC_HAS_LLSC */
-
-/***************************************
- * Non atomic variants
- **************************************/
-
-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- temp = *m;
- *m = temp | (1UL << nr);
-}
-
-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- temp = *m;
- *m = temp & ~(1UL << nr);
-}
-
-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- temp = *m;
- *m = temp ^ (1UL << nr);
-}
-
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- old = *m;
- *m = old | (1 << nr);
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- old = *m;
- *m = old & ~(1 << nr);
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- old = *m;
- *m = old ^ (1 << nr);
-
- return (old & (1 << nr)) != 0;
-}
-
-/*
- * This routine doesn't need to be atomic.
- */
-static inline int
-__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
- return ((1UL << (nr & 31)) &
- (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int
-__test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
- unsigned long mask;
-
- addr += nr >> 5;
-
- /* ARC700 only considers 5 bits in bit-fiddling insn */
- mask = 1 << nr;
-
- return ((mask & *addr) != 0);
-}
-
-#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
- __constant_test_bit((nr), (addr)) : \
- __test_bit((nr), (addr)))
+#ifdef CONFIG_ISA_ARCOMPACT
/*
* Count the number of zeros, starting from MSB
@@ -420,7 +39,7 @@ static inline __attribute__ ((const)) int clz(unsigned int x)
return res;
}
-static inline int constant_fls(int x)
+static inline int constant_fls(unsigned int x)
{
int r = 32;
@@ -442,10 +61,8 @@ static inline int constant_fls(int x)
x <<= 2;
r -= 2;
}
- if (!(x & 0x80000000u)) {
- x <<= 1;
+ if (!(x & 0x80000000u))
r -= 1;
- }
return r;
}
@@ -454,7 +71,7 @@ static inline int constant_fls(int x)
* @result: [1-32]
* fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
*/
-static inline __attribute__ ((const)) int fls(unsigned long x)
+static inline __attribute__ ((const)) int fls(unsigned int x)
{
if (__builtin_constant_p(x))
return constant_fls(x);
@@ -465,7 +82,7 @@ static inline __attribute__ ((const)) int fls(unsigned long x)
/*
* __fls: Similar to fls, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __fls(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
{
if (!x)
return 0;
@@ -482,7 +99,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
@@ -490,27 +107,93 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word)
return ffs(word) - 1;
}
+#else /* CONFIG_ISA_ARCV2 */
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned int x)
+{
+ int n;
+
+ asm volatile(
+ " fls.f %0, %1 \n" /* 0:31; 0(Z) if src 0 */
+ " add.nz %0, %0, 1 \n" /* 0:31 -> 1:32 */
+ : "=r"(n) /* Early clobber not needed */
+ : "r"(x)
+ : "cc");
+
+ return n;
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
+ */
+static inline __attribute__ ((const)) unsigned long __fls(unsigned long x)
+{
+ if (__builtin_constant_p(x))
+ return x ? BITS_PER_LONG - 1 - __builtin_clzl(x) : 0;
+ /* FLS insn has exactly same semantics as the API */
+ return __builtin_arc_fls(x);
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+static inline __attribute__ ((const)) int ffs(unsigned int x)
+{
+ int n;
+
+ asm volatile(
+ " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
+ " add.nz %0, %0, 1 \n" /* 0:31 -> 1:32 */
+ " mov.z %0, 0 \n" /* 31(Z)-> 0 */
+ : "=r"(n) /* Early clobber not needed */
+ : "r"(x)
+ : "cc");
+
+ return n;
+}
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
+{
+ unsigned long n;
+
+ asm volatile(
+ " ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
+ " mov.z %0, 0 \n" /* 31(Z)-> 0 */
+ : "=r"(n)
+ : "r"(x)
+ : "cc");
+
+ return n;
+
+}
+
+#endif /* CONFIG_ISA_ARCOMPACT */
+
/*
* ffz = Find First Zero in word.
* @return:[0-31], 32 if all 1's
*/
#define ffz(x) __ffs(~(x))
-/* TODO does this affect uni-processor code */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLER__ */
#endif
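
A quick worked example of the bit-scan semantics documented above (illustrative only, not part of the patch): for x = 0x00090000 (bits 16 and 19 set), fls(x) = 20 and __fls(x) = 19 (most-significant set bit, 1-based vs. 0-based), while ffs(x) = 17 and __ffs(x) = 16 (least-significant set bit); and since ffz() is simply __ffs(~x), ffz(0xFFFF0FFF) = 12.
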
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index 5b18e94c6678..171c16021f70 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -1,36 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_BUG_H
#define _ASM_ARC_BUG_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/ptrace.h>
struct task_struct;
void show_regs(struct pt_regs *regs);
-void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
+ const char *loglvl);
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
unsigned long address);
void die(const char *str, struct pt_regs *regs, unsigned long address);
-#define BUG() do { \
- dump_stack(); \
- pr_warn("Kernel BUG in %s: %s: %d!\n", \
- __FILE__, __func__, __LINE__); \
+#define BUG() do { \
+ pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ barrier_before_unreachable(); \
+ __builtin_trap(); \
} while (0)
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 5802849a6cae..040a97f4dd82 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ARC_ASM_CACHE_H
@@ -17,22 +14,18 @@
#endif
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-/* For a rare case where customers have differently config I/D */
-#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
-#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
-
-#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
-#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
+#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
/*
- * ARC700 doesn't cache any access in top 256M.
+ * ARC700 doesn't cache any access in top 1G (0xc000_0000 to 0xFFFF_FFFF)
* Ideal for wiring memory mapped peripherals as we don't need to do
* explicit uncached accesses (LD.di/ST.di) hence more portable drivers
*/
#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
+
+#include <linux/build_bug.h>
/* Uncached access macros */
#define arc_read_uncached_32(ptr) \
@@ -53,12 +46,83 @@
: "r"(data), "r"(ptr)); \
})
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+/* Largest line length for either L1 or L2 is 128 bytes */
+#define SMP_CACHE_BYTES 128
+#define cache_line_size() SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
+
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
+ * alignment for any atomic64_t embedded in buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
+extern int ioc_enable;
+extern unsigned long perip_base, perip_end;
+
+#endif /* !__ASSEMBLER__ */
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+#define ARC_REG_IC_IVIC 0x10
+#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIR 0x16
+#define ARC_REG_IC_ENDR 0x17
+#define ARC_REG_IC_IVIL 0x19
+#define ARC_REG_IC_PTAG 0x1E
+#define ARC_REG_IC_PTAG_HI 0x1F
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_DIS 0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
+#define ARC_REG_DC_IVDC 0x47
+#define ARC_REG_DC_CTRL 0x48
+#define ARC_REG_DC_IVDL 0x4A
+#define ARC_REG_DC_FLSH 0x4B
+#define ARC_REG_DC_FLDL 0x4C
+#define ARC_REG_DC_STARTR 0x4D
+#define ARC_REG_DC_ENDR 0x4E
+#define ARC_REG_DC_PTAG 0x5C
+#define ARC_REG_DC_PTAG_HI 0x5F
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_DIS 0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
+#define DC_CTRL_FLUSH_STATUS 0x100
+#define DC_CTRL_RGN_OP_INV 0x200
+#define DC_CTRL_RGN_OP_MSK 0x200
+
+/* System-level cache (L2 cache) related Auxiliary registers */
+#define ARC_REG_SLC_CFG 0x901
+#define ARC_REG_SLC_CTRL 0x903
+#define ARC_REG_SLC_FLUSH 0x904
+#define ARC_REG_SLC_INVALIDATE 0x905
+#define ARC_AUX_SLC_IVDL 0x910
+#define ARC_AUX_SLC_FLDL 0x912
+#define ARC_REG_SLC_RGN_START 0x914
+#define ARC_REG_SLC_RGN_START1 0x915
+#define ARC_REG_SLC_RGN_END 0x916
+#define ARC_REG_SLC_RGN_END1 0x917
-extern void arc_cache_init(void);
-extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
-extern void __init read_decode_cache_bcr(void);
+/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS 0x001
+#define SLC_CTRL_IM 0x040
+#define SLC_CTRL_BUSY 0x100
+#define SLC_CTRL_RGN_OP_INV 0x200
-#endif /* !__ASSEMBLY__ */
+/* IO coherency related Auxiliary registers */
+#define ARC_REG_IO_COH_ENABLE 0x500
+#define ARC_IO_COH_ENABLE_BIT BIT(0)
+#define ARC_REG_IO_COH_PARTIAL 0x501
+#define ARC_IO_COH_PARTIAL_BIT BIT(0)
+#define ARC_REG_IO_COH_AP0_BASE 0x508
+#define ARC_REG_IO_COH_AP0_SIZE 0x509
#endif /* _ASM_CACHE_H */
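
With the separate I/D line macros gone, cache.h is left with a single CACHE_LINE_MASK derived from L1_CACHE_BYTES. A stand-alone sketch of how such a mask rounds an address down to its cache-line start (the 64-byte line size below is an assumption; ARC configurations vary):

#include <stdio.h>
#include <stdint.h>

#define L1_CACHE_SHIFT	6			/* assumed 64-byte line */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))

int main(void)
{
	uintptr_t addr = 0x80001234;

	printf("addr 0x%lx starts its cache line at 0x%lx\n",
	       (unsigned long)addr,
	       (unsigned long)(addr & CACHE_LINE_MASK));	/* 0x80001200 */
	return 0;
}
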
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 6abc4972bc93..329c94cd45d8 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
* -flush_cache_dup_mm (fork)
* -likewise for flush_cache_mm (exit/execve)
@@ -21,65 +18,37 @@
#include <linux/mm.h>
#include <asm/shmparam.h>
-/*
- * Semantically we need this because icache doesn't snoop dcache/dma.
- * However ARC Cache flush requires paddr as well as vaddr, latter not available
- * in the flush_icache_page() API. So we no-op it but do the equivalent work
- * in update_mmu_cache()
- */
-#define flush_icache_page(vma, page)
-
void flush_cache_all(void);
-void flush_icache_range(unsigned long start, unsigned long end);
-void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
-void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
-#define __flush_dcache_page(p, v) \
- ___flush_dcache_page((unsigned long)p, (unsigned long)v)
+void flush_icache_range(unsigned long kstart, unsigned long kend);
+void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
-void dma_cache_wback_inv(unsigned long start, unsigned long sz);
-void dma_cache_inv(unsigned long start, unsigned long sz);
-void dma_cache_wback(unsigned long start, unsigned long sz);
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_wback(phys_addr_t start, unsigned long sz);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
/* TBD: optimize this */
#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()
#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-
#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
-#else /* VIPT aliasing dcache */
-
-/* To clear out stale userspace mappings */
-void flush_cache_mm(struct mm_struct *mm);
-void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start,unsigned long end);
-void flush_cache_page(struct vm_area_struct *vma,
- unsigned long user_addr, unsigned long page);
-
-/*
- * To make sure that userspace mapping is flushed to memory before
- * get_user_pages() uses a kernel mapping to access the page
- */
-#define ARCH_HAS_FLUSH_ANON_PAGE
-void flush_anon_page(struct vm_area_struct *vma,
- struct page *page, unsigned long u_vaddr);
-
-#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
-
/*
* A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
* This works around some PIO based drivers which don't call flush_dcache_page
@@ -87,26 +56,6 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1
-/*
- * Simple wrapper over config option
- * Bootup code ensures that hardware matches kernel configuration
- */
-static inline int cache_is_vipt_aliasing(void)
-{
- return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-}
-
-#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
-
-/*
- * checks if two addresses (after page aligning) index into same cache set
- */
-#define addr_not_cache_congruent(addr1, addr2) \
-({ \
- cache_is_vipt_aliasing() ? \
- (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
-})
-
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
diff --git a/arch/arc/include/asm/cachetype.h b/arch/arc/include/asm/cachetype.h
new file mode 100644
index 000000000000..acd3b6cb4bf5
--- /dev/null
+++ b/arch/arc/include/asm/cachetype.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARC_CACHETYPE_H
+#define __ASM_ARC_CACHETYPE_H
+
+#define cpu_dcache_is_aliasing() false
+#define cpu_icache_is_aliasing() true
+
+#endif
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
index 10957298b7a3..0b485800a392 100644
--- a/arch/arc/include/asm/checksum.h
+++ b/arch/arc/include/asm/checksum.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
* -Insn Scheduling improvements to csum core routines.
* = csum_fold( ) largely derived from ARM version.
@@ -27,7 +24,7 @@
*/
static inline __sum16 csum_fold(__wsum s)
{
- unsigned r = s << 16 | s >> 16; /* ror */
+ unsigned int r = s << 16 | s >> 16; /* ror */
s = ~s;
s -= r;
return s >> 16;
@@ -70,8 +67,8 @@ ip_fast_csum(const void *iph, unsigned int ihl)
* SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
*/
static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
__asm__ __volatile__(
" add.f %0, %0, %1 \n"
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
deleted file mode 100644
index bf9d29f5bd53..000000000000
--- a/arch/arc/include/asm/clk.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_CLK_H
-#define _ASM_ARC_CLK_H
-
-/* Although we can't really hide core_freq, the accessor is still better way */
-extern unsigned long core_freq;
-
-static inline unsigned long arc_get_core_freq(void)
-{
- return core_freq;
-}
-
-extern int arc_set_core_freq(unsigned long);
-
-#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index 03cd6894855d..76f43db0890f 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -1,143 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H
+#include <linux/build_bug.h>
#include <linux/types.h>
+#include <linux/cmpxchg-emu.h>
+
+#include <asm/barrier.h>
#include <asm/smp.h>
#ifdef CONFIG_ARC_HAS_LLSC
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " brne %0, %2, 2f \n"
- " scond %3, [%1] \n"
- " bnz 1b \n"
- "2: \n"
- : "=&r"(prev)
- : "r"(ptr), "ir"(expected),
- "r"(new) /* can't be "ir". scond can't take limm for "b" */
- : "cc");
-
- return prev;
-}
-
-#else
+/*
+ * if (*ptr == @old)
+ * *ptr = @new
+ */
+#define __cmpxchg(ptr, old, new) \
+({ \
+ __typeof__(*(ptr)) _prev; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %0, [%1] \n" \
+ " brne %0, %2, 2f \n" \
+ " scond %3, [%1] \n" \
+ " bnz 1b \n" \
+ "2: \n" \
+ : "=&r"(_prev) /* Early clobber prevent reg reuse */ \
+ : "r"(ptr), /* Not "m": llock only supports reg */ \
+ "ir"(old), \
+ "r"(new) /* Not "ir": scond can't take LIMM */ \
+ : "cc", \
+ "memory"); /* gcc knows memory is clobbered */ \
+ \
+ _prev; \
+})
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
-{
- unsigned long flags;
- int prev;
- volatile unsigned long *p = ptr;
+#define arch_cmpxchg_relaxed(ptr, old, new) \
+({ \
+ __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(*(ptr)) _o_ = (old); \
+ __typeof__(*(ptr)) _n_ = (new); \
+ __typeof__(*(ptr)) _prev_; \
+ \
+ switch(sizeof(*(_p_))) { \
+ case 1: \
+ _prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *__force)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
+ break; \
+ case 4: \
+ _prev_ = __cmpxchg(_p_, _o_, _n_); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ _prev_; \
+})
- atomic_ops_lock(flags);
- prev = *p;
- if (prev == expected)
- *p = new;
- atomic_ops_unlock(flags);
- return prev;
-}
+#else
-#endif /* CONFIG_ARC_HAS_LLSC */
+#define arch_cmpxchg(ptr, old, new) \
+({ \
+ volatile __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(*(ptr)) _o_ = (old); \
+ __typeof__(*(ptr)) _n_ = (new); \
+ __typeof__(*(ptr)) _prev_; \
+ unsigned long __flags; \
+ \
+ /* \
+ * spin lock/unlock provide the needed smp_mb() before/after \
+ */ \
+ atomic_ops_lock(__flags); \
+ _prev_ = *_p_; \
+ if (_prev_ == _o_) \
+ *_p_ = _n_; \
+ atomic_ops_unlock(__flags); \
+ _prev_; \
+})
-#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
- (unsigned long)(o), (unsigned long)(n)))
+#endif
/*
- * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
- * just to gaurantee semantics.
- * atomic_cmpxchg() needs to use the same locks as it's other atomic siblings
- * which also happens to be atomic_ops_lock.
- *
- * Thus despite semantically being different, implementation of atomic_cmpxchg()
- * is same as cmpxchg().
+ * xchg
*/
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#ifdef CONFIG_ARC_HAS_LLSC
+#define __arch_xchg(ptr, val) \
+({ \
+ __asm__ __volatile__( \
+ " ex %0, [%1] \n" /* set new value */ \
+ : "+r"(val) \
+ : "r"(ptr) \
+ : "memory"); \
+ _val_; /* get old value */ \
+})
-/*
- * xchg (reg with memory) based on "Native atomic" EX insn
- */
-static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
- int size)
-{
- extern unsigned long __xchg_bad_pointer(void);
-
- switch (size) {
- case 4:
- __asm__ __volatile__(
- " ex %0, [%1] \n"
- : "+r"(val)
- : "r"(ptr)
- : "memory");
-
- return val;
- }
- return __xchg_bad_pointer();
-}
-
-#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
- sizeof(*(ptr))))
+#define arch_xchg_relaxed(ptr, val) \
+({ \
+ __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(*(ptr)) _val_ = (val); \
+ \
+ switch(sizeof(*(_p_))) { \
+ case 4: \
+ _val_ = __arch_xchg(_p_, _val_); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ _val_; \
+})
+
+#else /* !CONFIG_ARC_HAS_LLSC */
/*
- * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
- * not require any locking. However there's a quirk.
- * ARC lacks native CMPXCHG, thus emulated (see above), using external locking -
- * incidently it "reuses" the same atomic_ops_lock used by atomic APIs.
- * Now, llist code uses cmpxchg() and xchg() on same data, so xchg() needs to
- * abide by same serializing rules, thus ends up using atomic_ops_lock as well.
- *
- * This however is only relevant if SMP and/or ARC lacks LLSC
- * if (UP or LLSC)
- * xchg doesn't need serialization
- * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
- * xchg needs serialization
+ * The EX instruction is baseline and present in !LLSC too. But in this
+ * regime it still needs to use the @atomic_ops_lock spinlock to allow
+ * interop with cmpxchg(), which uses the spinlock in !LLSC
+ * (llist.h uses xchg and cmpxchg on the same data)
*/
-#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
-
-#define xchg(ptr, with) \
-({ \
- unsigned long flags; \
- typeof(*(ptr)) old_val; \
- \
- atomic_ops_lock(flags); \
- old_val = _xchg(ptr, with); \
- atomic_ops_unlock(flags); \
- old_val; \
+#define arch_xchg(ptr, val) \
+({ \
+ __typeof__(ptr) _p_ = (ptr); \
+ __typeof__(*(ptr)) _val_ = (val); \
+ \
+ unsigned long __flags; \
+ \
+ atomic_ops_lock(__flags); \
+ \
+ __asm__ __volatile__( \
+ " ex %0, [%1] \n" \
+ : "+r"(_val_) \
+ : "r"(_p_) \
+ : "memory"); \
+ \
+ atomic_ops_unlock(__flags); \
+ _val_; \
})
-#else
-
-#define xchg(ptr, with) _xchg(ptr, with)
-
#endif
-/*
- * "atomic" variant of xchg()
- * REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
- * is incorrect. But here's the rationale:
- * SMP : Even xchg() takes the atomic_ops_lock, so OK.
- * LLSC: atomic_ops_lock are not relevent at all (even if SMP, since LLSC
- * is natively "SMP safe", no serialization required).
- * UP : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
- * could clobber them. atomic_xchg() itself would be 1 insn, so it
- * can't be clobbered by others. Thus no serialization required when
- * atomic_xchg is involved.
- */
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
#endif
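
Both the LLSC and spinlock flavours above hand back the previous value of *ptr, which is exactly what the classic compare-and-swap retry loop needs. A user-space sketch of that usage pattern, with C11 atomics standing in for arch_cmpxchg_relaxed() (the saturating add is just an example policy):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int counter;

static void add_saturating(unsigned int delta, unsigned int limit)
{
	unsigned int old = atomic_load_explicit(&counter, memory_order_relaxed);
	unsigned int new;

	do {
		new = old + delta;
		if (new > limit)
			new = limit;
		/* on failure, 'old' is refreshed with the current value */
	} while (!atomic_compare_exchange_weak_explicit(&counter, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));
}

int main(void)
{
	add_saturating(5, 10);
	add_saturating(7, 10);
	printf("counter = %u\n", atomic_load(&counter));	/* prints 10 */
	return 0;
}
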
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
index 87b918585c4a..03ffd005f3fa 100644
--- a/arch/arc/include/asm/current.h
+++ b/arch/arc/include/asm/current.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Vineetg: May 16th, 2008
* - Current macro is now implemented as "global register" r25
*/
@@ -12,21 +9,17 @@
#ifndef _ASM_ARC_CURRENT_H
#define _ASM_ARC_CURRENT_H
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_ARC_CURR_IN_REG
-register struct task_struct *curr_arc asm("r25");
+register struct task_struct *curr_arc asm("gp");
#define current (curr_arc)
#else
#include <asm-generic/current.h>
#endif /* ! CONFIG_ARC_CURR_IN_REG */
-#endif /* ! __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#endif /* ! __ASSEMBLER__ */
#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 442ce5d0f709..54db798f0aa0 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Delay routines using pre computed loops_per_jiffy value.
*
* vineetg: Feb 2012
@@ -17,16 +14,21 @@
#ifndef __ASM_ARC_UDELAY_H
#define __ASM_ARC_UDELAY_H
+#include <asm-generic/types.h>
#include <asm/param.h> /* HZ */
+extern unsigned long loops_per_jiffy;
+
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
- "1: sub.f %0, %0, 1 \n"
- " jpnz 1b \n"
- : "+r"(loops)
+ " mov lp_count, %0 \n"
+ " lp 1f \n"
+ " nop \n"
+ "1: \n"
:
- : "cc");
+ : "r"(loops)
+ : "lp_count");
}
extern void __bad_udelay(void);
@@ -53,11 +55,10 @@ static inline void __udelay(unsigned long usecs)
{
unsigned long loops;
- /* (long long) cast ensures 64 bit MPY - real or emulated
+ /* (u64) cast ensures 64 bit MPY - real or emulated
* HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
*/
- loops = ((long long)(usecs * 4295 * HZ) *
- (long long)(loops_per_jiffy)) >> 32;
+ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
__delay(loops);
}
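
The 4295 constant in the reworked __udelay() encodes 2^32 / 10^6 (about 4294.97), so one 64-bit multiply plus a right shift by 32 approximates usecs * HZ * loops_per_jiffy / 1e6. A quick stand-alone check of that fixed-point math, with made-up HZ and loops_per_jiffy values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t hz = 100, lpj = 50000;	/* assumed example values */
	uint64_t usecs = 123;

	uint64_t fast  = (usecs * 4295 * hz * lpj) >> 32;
	uint64_t exact = usecs * hz * lpj / 1000000;

	printf("fast=%llu exact=%llu\n",	/* both print 615 */
	       (unsigned long long)fast, (unsigned long long)exact);
	return 0;
}
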
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
index f1cce3d059a1..61fb4d7affa7 100644
--- a/arch/arc/include/asm/disasm.h
+++ b/arch/arc/include/asm/disasm.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* several functions that help interpret ARC instructions
* used for unaligned accesses, kprobes and kgdb
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ARC_DISASM_H__
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
deleted file mode 100644
index 45b8e0cea176..000000000000
--- a/arch/arc/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * DMA Mapping glue for ARC
- *
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ASM_ARC_DMA_MAPPING_H
-#define ASM_ARC_DMA_MAPPING_H
-
-#include <asm-generic/dma-coherent.h>
-#include <asm/cacheflush.h>
-
-#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
-/*
- * dma_map_* API take cpu addresses, which is kernel logical address in the
- * untranslated address space (0x8000_0000) based. The dma address (bus addr)
- * ideally needs to be 0x0000_0000 based hence these glue routines.
- * However given that intermediate bus bridges can ignore the high bit, we can
- * do with these routines being no-ops.
- * If a platform/device comes up which sriclty requires 0 based bus addr
- * (e.g. AHB-PCI bridge on Angel4 board), then it can provide it's own versions
- */
-#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
-#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
-
-#else
-#include <plat/dma_addr.h>
-#endif
-
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
- dma_addr_t dma_handle);
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-/*
- * streaming DMA Mapping API...
- * CPU accesses page via normal paddr, thus needs to explicitly made
- * consistent before each use
- */
-
-static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_FROM_DEVICE:
- dma_cache_inv(paddr, size);
- break;
- case DMA_TO_DEVICE:
- dma_cache_wback(paddr, size);
- break;
- case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv(paddr, size);
- break;
- default:
- pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
- }
-}
-
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir);
-
-#define _dma_cache_sync(addr, sz, dir) \
-do { \
- if (__builtin_constant_p(dir)) \
- __inline_dma_cache_sync(addr, sz, dir); \
- else \
- __arc_dma_cache_sync(addr, sz, dir); \
-} \
-while (0);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction dir)
-{
- _dma_cache_sync((unsigned long)cpu_addr, size, dir);
- return plat_kernel_addr_to_dma(dev, cpu_addr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- unsigned long paddr = page_to_phys(page) + offset;
- return dma_map_single(dev, (void *)paddr, size, dir);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i)
- s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
- s->length, dir);
-
- return nents;
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i)
- dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
- _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
- DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
- _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
- DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
- size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
- size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction dir)
-{
- int i;
-
- for (i = 0; i < nelems; i++, sg++)
- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction dir)
-{
- int i;
-
- for (i = 0; i < nelems; i++, sg++)
- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline int dma_supported(struct device *dev, u64 dma_mask)
-{
- /* Support 32 bit DMA mask exclusively */
- return dma_mask == DMA_BIT_MASK(32);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-
-#endif
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
index ca7c45181de9..02431027ed2f 100644
--- a/arch/arc/include/asm/dma.h
+++ b/arch/arc/include/asm/dma.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef ASM_ARC_DMA_H
diff --git a/arch/arc/include/asm/dsp-impl.h b/arch/arc/include/asm/dsp-impl.h
new file mode 100644
index 000000000000..fd5fdaad90c1
--- /dev/null
+++ b/arch/arc/include/asm/dsp-impl.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_DSP_IMPL_H
+#define __ASM_ARC_DSP_IMPL_H
+
+#include <asm/dsp.h>
+
+#define DSP_CTRL_DISABLED_ALL 0
+
+#ifdef __ASSEMBLER__
+
+/* clobbers r5 register */
+.macro DSP_EARLY_INIT
+#ifdef CONFIG_ISA_ARCV2
+ lr r5, [ARC_AUX_DSP_BUILD]
+ bmsk r5, r5, 7
+ breq r5, 0, 1f
+ mov r5, DSP_CTRL_DISABLED_ALL
+ sr r5, [ARC_AUX_DSP_CTRL]
+1:
+#endif
+.endm
+
+/* clobbers r10, r11 registers pair */
+.macro DSP_SAVE_REGFILE_IRQ
+#if defined(CONFIG_ARC_DSP_KERNEL)
+ /*
+ * Drop any changes to DSP_CTRL made by userspace so userspace won't be
+ * able to break kernel - reset it to DSP_CTRL_DISABLED_ALL value
+ */
+ mov r10, DSP_CTRL_DISABLED_ALL
+ sr r10, [ARC_AUX_DSP_CTRL]
+
+#elif defined(CONFIG_ARC_DSP_SAVE_RESTORE_REGS)
+ /*
+ * Save DSP_CTRL register and reset it to value suitable for kernel
+ * (DSP_CTRL_DISABLED_ALL)
+ */
+ mov r10, DSP_CTRL_DISABLED_ALL
+ aex r10, [ARC_AUX_DSP_CTRL]
+ st r10, [sp, PT_DSP_CTRL]
+
+#endif
+.endm
+
+/* clobbers r10, r11 registers pair */
+.macro DSP_RESTORE_REGFILE_IRQ
+#if defined(CONFIG_ARC_DSP_SAVE_RESTORE_REGS)
+ ld r10, [sp, PT_DSP_CTRL]
+ sr r10, [ARC_AUX_DSP_CTRL]
+
+#endif
+.endm
+
+#else /* !__ASSEMBLER__ */
+
+#include <linux/sched.h>
+#include <asm/asserts.h>
+#include <asm/switch_to.h>
+
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+
+/*
+ * As we save new and restore old AUX register value in the same place we
+ * can optimize a bit and use AEX instruction (swap contents of an auxiliary
+ * register with a core register) instead of LR + SR pair.
+ */
+#define AUX_SAVE_RESTORE(_saveto, _readfrom, _offt, _aux) \
+do { \
+ long unsigned int _scratch; \
+ \
+ __asm__ __volatile__( \
+ "ld %0, [%2, %4] \n" \
+ "aex %0, [%3] \n" \
+ "st %0, [%1, %4] \n" \
+ : \
+ "=&r" (_scratch) /* must be early clobber */ \
+ : \
+ "r" (_saveto), \
+ "r" (_readfrom), \
+ "Ir" (_aux), \
+ "Ir" (_offt) \
+ : \
+ "memory" \
+ ); \
+} while (0)
+
+#define DSP_AUX_SAVE_RESTORE(_saveto, _readfrom, _aux) \
+ AUX_SAVE_RESTORE(_saveto, _readfrom, \
+ offsetof(struct dsp_callee_regs, _aux), \
+ ARC_AUX_##_aux)
+
+static inline void dsp_save_restore(struct task_struct *prev,
+ struct task_struct *next)
+{
+ long unsigned int *saveto = &prev->thread.dsp.ACC0_GLO;
+ long unsigned int *readfrom = &next->thread.dsp.ACC0_GLO;
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, ACC0_GLO);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, ACC0_GHI);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, DSP_BFLY0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, DSP_FFT_CTRL);
+
+#ifdef CONFIG_ARC_DSP_AGU_USERSPACE
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP1);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP2);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_AP3);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_OS0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_OS1);
+
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD0);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD1);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD2);
+ DSP_AUX_SAVE_RESTORE(saveto, readfrom, AGU_MOD3);
+#endif /* CONFIG_ARC_DSP_AGU_USERSPACE */
+}
+
+#else /* !CONFIG_ARC_DSP_SAVE_RESTORE_REGS */
+#define dsp_save_restore(p, n)
+#endif /* CONFIG_ARC_DSP_SAVE_RESTORE_REGS */
+
+static inline bool dsp_exist(void)
+{
+ struct bcr_generic bcr;
+
+ READ_BCR(ARC_AUX_DSP_BUILD, bcr);
+ return !!bcr.ver;
+}
+
+static inline bool agu_exist(void)
+{
+ struct bcr_generic bcr;
+
+ READ_BCR(ARC_AUX_AGU_BUILD, bcr);
+ return !!bcr.ver;
+}
+
+static inline void dsp_config_check(void)
+{
+ CHK_OPT_STRICT(CONFIG_ARC_DSP_HANDLED, dsp_exist());
+ CHK_OPT_WEAK(CONFIG_ARC_DSP_AGU_USERSPACE, agu_exist());
+}
+
+#endif /* __ASSEMBLER__ */
+#endif /* __ASM_ARC_DSP_IMPL_H */
diff --git a/arch/arc/include/asm/dsp.h b/arch/arc/include/asm/dsp.h
new file mode 100644
index 000000000000..eeaaf4e4eabd
--- /dev/null
+++ b/arch/arc/include/asm/dsp.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+#ifndef __ASM_ARC_DSP_H
+#define __ASM_ARC_DSP_H
+
+#ifndef __ASSEMBLER__
+
+/*
+ * DSP-related saved registers - need to be saved only when you are
+ * scheduled out.
+ * structure fields name must correspond to aux register definitions for
+ * automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
+ */
+struct dsp_callee_regs {
+ unsigned long ACC0_GLO, ACC0_GHI, DSP_BFLY0, DSP_FFT_CTRL;
+#ifdef CONFIG_ARC_DSP_AGU_USERSPACE
+ unsigned long AGU_AP0, AGU_AP1, AGU_AP2, AGU_AP3;
+ unsigned long AGU_OS0, AGU_OS1;
+ unsigned long AGU_MOD0, AGU_MOD1, AGU_MOD2, AGU_MOD3;
+#endif
+};
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* __ASM_ARC_DSP_H */
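
The rule that struct dsp_callee_regs field names mirror the aux register names is what lets DSP_AUX_SAVE_RESTORE() in dsp-impl.h derive both the ARC_AUX_* constant and the save-slot offset from a single token. A stand-alone sketch of that token-pasting plus offsetof() trick (struct name, macro names and register numbers below are invented, not the kernel's):

#include <stdio.h>
#include <stddef.h>

struct dsp_regs { unsigned long ACC0_GLO, ACC0_GHI; };

#define AUX_ACC0_GLO	0x580	/* hypothetical aux register numbers */
#define AUX_ACC0_GHI	0x581

/* one field name selects both the aux register and its save slot */
#define SHOW_SLOT(_field)						\
	printf("%-8s aux=0x%x offset=%zu\n", #_field,			\
	       AUX_##_field, offsetof(struct dsp_regs, _field))

int main(void)
{
	SHOW_SLOT(ACC0_GLO);
	SHOW_SLOT(ACC0_GHI);
	return 0;
}
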
diff --git a/arch/arc/include/asm/dwarf.h b/arch/arc/include/asm/dwarf.h
new file mode 100644
index 000000000000..1524c5cf8b59
--- /dev/null
+++ b/arch/arc/include/asm/dwarf.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_DWARF_H
+#define _ASM_ARC_DWARF_H
+
+#ifdef __ASSEMBLER__
+
+#ifdef ARC_DW2_UNWIND_AS_CFI
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_UNDEFINED .cfi_undefined
+
+#else
+
+#define CFI_IGNORE #
+
+#define CFI_STARTPROC CFI_IGNORE
+#define CFI_ENDPROC CFI_IGNORE
+#define CFI_DEF_CFA CFI_IGNORE
+#define CFI_DEF_CFA_OFFSET CFI_IGNORE
+#define CFI_DEF_CFA_REGISTER CFI_IGNORE
+#define CFI_OFFSET CFI_IGNORE
+#define CFI_REL_OFFSET CFI_IGNORE
+#define CFI_REGISTER CFI_IGNORE
+#define CFI_RESTORE CFI_IGNORE
+#define CFI_UNDEFINED CFI_IGNORE
+
+#endif /* !ARC_DW2_UNWIND_AS_CFI */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _ASM_ARC_DWARF_H */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index a26282857683..0284ace0e1ab 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -1,28 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_ELF_H
#define __ASM_ARC_ELF_H
#include <linux/types.h>
+#include <linux/elf-em.h>
#include <uapi/asm/elf.h>
-/* These ELF defines belong to uapi but libc elf.h already defines them */
-#define EM_ARCOMPACT 93
+#define EM_ARC_INUSE (IS_ENABLED(CONFIG_ISA_ARCOMPACT) ? \
+ EM_ARCOMPACT : EM_ARCV2)
/* ARC Relocations (kernel Modules only) */
#define R_ARC_32 0x4
#define R_ARC_32_ME 0x1B
-#define R_ARC_S25H_PCREL 0x10
-#define R_ARC_S25W_PCREL 0x11
+#define R_ARC_32_PCREL 0x31
/*to set parameters in the core dumps */
-#define ELF_ARCH EM_ARCOMPACT
+#define ELF_ARCH EM_ARC_INUSE
#define ELF_CLASS ELFCLASS32
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -50,7 +47,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
/*
* When the program starts, a1 contains a pointer to a function to be
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
new file mode 100644
index 000000000000..3802a2daaf86
--- /dev/null
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_ARC_ENTRY_ARCV2_H
+#define __ASM_ARC_ENTRY_ARCV2_H
+
+#include <asm/asm-offsets.h>
+#include <asm/dsp-impl.h>
+#include <asm/irqflags-arcv2.h>
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+
+/*
+ * Interrupt/Exception stack layout (pt_regs) for ARCv2
+ * (End of struct aligned to end of page [unless nested])
+ *
+ * INTERRUPT EXCEPTION
+ *
+ * manual --------------------- manual
+ * | orig_r0 |
+ * | event/ECR |
+ * | bta |
+ * | gp |
+ * | fp |
+ * | sp |
+ * | r12 |
+ * | r30 |
+ * | r58 |
+ * | r59 |
+ * hw autosave ---------------------
+ * optional | r0 |
+ * | r1 |
+ * ~ ~
+ * | r9 |
+ * | r10 |
+ * | r11 |
+ * | blink |
+ * | lpe |
+ * | lps |
+ * | lpc |
+ * | ei base |
+ * | ldi base |
+ * | jli base |
+ * ---------------------
+ * hw autosave | pc / eret |
+ * mandatory | stat32 / erstatus |
+ * ---------------------
+ */
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_PROLOGUE
+
+ ; Before jumping to Interrupt Vector, hardware micro-ops did following:
+ ; 1. SP auto-switched to kernel mode stack
+ ; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0)
+ ; 3. Auto save: (mandatory) Push PC and STAT32 on stack
+ ; hardware does this even if CONFIG_ARC_IRQ_NO_AUTOSAVE
+ ; 4a. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
+ ;
+ ; Now
+ ; 4b. If Auto-save (optional) not enabled in hw, manually save them
+ ; 5. Manually save: r12,r30, sp,fp,gp, ACCL pair
+ ;
+ ; At the end, SP points to pt_regs
+
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+ ; carve pt_regs on stack (case #3), PC/STAT32 already on stack
+ sub sp, sp, SZ_PT_REGS - 8
+
+ __SAVE_REGFILE_HARD
+#else
+ ; carve pt_regs on stack (case #4), which grew partially already
+ sub sp, sp, PT_r0
+#endif
+
+ __SAVE_REGFILE_SOFT
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE_KEEP_AE
+
+ ; Before jumping to Exception Vector, hardware micro-ops did following:
+ ; 1. SP auto-switched to kernel mode stack
+ ; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0)
+ ;
+ ; Now manually save rest of reg file
+ ; At the end, SP points to pt_regs
+
+ sub sp, sp, SZ_PT_REGS ; carve space for pt_regs
+
+ ; _HARD saves r10 clobbered by _SOFT as scratch hence comes first
+
+ __SAVE_REGFILE_HARD
+ __SAVE_REGFILE_SOFT
+
+ st r0, [sp] ; orig_r0
+
+ lr r10, [eret]
+ lr r11, [erstatus]
+ ST2 r10, r11, PT_ret
+
+ lr r10, [ecr]
+ lr r11, [erbta]
+ ST2 r10, r11, PT_event
+
+ ; OUTPUT: r10 has ECR expected by EV_Trap
+.endm
+
+.macro EXCEPTION_PROLOGUE
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN ; clobbers r9
+.endm
+
+/*------------------------------------------------------------------------
+ * This macro saves the registers manually which would normally be autosaved
+ * by hardware on taken interrupts. It is used by
+ * - exception handlers (which don't have autosave)
+ * - interrupt autosave disabled due to CONFIG_ARC_IRQ_NO_AUTOSAVE
+ */
+.macro __SAVE_REGFILE_HARD
+
+ ST2 r0, r1, PT_r0
+ ST2 r2, r3, PT_r2
+ ST2 r4, r5, PT_r4
+ ST2 r6, r7, PT_r6
+ ST2 r8, r9, PT_r8
+ ST2 r10, r11, PT_r10
+
+ st blink, [sp, PT_blink]
+
+ lr r10, [lp_end]
+ lr r11, [lp_start]
+ ST2 r10, r11, PT_lpe
+
+ st lp_count, [sp, PT_lpc]
+
+ ; skip JLI, LDI, EI for now
+.endm
+
+/*------------------------------------------------------------------------
+ * This macro saves a bunch of other registers which can't be autosaved for
+ * various reasons:
+ * - r12: the last caller-saved scratch reg; hardware autosaves in pairs, so it stops at r0-r11
+ * - r30: free reg, used by gcc as scratch
+ * - ACCL/ACCH pair when they exist
+ */
+.macro __SAVE_REGFILE_SOFT
+
+ st fp, [sp, PT_fp] ; r27
+ st r30, [sp, PT_r30]
+ st r12, [sp, PT_r12]
+ st r26, [sp, PT_r26] ; gp
+
+ ; Saving pt_regs->sp correctly requires some extra work due to the way
+ ; Auto stack switch works
+ ; - U mode: retrieve it from AUX_USER_SP
+ ; - K mode: add the offset from current SP where H/w starts auto push
+ ;
+ ; 1. Utilize the fact that Z bit is set if Intr taken in U mode
+ ; 2. Upon entry SP is always saved (for any inspection, unwinding etc),
+ ; but on return, restored only if U mode
+
+ lr r10, [AUX_USER_SP] ; U mode SP
+
+ ; ISA requires ADD.nz to have same dest and src reg operands
+ mov.nz r10, sp
+ add2.nz r10, r10, SZ_PT_REGS/4 ; K mode SP
+
+ st r10, [sp, PT_sp] ; SP (pt_regs->sp)
+
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ ST2 r58, r59, PT_r58
+#endif
+
+ /* clobbers r10, r11 registers pair */
+ DSP_SAVE_REGFILE_IRQ
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ GET_CURR_TASK_ON_CPU gp
+#endif
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro __RESTORE_REGFILE_SOFT
+
+ ld fp, [sp, PT_fp]
+ ld r30, [sp, PT_r30]
+ ld r12, [sp, PT_r12]
+ ld r26, [sp, PT_r26]
+
+ ; Restore SP (into AUX_USER_SP) only if returning to U mode
+ ; - for K mode, it will be implicitly restored as stack is unwound
+ ; - Z flag set on K is inverse of what hardware does on interrupt entry
+ ; but that doesn't really matter
+ bz 1f
+
+ ld r10, [sp, PT_sp] ; SP (pt_regs->sp)
+ sr r10, [AUX_USER_SP]
+1:
+
+ /* clobbers r10, r11 registers pair */
+ DSP_RESTORE_REGFILE_IRQ
+
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ LD2 r58, r59, PT_r58
+#endif
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro __RESTORE_REGFILE_HARD
+
+ ld blink, [sp, PT_blink]
+
+ LD2 r10, r11, PT_lpe
+ sr r10, [lp_end]
+ sr r11, [lp_start]
+
+ ld r10, [sp, PT_lpc] ; lp_count can't be target of LD
+ mov lp_count, r10
+
+ LD2 r0, r1, PT_r0
+ LD2 r2, r3, PT_r2
+ LD2 r4, r5, PT_r4
+ LD2 r6, r7, PT_r6
+ LD2 r8, r9, PT_r8
+ LD2 r10, r11, PT_r10
+.endm
+
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE
+
+ ; INPUT: r0 has STAT32 of calling context
+ ; INPUT: Z flag set if returning to K mode
+
+ ; _SOFT clobbers r10 restored by _HARD hence the order
+
+ __RESTORE_REGFILE_SOFT
+
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+ __RESTORE_REGFILE_HARD
+
+ ; SP points to PC/STAT32: hw restores them despite NO_AUTOSAVE
+ add sp, sp, SZ_PT_REGS - 8
+#else
+ add sp, sp, PT_r0
+#endif
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+
+ ; INPUT: r0 has STAT32 of calling context
+
+ btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP
+
+ ld r10, [sp, PT_bta]
+ sr r10, [erbta]
+
+ LD2 r10, r11, PT_ret
+ sr r10, [eret]
+ sr r11, [erstatus]
+
+ __RESTORE_REGFILE_SOFT
+ __RESTORE_REGFILE_HARD
+
+ add sp, sp, SZ_PT_REGS
+.endm
+
+.macro FAKE_RET_FROM_EXCPN
+ lr r9, [status32]
+ bclr r9, r9, STATUS_AE_BIT
+ bset r9, r9, STATUS_IE_BIT
+ kflag r9
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ bmskn \reg, sp, THREAD_SHIFT - 1
+.endm
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ xbfu \reg, \reg, 0xE8 /* 00111 01000 */
+ /* M = 8-1 N = 8 */
+.endm
+
+.macro SAVE_ABI_CALLEE_REGS
+ push r13
+ push r14
+ push r15
+ push r16
+ push r17
+ push r18
+ push r19
+ push r20
+ push r21
+ push r22
+ push r23
+ push r24
+ push r25
+.endm
+
+.macro RESTORE_ABI_CALLEE_REGS
+ pop r25
+ pop r24
+ pop r23
+ pop r22
+ pop r21
+ pop r20
+ pop r19
+ pop r18
+ pop r17
+ pop r16
+ pop r15
+ pop r14
+ pop r13
+.endm
+
+#endif
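
GET_CURR_THR_INFO_FROM_SP above works because each kernel stack is one THREAD_SIZE-aligned block with thread_info at its base, so clearing the low bits of any in-stack SP recovers it; bmskn does in one instruction what the mask below does. A C model of that computation (the 8 KB stack size is an assumption):

#include <stdio.h>
#include <stdint.h>

#define THREAD_SHIFT	13			/* assumed 8 KB kernel stack */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)

static uintptr_t thread_info_from_sp(uintptr_t sp)
{
	return sp & ~(THREAD_SIZE - 1);		/* same effect as bmskn/bic */
}

int main(void)
{
	uintptr_t sp = 0x9f803e40;		/* some SP inside the stack */

	printf("sp=0x%lx -> thread_info at 0x%lx\n",
	       (unsigned long)sp,
	       (unsigned long)thread_info_from_sp(sp));	/* 0x9f802000 */
	return 0;
}
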
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
new file mode 100644
index 000000000000..00946fe04c9b
--- /dev/null
+++ b/arch/arc/include/asm/entry-compact.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ * Stack switching code can no longer reliably rely on the fact that
+ * if we are NOT in user mode, stack is switched to kernel mode.
+ * e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
+ * its prologue including stack switching from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we also need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context
+ *
+ * Vineetg: May 5th 2008
+ * -Modified CALLEE_REG save/restore macros to handle the fact that
+ * r25 contains the kernel current task ptr
+ * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ * address Write back load ld.ab instead of separate ld/add instn
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_COMPACT_H
+#define __ASM_ARC_ENTRY_COMPACT_H
+
+#include <asm/asm-offsets.h>
+#include <asm/irqflags-compact.h>
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+.macro PUSHAX aux
+ lr r9, [\aux]
+ push r9
+.endm
+
+.macro POPAX aux
+ pop r9
+ sr r9, [\aux]
+.endm
+
+.macro SAVE_R0_TO_R12
+ push r0
+ push r1
+ push r2
+ push r3
+ push r4
+ push r5
+ push r6
+ push r7
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+.endm
+
+.macro RESTORE_R12_TO_R0
+ pop r12
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+ pop r7
+ pop r6
+ pop r5
+ pop r4
+ pop r3
+ pop r2
+ pop r1
+ pop r0
+.endm
+
+.macro SAVE_ABI_CALLEE_REGS
+ push r13
+ push r14
+ push r15
+ push r16
+ push r17
+ push r18
+ push r19
+ push r20
+ push r21
+ push r22
+ push r23
+ push r24
+ push r25
+.endm
+
+.macro RESTORE_ABI_CALLEE_REGS
+ pop r25
+ pop r24
+ pop r23
+ pop r22
+ pop r21
+ pop r20
+ pop r19
+ pop r18
+ pop r17
+ pop r16
+ pop r15
+ pop r14
+ pop r13
+.endm
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry : r9 contains pre-IRQ/exception/trap status32
+ * Exit : SP set to K mode stack
+ * SP at the time of entry (K/U) saved @ pt_regs->sp
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+ /* User Mode when this happened ? Yes: Proceed to switch stack */
+ bbit1 r9, STATUS_U_BIT, 88f
+
+ /* OK we were already in kernel mode when this event happened, thus can
+ * assume SP is kernel mode SP. _NO_ need to do any stack switching
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ /* However....
+ * If Level 2 Interrupts enabled, we may end up with a corner case:
+ * 1. User Task executing
+ * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+ * 3. But before it could switch SP from USER to KERNEL stack
+ * a L2 IRQ "Interrupts" L1
+ * That way although L2 IRQ happened in Kernel mode, stack is still
+ * not switched.
+ * To handle this, we may need to switch stack even if in kernel mode
+ * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+ */
+ brlo sp, VMALLOC_START, 88f
+
+ /* TODO: vineetg:
+ * We need to be a bit more cautious here. What if a kernel bug in
+ * L1 ISR, caused SP to go whaco (some small value which looks like
+ * USER stk) and then we take L2 ISR.
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
+ * The only feasible way is to make sure this L2 happened in
+ * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
+ * L1 ISR before it switches stack
+ */
+
+#endif
+
+ /*------Intr/Excp happened in kernel mode, SP already setup ------ */
+ /* save it nevertheless @ pt_regs->sp for uniformity */
+
+ b.d 66f
+ st sp, [sp, PT_sp - SZ_PT_REGS]
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+ GET_CURR_TASK_ON_CPU r9
+
+ /* With current tsk in r9, get its kernel mode stack base */
+ GET_TSK_STACK_BASE r9, r9
+
+ /* save U mode SP @ pt_regs->sp */
+ st sp, [r9, PT_sp - SZ_PT_REGS]
+
+ /* final SP switch */
+ mov sp, r9
+66:
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN
+
+ lr r9, [status32]
+ bclr r9, r9, STATUS_AE_BIT
+ or r9, r9, (STATUS_E1_MASK|STATUS_E2_MASK)
+ sr r9, [erstatus]
+ mov r9, 55f
+ sr r9, [eret]
+ rtie
+55:
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception/ISR Prologue, a core reg is temporarily needed to
+ * code the rest of prolog (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of pt_regs.
+ *-------------------------------------------------------------*/
+.macro PROLOG_FREEUP_REG reg, mem
+ st \reg, [\mem]
+.endm
+
+.macro PROLOG_RESTORE_REG reg, mem
+ ld \reg, [\mem]
+.endm
+
+/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE_KEEP_AE
+
+ /* Need at least 1 reg to code the early exception prologue */
+ PROLOG_FREEUP_REG r9, @ex_saved_reg1
+
+ /* U/K mode at time of exception (stack not switched if already K) */
+ lr r9, [erstatus]
+
+ /* ARC700 doesn't provide auto-stack switching */
+ SWITCH_TO_KERNEL_STK
+
+ st.a r0, [sp, -8] /* orig_r0 needed for syscall (skip ECR slot) */
+ sub sp, sp, 4 /* skip pt_regs->sp, already saved above */
+
+ /* Restore r9 used to code the early prologue */
+ PROLOG_RESTORE_REG r9, @ex_saved_reg1
+
+ /* now we are ready to save the regfile */
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSHAX eret
+ PUSHAX erstatus
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX erbta
+
+ lr r10, [ecr]
+ st r10, [sp, PT_event]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /* gp already saved on stack: now load with "current" */
+ GET_CURR_TASK_ON_CPU gp
+#endif
+ ; OUTPUT: r10 has ECR expected by EV_Trap
+.endm
+
+.macro EXCEPTION_PROLOGUE
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN ; clobbers r9
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+
+ POPAX erbta
+ POPAX lp_start
+ POPAX lp_end
+
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX erstatus
+ POPAX eret
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0, ECR skipped automatically */
+.endm
+
+/* Dummy ECR values for Interrupts */
+#define event_IRQ1 0x0031abcd
+#define event_IRQ2 0x0032abcd
+
+.macro INTERRUPT_PROLOGUE LVL
+
+ /* free up r9 as scratchpad */
+ PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
+
+ /* Which mode (user/kernel) was the system in when intr occurred */
+ lr r9, [status32_l\LVL\()]
+
+ SWITCH_TO_KERNEL_STK
+
+
+ st.a 0x003\LVL\()abcd, [sp, -4] /* Dummy ECR */
+ sub sp, sp, 8 /* skip orig_r0 (not needed)
+ skip pt_regs->sp, already saved above */
+
+ /* Restore r9 used to code the early prologue */
+ PROLOG_RESTORE_REG r9, @int\LVL\()_saved_reg
+
+ SAVE_R0_TO_R12
+ PUSH gp
+ PUSH fp
+ PUSH blink
+ PUSH ilink\LVL\()
+ PUSHAX status32_l\LVL\()
+ PUSH lp_count
+ PUSHAX lp_end
+ PUSHAX lp_start
+ PUSHAX bta_l\LVL\()
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /* gp already saved on stack: now load with "current" */
+ GET_CURR_TASK_ON_CPU gp
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE LVL
+
+ POPAX bta_l\LVL\()
+ POPAX lp_start
+ POPAX lp_end
+
+ POP r9
+ mov lp_count, r9 ;LD to lp_count is not allowed
+
+ POPAX status32_l\LVL\()
+ POP ilink\LVL\()
+ POP blink
+ POP fp
+ POP gp
+ RESTORE_R12_TO_R0
+
+ ld sp, [sp] /* restore original sp; orig_r0, ECR skipped implicitly */
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ bic \reg, sp, (THREAD_SIZE - 1)
+.endm
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ lsr \reg, \reg, 8
+ bmsk \reg, \reg, 7
+.endm
+
+#endif /* __ASM_ARC_ENTRY_COMPACT_H */
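
SWITCH_TO_KERNEL_STK above boils down to one decision: if the event was taken in user mode (STATUS32.U set in r9), SP is swapped for the task's kernel stack base, otherwise the already-kernel SP is kept (with the L2-interrupt corner case layered on top). A C model of just that decision, with an assumed U-bit position and invented addresses:

#include <stdio.h>
#include <stdint.h>

#define STATUS_U_BIT	7	/* assumed bit position of STATUS32.U */

static uintptr_t switch_stack(uint32_t status32, uintptr_t sp,
			      uintptr_t kstack_base)
{
	if (status32 & (1u << STATUS_U_BIT))
		return kstack_base;	/* came from user mode: switch */
	return sp;			/* already on a kernel stack: keep */
}

int main(void)
{
	printf("from user:   sp=0x%lx\n", (unsigned long)
	       switch_stack(1u << STATUS_U_BIT, 0x5ffff000, 0x9f804000));
	printf("from kernel: sp=0x%lx\n", (unsigned long)
	       switch_stack(0, 0x9f803e40, 0x9f804000));
	return 0;
}
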
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 8943c028d4bb..f453af251a1a 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -1,219 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
- * Stack switching code can no longer reliably rely on the fact that
- * if we are NOT in user mode, stack is switched to kernel mode.
- * e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- * it's prologue including stack switching from user mode
- *
- * Vineetg: Aug 28th 2008: Bug #94984
- * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
- * Normally CPU does this automatically, however when doing FAKE rtie,
- * we also need to explicitly do this. The problem in macros
- * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
- * was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context
- *
- * Vineetg: May 5th 2008
- * -Modified CALLEE_REG save/restore macros to handle the fact that
- * r25 contains the kernel current task ptr
- * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
- * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
- * address Write back load ld.ab instead of seperate ld/add instn
- *
- * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H
-#ifdef __ASSEMBLY__
-#include <asm/unistd.h> /* For NR_syscalls defination */
-#include <asm/asm-offsets.h>
+#include <asm/unistd.h> /* For NR_syscalls definition */
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
-#include <asm/thread_info.h> /* For THREAD_SIZE */
+#include <asm/mmu.h>
-/* Note on the LD/ST addr modes with addr reg wback
- *
- * LD.a same as LD.aw
- *
- * LD.a reg1, [reg2, x] => Pre Incr
- * Eff Addr for load = [reg2 + x]
- *
- * LD.ab reg1, [reg2, x] => Post Incr
- * Eff Addr for load = [reg2]
- */
-
-.macro PUSH reg
- st.a \reg, [sp, -4]
-.endm
-
-.macro PUSHAX aux
- lr r9, [\aux]
- PUSH r9
-.endm
-
-.macro POP reg
- ld.ab \reg, [sp, 4]
-.endm
-
-.macro POPAX aux
- POP r9
- sr r9, [\aux]
-.endm
-
-/*--------------------------------------------------------------
- * Helpers to save/restore Scratch Regs:
- * used by Interrupt/Exception Prologue/Epilogue
- *-------------------------------------------------------------*/
-.macro SAVE_R0_TO_R12
- PUSH r0
- PUSH r1
- PUSH r2
- PUSH r3
- PUSH r4
- PUSH r5
- PUSH r6
- PUSH r7
- PUSH r8
- PUSH r9
- PUSH r10
- PUSH r11
- PUSH r12
-.endm
+#ifdef __ASSEMBLER__
-.macro RESTORE_R12_TO_R0
- POP r12
- POP r11
- POP r10
- POP r9
- POP r8
- POP r7
- POP r6
- POP r5
- POP r4
- POP r3
- POP r2
- POP r1
- POP r0
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, 12]
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/entry-compact.h> /* ISA specific bits */
+#else
+#include <asm/entry-arcv2.h>
#endif
-.endm
-
-/*--------------------------------------------------------------
- * Helpers to save/restore callee-saved regs:
- * used by several macros below
- *-------------------------------------------------------------*/
-.macro SAVE_R13_TO_R24
- PUSH r13
- PUSH r14
- PUSH r15
- PUSH r16
- PUSH r17
- PUSH r18
- PUSH r19
- PUSH r20
- PUSH r21
- PUSH r22
- PUSH r23
- PUSH r24
-.endm
-
-.macro RESTORE_R24_TO_R13
- POP r24
- POP r23
- POP r22
- POP r21
- POP r20
- POP r19
- POP r18
- POP r17
- POP r16
- POP r15
- POP r14
- POP r13
-.endm
-#define OFF_USER_R25_FROM_R24 (SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
-
-/*--------------------------------------------------------------
- * Collect User Mode callee regs as struct callee_regs - needed by
- * fork/do_signal/unaligned-access-emulation.
- * (By default only scratch regs are saved on entry to kernel)
- *
- * Special handling for r25 if used for caching Task Pointer.
- * It would have been saved in task->thread.user_r25 already, but to keep
- * the interface same it is copied into regular r25 placeholder in
- * struct callee_regs.
- *-------------------------------------------------------------*/
+/*
+ * save user mode callee regs as struct callee_regs
+ * - needed by fork/do_signal/unaligned-access-emulation.
+ */
.macro SAVE_CALLEE_SAVED_USER
+ SAVE_ABI_CALLEE_REGS
+.endm
- SAVE_R13_TO_R24
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- ; Retrieve orig r25 and save it on stack
- ld.as r12, [sp, OFF_USER_R25_FROM_R24]
- st.a r12, [sp, -4]
-#else
- PUSH r25
-#endif
-
+/*
+ * restore user mode callee regs as struct callee_regs
+ * - could have been changed by ptrace tracer or unaligned-access fixup
+ */
+.macro RESTORE_CALLEE_SAVED_USER
+ RESTORE_ABI_CALLEE_REGS
.endm
-/*--------------------------------------------------------------
- * Save kernel Mode callee regs at the time of Contect Switch.
- *
- * Special handling for r25 if used for caching Task Pointer.
- * Kernel simply skips saving it since it will be loaded with
- * incoming task pointer anyways
- *-------------------------------------------------------------*/
+/*
+ * save/restore kernel mode callee regs at the time of context switch
+ */
.macro SAVE_CALLEE_SAVED_KERNEL
-
- SAVE_R13_TO_R24
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- sub sp, sp, 4
-#else
- PUSH r25
-#endif
+ SAVE_ABI_CALLEE_REGS
.endm
-/*--------------------------------------------------------------
- * Opposite of SAVE_CALLEE_SAVED_KERNEL
- *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- add sp, sp, 4 /* skip usual r25 placeholder */
-#else
- POP r25
-#endif
- RESTORE_R24_TO_R13
-.endm
-
-/*--------------------------------------------------------------
- * Opposite of SAVE_CALLEE_SAVED_USER
- *
- * ptrace tracer or unaligned-access fixup might have changed a user mode
- * callee reg which is saved back to usual r25 storage location
- *-------------------------------------------------------------*/
-.macro RESTORE_CALLEE_SAVED_USER
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld.ab r12, [sp, 4]
- st.as r12, [sp, OFF_USER_R25_FROM_R24]
-#else
- POP r25
-#endif
- RESTORE_R24_TO_R13
+ RESTORE_ABI_CALLEE_REGS
.endm
/*--------------------------------------------------------------
@@ -224,7 +56,7 @@
.endm
/*-------------------------------------------------------------
- * given a tsk struct, get to the base of it's kernel mode stack
+ * given a tsk struct, get to the base of its kernel mode stack
* tsk->thread_info is really a PAGE, whose bottom hoists stack
* which grows upwards towards thread_info
*------------------------------------------------------------*/
@@ -239,117 +71,6 @@
.endm
-/*--------------------------------------------------------------
- * Switch to Kernel Mode stack if SP points to User Mode stack
- *
- * Entry : r9 contains pre-IRQ/exception/trap status32
- * Exit : SP is set to kernel mode stack pointer
- * If CURR_IN_REG, r25 set to "current" task pointer
- * Clobbers: r9
- *-------------------------------------------------------------*/
-
-.macro SWITCH_TO_KERNEL_STK
-
- /* User Mode when this happened ? Yes: Proceed to switch stack */
- bbit1 r9, STATUS_U_BIT, 88f
-
- /* OK we were already in kernel mode when this event happened, thus can
- * assume SP is kernel mode SP. _NO_ need to do any stack switching
- */
-
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
- /* However....
- * If Level 2 Interrupts enabled, we may end up with a corner case:
- * 1. User Task executing
- * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
- * 3. But before it could switch SP from USER to KERNEL stack
- * a L2 IRQ "Interrupts" L1
- * Thay way although L2 IRQ happened in Kernel mode, stack is still
- * not switched.
- * To handle this, we may need to switch stack even if in kernel mode
- * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
- */
- brlo sp, VMALLOC_START, 88f
-
- /* TODO: vineetg:
- * We need to be a bit more cautious here. What if a kernel bug in
- * L1 ISR, caused SP to go whaco (some small value which looks like
- * USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
- * The only feasible way is to make sure this L2 happened in
- * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
- * L1 ISR before it switches stack
- */
-
-#endif
-
- /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
- * safe-keeping not really needed, but it keeps the epilogue code
- * (SP restore) simpler/uniform.
- */
- b.d 66f
- mov r9, sp
-
-88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */
-
- GET_CURR_TASK_ON_CPU r9
-
- /* With current tsk in r9, get it's kernel mode stack base */
- GET_TSK_STACK_BASE r9, r9
-
-66:
-#ifdef CONFIG_ARC_CURR_IN_REG
- /*
- * Treat r25 as scratch reg, save it on stack first
- * Load it with current task pointer
- */
- st r25, [r9, -4]
- GET_CURR_TASK_ON_CPU r25
-#endif
-
- /* Save Pre Intr/Exception User SP on kernel stack */
- st.a sp, [r9, -16] ; Make room for orig_r0, ECR, user_r25
-
- /* CAUTION:
- * SP should be set at the very end when we are done with everything
- * In case of 2 levels of interrupt we depend on value of SP to assume
- * that everything else is done (loading r25 etc)
- */
-
- /* set SP to point to kernel mode stack */
- mov sp, r9
-
- /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
-
-.endm
-
-/*------------------------------------------------------------
- * "FAKE" a rtie to return from CPU Exception context
- * This is to re-enable Exceptions within exception
- * Look at EV_ProtV to see how this is actually used
- *-------------------------------------------------------------*/
-
-.macro FAKE_RET_FROM_EXCPN reg
-
- ld \reg, [sp, PT_status32]
- bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
- bset \reg, \reg, STATUS_L_BIT
- sr \reg, [erstatus]
- mov \reg, 55f
- sr \reg, [eret]
-
- rtie
-55:
-.endm
-
-/*
- * @reg [OUT] &thread_info of "current"
- */
-.macro GET_CURR_THR_INFO_FROM_SP reg
- bic \reg, sp, (THREAD_SIZE - 1)
-.endm
-
/*
* @reg [OUT] thread_info->flags of "current"
*/
@@ -358,206 +79,12 @@
ld \reg, [\reg, THREAD_INFO_FLAGS]
.endm
-/*--------------------------------------------------------------
- * For early Exception Prologue, a core reg is temporarily needed to
- * code the rest of prolog (stack switching). This is done by stashing
- * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
- *
- * Before saving the full regfile - this reg is restored back, only
- * to be saved again on kernel mode stack, as part of ptregs.
- *-------------------------------------------------------------*/
-.macro EXCPN_PROLOG_FREEUP_REG reg
-#ifdef CONFIG_SMP
- sr \reg, [ARC_REG_SCRATCH_DATA0]
-#else
- st \reg, [@ex_saved_reg1]
-#endif
-.endm
-
-.macro EXCPN_PROLOG_RESTORE_REG reg
#ifdef CONFIG_SMP
- lr \reg, [ARC_REG_SCRATCH_DATA0]
-#else
- ld \reg, [@ex_saved_reg1]
-#endif
-.endm
-
-/*--------------------------------------------------------------
- * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
- * Requires SP to be already switched to kernel mode Stack
- * sp points to the next free element on the stack at exit of this macro.
- * Registers are pushed / popped in the order defined in struct ptregs
- * in asm/ptrace.h
- * Note that syscalls are implemented via TRAP which is also a exception
- * from CPU's point of view
- *-------------------------------------------------------------*/
-.macro SAVE_ALL_SYS
-
- lr r9, [ecr]
- st r9, [sp, 8] /* ECR */
- st r0, [sp, 4] /* orig_r0, needed only for sys calls */
-
- /* Restore r9 used to code the early prologue */
- EXCPN_PROLOG_RESTORE_REG r9
-
- SAVE_R0_TO_R12
- PUSH gp
- PUSH fp
- PUSH blink
- PUSHAX eret
- PUSHAX erstatus
- PUSH lp_count
- PUSHAX lp_end
- PUSHAX lp_start
- PUSHAX erbta
-.endm
-
-/*--------------------------------------------------------------
- * Restore all registers used by system call or Exceptions
- * SP should always be pointing to the next free stack element
- * when entering this macro.
- *
- * NOTE:
- *
- * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
- * by hardware and that is not good.
- *-------------------------------------------------------------*/
-.macro RESTORE_ALL_SYS
- POPAX erbta
- POPAX lp_start
- POPAX lp_end
-
- POP r9
- mov lp_count, r9 ;LD to lp_count is not allowed
-
- POPAX erstatus
- POPAX eret
- POP blink
- POP fp
- POP gp
- RESTORE_R12_TO_R0
-
- ld sp, [sp] /* restore original sp */
- /* orig_r0, ECR, user_r25 skipped automatically */
-.endm
-
-/*--------------------------------------------------------------
- * Save all registers used by interrupt handlers.
- *-------------------------------------------------------------*/
-.macro SAVE_ALL_INT1
-
- /* restore original r9 to be saved as part of reg-file */
-#ifdef CONFIG_SMP
- lr r9, [ARC_REG_SCRATCH_DATA0]
-#else
- ld r9, [@int1_saved_reg]
-#endif
-
- /* now we are ready to save the remaining context :) */
- st event_IRQ1, [sp, 8] /* Dummy ECR */
- st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
-
- SAVE_R0_TO_R12
- PUSH gp
- PUSH fp
- PUSH blink
- PUSH ilink1
- PUSHAX status32_l1
- PUSH lp_count
- PUSHAX lp_end
- PUSHAX lp_start
- PUSHAX bta_l1
-.endm
-
-.macro SAVE_ALL_INT2
-
- /* TODO-vineetg: SMP we can't use global nor can we use
- * SCRATCH0 as we do for int1 because while int1 is using
- * it, int2 can come
- */
- /* retsore original r9 , saved in sys_saved_r9 */
- ld r9, [@int2_saved_reg]
-
- /* now we are ready to save the remaining context :) */
- st event_IRQ2, [sp, 8] /* Dummy ECR */
- st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
-
- SAVE_R0_TO_R12
- PUSH gp
- PUSH fp
- PUSH blink
- PUSH ilink2
- PUSHAX status32_l2
- PUSH lp_count
- PUSHAX lp_end
- PUSHAX lp_start
- PUSHAX bta_l2
-.endm
-
-/*--------------------------------------------------------------
- * Restore all registers used by interrupt handlers.
- *
- * NOTE:
- *
- * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
- * by hardware and that is not good.
- *-------------------------------------------------------------*/
-
-.macro RESTORE_ALL_INT1
- POPAX bta_l1
- POPAX lp_start
- POPAX lp_end
-
- POP r9
- mov lp_count, r9 ;LD to lp_count is not allowed
-
- POPAX status32_l1
- POP ilink1
- POP blink
- POP fp
- POP gp
- RESTORE_R12_TO_R0
-
- ld sp, [sp] /* restore original sp */
- /* orig_r0, ECR, user_r25 skipped automatically */
-.endm
-
-.macro RESTORE_ALL_INT2
- POPAX bta_l2
- POPAX lp_start
- POPAX lp_end
-
- POP r9
- mov lp_count, r9 ;LD to lp_count is not allowed
-
- POPAX status32_l2
- POP ilink2
- POP blink
- POP fp
- POP gp
- RESTORE_R12_TO_R0
-
- ld sp, [sp] /* restore original sp */
- /* orig_r0, ECR, user_r25 skipped automatically */
-.endm
-
-
-/* Get CPU-ID of this core */
-.macro GET_CPU_ID reg
- lr \reg, [identity]
- lsr \reg, \reg, 8
- bmsk \reg, \reg, 7
-.endm
-
-#ifdef CONFIG_SMP
-
-/*-------------------------------------------------
+/*
* Retrieve the current running task on this CPU
- * 1. Determine curr CPU id.
- * 2. Use it to index into _current_task[ ]
+ * - loads it from backing _current_task[] (and can't use the
+ *   reg that caches the current task)
*/
.macro GET_CURR_TASK_ON_CPU reg
GET_CPU_ID \reg
@@ -579,7 +106,7 @@
add2 \tmp, @_current_task, \tmp
st \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
- mov r25, \tsk
+ mov gp, \tsk
#endif
.endm
@@ -594,21 +121,20 @@
.macro SET_CURR_TASK_ON_CPU tsk, tmp
st \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
- mov r25, \tsk
+ mov gp, \tsk
#endif
.endm
#endif /* SMP / UNI */
-/* ------------------------------------------------------------------
+/*
* Get the ptr to some field of Current Task at @off in task struct
- * -Uses r25 for Current task ptr if that is enabled
+ * - Uses current task cached in reg if enabled
*/
-
#ifdef CONFIG_ARC_CURR_IN_REG
.macro GET_CURR_TASK_FIELD_PTR off, reg
- add \reg, r25, \off
+ add \reg, gp, \off
.endm
#else
@@ -620,6 +146,23 @@
#endif /* CONFIG_ARC_CURR_IN_REG */
-#endif /* __ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
+
+extern void do_signal(struct pt_regs *);
+extern void do_notify_resume(struct pt_regs *);
+extern int do_privilege_fault(unsigned long, struct pt_regs *);
+extern int do_extension_fault(unsigned long, struct pt_regs *);
+extern int insterror_is_error(unsigned long, struct pt_regs *);
+extern int do_memory_error(unsigned long, struct pt_regs *);
+extern int trap_is_brkpt(unsigned long, struct pt_regs *);
+extern int do_misaligned_error(unsigned long, struct pt_regs *);
+extern int do_trap5_error(unsigned long, struct pt_regs *);
+extern int do_misaligned_access(unsigned long, struct pt_regs *, struct callee_regs *);
+extern void do_machine_check_fault(unsigned long, struct pt_regs *);
+extern void do_non_swi_trap(unsigned long, struct pt_regs *);
+extern void do_insterror_or_kprobe(unsigned long, struct pt_regs *);
+extern void do_page_fault(unsigned long, struct pt_regs *);
+
+#endif
#endif /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
index 28abc6905e07..6134175d96a3 100644
--- a/arch/arc/include/asm/exec.h
+++ b/arch/arc/include/asm/exec.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_EXEC_H
diff --git a/arch/arc/include/asm/fpu.h b/arch/arc/include/asm/fpu.h
new file mode 100644
index 000000000000..006bcf88a7a5
--- /dev/null
+++ b/arch/arc/include/asm/fpu.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ */
+
+#ifndef _ASM_ARC_FPU_H
+#define _ASM_ARC_FPU_H
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+ struct {
+ unsigned int l, h;
+ } aux_dpfp[2];
+};
+
+#define fpu_init_task(regs)
+
+#else
+
+/*
+ * ARCv2 FPU Control aux register
+ * - bits to enable Traps on Exceptions
+ * - Rounding mode
+ *
+ * ARCv2 FPU Status aux register
+ * - FPU exceptions flags (Inv, Div-by-Zero, overflow, underflow, inexact)
+ * - Flag Write Enable to clear flags explicitly (vs. only by FPU
+ *   instructions)
+ */
+
+struct arc_fpu {
+ unsigned int ctrl, status;
+};
+
+extern void fpu_init_task(struct pt_regs *regs);
+
+#endif /* !CONFIG_ISA_ARCOMPACT */
+
+struct task_struct;
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+
+#else /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+#define fpu_save_restore(p, n)
+#define fpu_init_task(regs)
+
+#endif /* CONFIG_ARC_FPU_SAVE_RESTORE */
+
+#endif /* _ASM_ARC_FPU_H */
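
For illustration, a minimal sketch of how the two hooks above are intended to be consumed; the call sites and function names here are hypothetical (the real ones live in the ARC process/context-switch code), and both hooks compile away to nothing when CONFIG_ARC_FPU_SAVE_RESTORE is off:

	#include <linux/sched.h>
	#include <asm/fpu.h>

	/* hypothetical call sites, shown only to illustrate the two hooks */
	static void example_start_new_thread(struct pt_regs *regs)
	{
		fpu_init_task(regs);		/* seed FPU ctrl/status for a new task (ARCv2) */
	}

	static void example_context_switch(struct task_struct *prev,
					   struct task_struct *next)
	{
		fpu_save_restore(prev, next);	/* save prev's FPU state, load next's */
	}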
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 4dc64ddebece..607d1c16d4dd 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Vineetg: August 2010: From Android kernel work
*/
@@ -16,18 +13,22 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
+#ifdef CONFIG_ARC_HAS_LLSC
+
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
\
+ smp_mb(); \
__asm__ __volatile__( \
- "1: ld %1, [%2] \n" \
+ "1: llock %1, [%2] \n" \
insn "\n" \
- "2: st %0, [%2] \n" \
+ "2: scond %0, [%2] \n" \
+ " bnz 1b \n" \
" mov %0, 0 \n" \
"3: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
"4: mov %0, %4 \n" \
- " b 3b \n" \
+ " j 3b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .align 4 \n" \
@@ -37,29 +38,56 @@
\
: "=&r" (ret), "=&r" (oldval) \
: "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ smp_mb() \
+
+#else /* !CONFIG_ARC_HAS_LLSC */
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ld %1, [%2] \n" \
+ insn "\n" \
+ "2: st %0, [%2] \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory"); \
+ smp_mb() \
+
+#endif
+
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
- pagefault_disable(); /* implies preempt_disable() */
+#ifndef CONFIG_ARC_HAS_LLSC
+ preempt_disable(); /* to guarantee atomic r-m-w of futex op */
+#endif
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
+ /* oldval = *uaddr; *uaddr += oparg ; ret = *uaddr */
__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
@@ -75,77 +103,67 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
- pagefault_enable(); /* subsumes preempt_enable() */
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+#ifndef CONFIG_ARC_HAS_LLSC
+ preempt_enable();
+#endif
+
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
-/* Compare-xchg with preemption disabled.
- * Notes:
- * -Best-Effort: Exchg happens only if compare succeeds.
- * If compare fails, returns; leaving retry/looping to upper layers
- * -successful cmp-xchg: return orig value in @addr (same as cmp val)
- * -Compare fails: return orig value in @addr
- * -user access r/w fails: return -EFAULT
+/*
+ * cmpxchg of futex (pagefaults disabled by caller)
+ * Return 0 for success, -EFAULT otherwise
*/
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
- u32 newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 expval,
+ u32 newval)
{
- u32 val;
+ int ret = 0;
+ u32 existval;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
- pagefault_disable(); /* implies preempt_disable() */
+#ifndef CONFIG_ARC_HAS_LLSC
+ preempt_disable(); /* to guarantee atomic r-m-w of futex op */
+#endif
+ smp_mb();
- /* TBD : can use llock/scond */
__asm__ __volatile__(
- "1: ld %0, [%3] \n"
- " brne %0, %1, 3f \n"
- "2: st %2, [%3] \n"
+#ifdef CONFIG_ARC_HAS_LLSC
+ "1: llock %1, [%4] \n"
+ " brne %1, %2, 3f \n"
+ "2: scond %3, [%4] \n"
+ " bnz 1b \n"
+#else
+ "1: ld %1, [%4] \n"
+ " brne %1, %2, 3f \n"
+ "2: st %3, [%4] \n"
+#endif
"3: \n"
" .section .fixup,\"ax\" \n"
- "4: mov %0, %4 \n"
- " b 3b \n"
+ "4: mov %0, %5 \n"
+ " j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" .align 4 \n"
" .word 1b, 4b \n"
" .word 2b, 4b \n"
" .previous\n"
- : "=&r"(val)
- : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+ : "+&r"(ret), "=&r"(existval)
+ : "r"(expval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
: "cc", "memory");
- pagefault_enable(); /* subsumes preempt_enable() */
+ smp_mb();
- *uval = val;
- return val;
+#ifndef CONFIG_ARC_HAS_LLSC
+ preempt_enable();
+#endif
+ *uval = existval;
+ return ret;
}
#endif
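
With the comparison handling dropped from the arch helper above, it is the generic futex layer that evaluates cmp/cmparg. A hedged sketch of that division of labour (simplified, not the actual kernel/futex code; the function name is made up):

	#include <linux/errno.h>
	#include <linux/futex.h>
	#include <asm/futex.h>

	/* simplified caller: the arch helper does the atomic r-m-w, the caller compares */
	static int example_wake_op_step(u32 __user *uaddr, int op, int oparg,
					int cmp, int cmparg)
	{
		int oldval, ret;

		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		switch (cmp) {
		case FUTEX_OP_CMP_EQ: return oldval == cmparg;
		case FUTEX_OP_CMP_NE: return oldval != cmparg;
		default:	      return -ENOSYS;
		}
	}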
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
new file mode 100644
index 000000000000..a6b8e2c352c4
--- /dev/null
+++ b/arch/arc/include/asm/highmem.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef CONFIG_HIGHMEM
+
+#include <uapi/asm/page.h>
+#include <asm/kmap_size.h>
+
+#define FIXMAP_SIZE PGDIR_SIZE
+#define PKMAP_SIZE PGDIR_SIZE
+
+/* start after vmalloc area */
+#define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+
+#define FIX_KMAP_SLOTS (KM_MAX_IDX * NR_CPUS)
+#define FIX_KMAP_BEGIN (0UL)
+#define FIX_KMAP_END ((FIX_KMAP_BEGIN + FIX_KMAP_SLOTS) - 1)
+
+#define FIXADDR_TOP (FIXMAP_BASE + (FIX_KMAP_END << PAGE_SHIFT))
+
+/*
+ * This should be converted to the asm-generic version, but of course this
+ * is needlessly different from all other architectures. Sigh - tglx
+ */
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) (((FIXADDR_TOP - ((x) & PAGE_MASK))) >> PAGE_SHIFT)
+
+/* start after fixmap area */
+#define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
+#define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+
+#include <asm/cacheflush.h>
+
+extern void kmap_init(void);
+
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
+
+static inline void flush_cache_kmaps(void)
+{
+ flush_cache_all();
+}
+#endif
+
+#endif
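
The fixmap slots above back the generic kmap_local machinery; a hedged usage sketch (the helper name is made up):

	#include <linux/highmem.h>

	/* maps the page into one of the FIX_KMAP_SLOTS per-CPU fixmap slots */
	static u32 example_read_first_word(struct page *page)
	{
		u32 *vaddr = kmap_local_page(page);
		u32 val = *vaddr;

		kunmap_local(vaddr);	/* arch_kmap_local_post_unmap() flushes the TLB entry */
		return val;
	}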
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
new file mode 100644
index 000000000000..7765dc105d54
--- /dev/null
+++ b/arch/arc/include/asm/hugepage.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+
+#ifndef _ASM_ARC_HUGEPAGE_H
+#define _ASM_ARC_HUGEPAGE_H
+
+#include <linux/types.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define pmd_mkinvalid(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
+
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
+
+#define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
+
+#define pfn_pmd(pfn, prot) (__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ /*
+ * open-coded pte_modify() with additional retaining of HW_SZ bit
+ * so that pmd_trans_huge() remains true for this PMD
+ */
+ return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd);
+
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+
+/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
+#define pmdp_establish generic_pmdp_establish
+
+#endif
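
To illustrate the pmd_modify() comment above: a protection change on a huge PMD must retain _PAGE_HW_SZ so the mapping is still recognised as huge. A hypothetical check:

	#include <linux/mm.h>
	#include <linux/bug.h>

	static void example_check_huge_prot_change(pmd_t *pmdp, pgprot_t newprot)
	{
		pmd_t pmd = pmd_modify(*pmdp, newprot);	/* keeps _PAGE_HW_SZ */

		WARN_ON(!pmd_trans_huge(pmd));		/* still recognised as huge */
	}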
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 473424d7528b..00171a212b3c 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_IO_H
@@ -12,19 +9,38 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
+#include <linux/unaligned.h>
+
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
+extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+#define ioremap ioremap
+#define ioremap_prot ioremap_prot
+#define iounmap iounmap
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return (void __iomem *)port;
+}
-#define PCI_IOBASE ((void __iomem *)0)
-
-extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- unsigned long flags);
-extern void iounmap(const void __iomem *addr);
+static inline void ioport_unmap(void __iomem *addr)
+{
+}
-#define ioremap_nocache(phy, sz) ioremap(phy, sz)
-#define ioremap_wc(phy, sz) ioremap(phy, sz)
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-/* Change struct page to physical address */
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
@@ -68,6 +84,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return w;
}
+/*
+ * {read,write}s{b,w,l}() repeatedly access the same IO address in
+ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
+ * @count times
+ */
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr, \
+ void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+	/* Some ARC CPUs don't support unaligned accesses */		\
+ if (is_aligned) { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } else { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ put_unaligned(x, buf++); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
@@ -100,6 +152,80 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
+#define __raw_writesx(t,f) \
+static inline void __raw_writes##f(volatile void __iomem *addr, \
+ const void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ const u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+	/* Some ARC CPUs don't support unaligned accesses */		\
+ if (is_aligned) { \
+ do { \
+ __raw_write##f(*buf++, addr); \
+ } while (--count); \
+ } else { \
+ do { \
+ __raw_write##f(get_unaligned(buf++), addr); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
+/*
+ * MMIO can also get buffered/optimized in the micro-arch, so barriers are needed
+ * Based on ARM model for the typical use case
+ *
+ * <ST [DMA buffer]>
+ * <writel MMIO "go" reg>
+ * or:
+ * <readl MMIO "status" reg>
+ * <LD [DMA buffer]>
+ *
+ * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
+
+#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
+
+/*
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * These are also defined to perform little-endian accesses. To provide
+ * the typical fixed-endian device register semantics, the byte order is
+ * swapped for Big Endian builds.
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
+ */
+#define readb_relaxed(c) __raw_readb(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(c)); __r; })
+
+#define writeb_relaxed(v,c) __raw_writeb(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
+
#include <asm-generic/io.h>
#endif /* _ASM_ARC_IO_H */
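
A hedged driver-style sketch of the ordering pattern the MMIO comment above describes (the device, its register offsets and the coherent-buffer handling are made up for illustration):

	#include <linux/io.h>
	#include <linux/string.h>

	#define EXAMPLE_REG_GO		0x00	/* hypothetical device registers */
	#define EXAMPLE_REG_STATUS	0x04
	#define EXAMPLE_STATUS_DONE	0x1

	static void example_kick_dma(void __iomem *regs, u32 *dma_buf, size_t words)
	{
		/* CPU stores to the DMA buffer ... */
		memset(dma_buf, 0, words * sizeof(*dma_buf));

		/* ... must reach memory before the device sees the "go" write:
		 * writel() = __iowmb() + writel_relaxed() gives that ordering */
		writel(1, regs + EXAMPLE_REG_GO);
	}

	static bool example_dma_done(void __iomem *regs, const u32 *dma_buf)
	{
		/* readl() = readl_relaxed() + __iormb(): the status read is
		 * ordered before the subsequent loads from the DMA buffer */
		if (!(readl(regs + EXAMPLE_REG_STATUS) & EXAMPLE_STATUS_DONE))
			return false;

		return dma_buf[0] != 0;
	}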
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index c0a72105ee0b..9cd79263acba 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -1,26 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_IRQ_H
#define __ASM_ARC_IRQ_H
-#define NR_CPU_IRQS 32 /* number of interrupt lines of ARC770 CPU */
-#define NR_IRQS 128 /* allow some CPU external IRQ handling */
+/*
+ * ARCv2 can support 240 interrupts in the core interrupt controller and
+ * 128 interrupts in the IDU. Thus 512 virtual IRQs must be enough for most
+ * board configurations.
+ * This doesn't affect ARCompact, but we change it to the same value
+ */
+#define NR_IRQS 512
/* Platform Independent IRQs */
-#define TIMER0_IRQ 3
-#define TIMER1_IRQ 4
+#ifdef CONFIG_ISA_ARCV2
+#define IPI_IRQ 19
+#define SOFTIRQ_IRQ 21
+#define FIRST_EXT_IRQ 24
+#endif
+#include <linux/interrupt.h>
#include <asm-generic/irq.h>
-extern void __init arc_init_IRQ(void);
-extern int __init get_hw_config_num_irq(void);
-
-void arc_local_timer_setup(unsigned int cpu);
+extern void arc_init_IRQ(void);
+extern void arch_do_IRQ(unsigned int, struct pt_regs *);
#endif
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
new file mode 100644
index 000000000000..30aea562f8aa
--- /dev/null
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_IRQFLAGS_ARCV2_H
+#define __ASM_IRQFLAGS_ARCV2_H
+
+#include <asm/arcregs.h>
+
+/* status32 Bits */
+#define STATUS_AD_BIT 19 /* Disable Align chk: core supports non-aligned */
+#define STATUS_IE_BIT 31
+
+#define STATUS_AD_MASK (1<<STATUS_AD_BIT)
+#define STATUS_IE_MASK (1<<STATUS_IE_BIT)
+
+/* status32 Bits as encoded/expected by CLRI/SETI */
+#define CLRI_STATUS_IE_BIT 4
+
+#define CLRI_STATUS_E_MASK 0xF
+#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
+
+#define AUX_USER_SP 0x00D
+#define AUX_IRQ_CTRL 0x00E
+#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
+#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_PRIORITY 0x206
+#define ICAUSE 0x40a
+#define AUX_IRQ_SELECT 0x40b
+#define AUX_IRQ_ENABLE 0x40c
+
+/* Was Intr taken in User Mode */
+#define AUX_IRQ_ACT_BIT_U 31
+
+/*
+ * Hardware supports 16 priorities (0 highest, 15 lowest)
+ * Linux by default runs at 1, priority 0 reserved for NMI style interrupts
+ */
+#define ARCV2_IRQ_DEF_PRIO 1
+
+/* seed value for status register */
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+#define __AD_ENB STATUS_AD_MASK
+#else
+#define __AD_ENB 0
+#endif
+
+#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | __AD_ENB | \
+ (ARCV2_IRQ_DEF_PRIO << 1))
+
+#ifndef __ASSEMBLER__
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__(" clri %0 \n" : "=r" (flags) : : "memory");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ __asm__ __volatile__(" seti %0 \n" : : "r" (flags) : "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+static inline void arch_local_irq_enable(void)
+{
+ unsigned int irqact = read_aux_reg(AUX_IRQ_ACT);
+
+ if (irqact & 0xffff)
+ write_aux_reg(AUX_IRQ_ACT, irqact & ~0xffff);
+
+ __asm__ __volatile__(" seti \n" : : : "memory");
+}
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ __asm__ __volatile__(" clri \n" : : : "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp)
+ :
+ : "memory");
+
+ /* To be compatible with irq_save()/irq_restore()
+ * encode the irq bits as expected by CLRI/SETI
+ * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
+ */
+ temp = (1 << 5) |
+ ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
+ ((temp >> 1) & CLRI_STATUS_E_MASK);
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & CLRI_STATUS_IE_MASK);
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arc_softirq_trigger(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, irq);
+}
+
+static inline void arc_softirq_clear(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, 0);
+}
+
+#else
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+.macro IRQ_DISABLE scratch
+ clri
+ TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
+ seti
+.endm
+
+#endif /* __ASSEMBLER__ */
+
+#endif
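
The inlines above are what the generic local_irq_save()/local_irq_restore() wrappers boil down to on ARCv2 (CLRI to disable and capture the state, SETI to restore it). A minimal usage sketch:

	#include <linux/irqflags.h>

	static void example_critical_section(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* arch_local_irq_save(): "clri %0" */
		/* ... short, non-sleeping critical section ... */
		local_irq_restore(flags);	/* arch_local_irq_restore(): "seti %0" */
	}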
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
new file mode 100644
index 000000000000..85c2f6bcde0c
--- /dev/null
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_IRQFLAGS_ARCOMPACT_H
+#define __ASM_IRQFLAGS_ARCOMPACT_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ * -Remove explicit mov of current status32 into reg, that is not needed
+ * -Use BIC insn instead of INVERTED + AND
+ * -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#include <asm/arcregs.h>
+
+/* status32 Reg bits related to Interrupt Handling */
+#define STATUS_E1_BIT 1 /* Int 1 enable */
+#define STATUS_E2_BIT 2 /* Int 2 enable */
+#define STATUS_A1_BIT 3 /* Int 1 active */
+#define STATUS_A2_BIT 4 /* Int 2 active */
+#define STATUS_AE_BIT 5 /* Exception active */
+
+#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
+#define STATUS_IE_MASK (STATUS_E1_MASK | STATUS_E2_MASK)
+
+/* Other Interrupt Handling related Aux regs */
+#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12 0x43 /* interrupt level register */
+
+#define AUX_IENABLE 0x40c
+#define AUX_ITRIGGER 0x40d
+#define AUX_IPULSE 0x415
+
+#define ISA_INIT_STATUS_BITS STATUS_IE_MASK
+
+#ifndef __ASSEMBLER__
+
+/******************************************************************
+ * IRQ Control Macros
+ *
+ * All of them have "memory" clobber (compiler barrier) which is needed to
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
+ * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
+ *
+ * Noted at the time of Abilis Timer List corruption
+ *
+ * Orig Bug + Rejected solution:
+ * https://lore.kernel.org/lkml/1364553218-31255-1-git-send-email-vgupta@synopsys.com
+ *
+ * Reasoning:
+ * https://lore.kernel.org/lkml/CA+55aFyFWjpSVQM6M266tKrG_ZXJzZ-nYejpmXYQXbrr42mGPQ@mail.gmail.com
+ *
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long temp, flags;
+
+ __asm__ __volatile__(
+ " lr %1, [status32] \n"
+ " bic %0, %1, %2 \n"
+ " and.f 0, %1, %2 \n"
+ " flag.nz %0 \n"
+ : "=r"(temp), "=r"(flags)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory", "cc");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+ __asm__ __volatile__(
+ " flag %0 \n"
+ :
+ : "r"(flags)
+ : "memory");
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+extern void arch_local_irq_enable(void);
+#else
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " or %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "cc", "memory");
+}
+#endif
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " and %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory");
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp)
+ :
+ : "memory");
+
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ | STATUS_E2_MASK
+#endif
+ ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#else
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
+
+.macro IRQ_DISABLE scratch
+ lr \scratch, [status32]
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+ TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
+ lr \scratch, [status32]
+ or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+#endif /* __ASSEMBLER__ */
+
+#endif
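
A sketch of why the "memory" clobber called out in the comment block above matters: an open-coded read-modify-write that relies on IRQ disabling must reload the variable after interrupts are off, and the clobber keeps the compiler from reusing a value it loaded earlier (hypothetical example, not the Abilis driver itself):

	static unsigned int example_counter;

	static void example_irq_safe_increment(void)
	{
		unsigned long flags;

		flags = arch_local_irq_save();	/* acts as a compiler barrier too */
		example_counter++;		/* load/modify/store done with IRQs off */
		arch_local_irq_restore(flags);	/* barrier again before IRQs come back */
	}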
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index d99f79bcf865..edf201a699d8 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -1,177 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_IRQFLAGS_H
#define __ASM_ARC_IRQFLAGS_H
-/* vineetg: March 2010 : local_irq_save( ) optimisation
- * -Remove explicit mov of current status32 into reg, that is not needed
- * -Use BIC insn instead of INVERTED + AND
- * -Conditionally disable interrupts (if they are not enabled, don't disable)
-*/
-
-#ifdef __KERNEL__
-
-#include <asm/arcregs.h>
-
-/* status32 Reg bits related to Interrupt Handling */
-#define STATUS_E1_BIT 1 /* Int 1 enable */
-#define STATUS_E2_BIT 2 /* Int 2 enable */
-#define STATUS_A1_BIT 3 /* Int 1 active */
-#define STATUS_A2_BIT 4 /* Int 2 active */
-
-#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
-#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
-#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
-#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
-
-/* Other Interrupt Handling related Aux regs */
-#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
-#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
-#define AUX_IRQ_LV12 0x43 /* interrupt level register */
-
-#define AUX_IENABLE 0x40c
-#define AUX_ITRIGGER 0x40d
-#define AUX_IPULSE 0x415
-
-#ifndef __ASSEMBLY__
-
-/******************************************************************
- * IRQ Control Macros
- ******************************************************************/
-
-/*
- * Save IRQ state and disable IRQs
- */
-static inline long arch_local_irq_save(void)
-{
- unsigned long temp, flags;
-
- __asm__ __volatile__(
- " lr %1, [status32] \n"
- " bic %0, %1, %2 \n"
- " and.f 0, %1, %2 \n"
- " flag.nz %0 \n"
- : "=r"(temp), "=r"(flags)
- : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
- : "memory", "cc");
-
- return flags;
-}
-
-/*
- * restore saved IRQ state
- */
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-
- __asm__ __volatile__(
- " flag %0 \n"
- :
- : "r"(flags)
- : "memory");
-}
-
-/*
- * Unconditionally Enable IRQs
- */
-extern void arch_local_irq_enable(void);
-
-/*
- * Unconditionally Disable IRQs
- */
-static inline void arch_local_irq_disable(void)
-{
- unsigned long temp;
-
- __asm__ __volatile__(
- " lr %0, [status32] \n"
- " and %0, %0, %1 \n"
- " flag %0 \n"
- : "=&r"(temp)
- : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
- : "memory");
-}
-
-/*
- * save IRQ state
- */
-static inline long arch_local_save_flags(void)
-{
- unsigned long temp;
-
- __asm__ __volatile__(
- " lr %0, [status32] \n"
- : "=&r"(temp)
- :
- : "memory");
-
- return temp;
-}
-
-/*
- * Query IRQ state
- */
-static inline int arch_irqs_disabled_flags(unsigned long flags)
-{
- return !(flags & (STATUS_E1_MASK
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
- | STATUS_E2_MASK
-#endif
- ));
-}
-
-static inline int arch_irqs_disabled(void)
-{
- return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-static inline void arch_mask_irq(unsigned int irq)
-{
- unsigned int ienb;
-
- ienb = read_aux_reg(AUX_IENABLE);
- ienb &= ~(1 << irq);
- write_aux_reg(AUX_IENABLE, ienb);
-}
-
-static inline void arch_unmask_irq(unsigned int irq)
-{
- unsigned int ienb;
-
- ienb = read_aux_reg(AUX_IENABLE);
- ienb |= (1 << irq);
- write_aux_reg(AUX_IENABLE, ienb);
-}
-
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/irqflags-compact.h>
#else
-
-.macro IRQ_DISABLE scratch
- lr \scratch, [status32]
- bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
- flag \scratch
-.endm
-
-.macro IRQ_DISABLE_SAVE scratch, save
- lr \scratch, [status32]
- mov \save, \scratch /* Make a copy */
- bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
- flag \scratch
-.endm
-
-.macro IRQ_ENABLE scratch
- lr \scratch, [status32]
- or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
- flag \scratch
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* KERNEL */
+#include <asm/irqflags-arcv2.h>
+#endif
#endif
diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
new file mode 100644
index 000000000000..66ead75784d9
--- /dev/null
+++ b/arch/arc/include/asm/jump_label.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARC_JUMP_LABEL_H
+#define _ASM_ARC_JUMP_LABEL_H
+
+#ifndef __ASSEMBLER__
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+/*
+ * NOTE about '.balign 4':
+ *
+ * To make atomic update of the patched instruction possible we need to
+ * guarantee that the instruction doesn't cross an L1 cache line boundary.
+ *
+ * As of today we simply align the patchable instruction to 4 bytes using the
+ * ".balign 4" directive. In that case the patched instruction is padded with
+ * one 16-bit NOP_S if required.
+ * However, the 'align by 4' directive is much stricter than actually needed:
+ * it is enough that our 32-bit instruction doesn't cross an L1 cache line /
+ * L1 I$ fetch block boundary, which could be achieved with the
+ * ".bundle_align_mode" assembler directive. That would save us from adding
+ * useless NOP_S padding in most cases.
+ *
+ * TODO: switch to the ".bundle_align_mode" directive once it is supported by
+ * the ARC toolchain.
+ */
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ "1: \n"
+ "nop \n"
+ ".pushsection __jump_table, \"aw\" \n"
+ ".word 1b, %l[l_yes], %c0 \n"
+ ".popsection \n"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ "1: \n"
+ "b %l[l_yes] \n"
+ ".pushsection __jump_table, \"aw\" \n"
+ ".word 1b, %l[l_yes], %c0 \n"
+ ".popsection \n"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASSEMBLER__ */
+#endif
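
The two inlines above are what the generic static-key machinery expands to on ARC. A typical (hypothetical) consumer, for reference:

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	static DEFINE_STATIC_KEY_FALSE(example_feature_key);

	static void example_hot_path(void)
	{
		/* emits the 4-byte NOP from arch_static_branch() until the key is
		 * enabled, after which the NOP is patched into a branch */
		if (static_branch_unlikely(&example_feature_key))
			pr_info("example feature enabled\n");
	}

	static void example_enable_feature(void)
	{
		static_branch_enable(&example_feature_key);	/* patches all branch sites */
	}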
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
index 3fbe6c472c0a..f92049d1d33a 100644
--- a/arch/arc/include/asm/kdebug.h
+++ b/arch/arc/include/asm/kdebug.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_KDEBUG_H
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index b65fca7ffeb5..f9f71b90963f 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* kgdb support for ARC
*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ARC_KGDB_H__
@@ -19,7 +16,7 @@
* register API yet */
#undef DBG_MAX_REG_NUM
-#define GDB_MAX_REGS 39
+#define GDB_MAX_REGS 87
#define BREAK_INSTR_SIZE 2
#define CACHE_FLUSH_IS_SAFE 1
@@ -33,23 +30,27 @@ static inline void arch_kgdb_breakpoint(void)
extern void kgdb_trap(struct pt_regs *regs);
-enum arc700_linux_regnums {
+/* This is the numbering of registers according to GDB. See GDB's
+ * arc-tdep.h for details.
+ *
+ * Registers are ordered for GDB 7.5; the ordering is incompatible with GDB 6.8. */
+enum arc_linux_regnums {
_R0 = 0,
_R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
_R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
_R25, _R26,
- _BTA = 27,
- _LP_START = 28,
- _LP_END = 29,
- _LP_COUNT = 30,
- _STATUS32 = 31,
- _BLINK = 32,
- _FP = 33,
- __SP = 34,
- _EFA = 35,
- _RET = 36,
- _ORIG_R8 = 37,
- _STOP_PC = 38
+ _FP = 27,
+ __SP = 28,
+ _R30 = 30,
+ _BLINK = 31,
+ _LP_COUNT = 60,
+ _STOP_PC = 64,
+ _RET = 64,
+ _LP_START = 65,
+ _LP_END = 66,
+ _STATUS32 = 67,
+ _ECR = 76,
+ _BTA = 82,
};
#else
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 944dbedb38b5..68e8301c0df2 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -1,14 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ARC_KPROBES_H
#define _ARC_KPROBES_H
+#include <asm-generic/kprobes.h>
+
#ifdef CONFIG_KPROBES
typedef u16 kprobe_opcode_t;
@@ -33,9 +32,6 @@ struct kprobe;
void arch_remove_kprobe(struct kprobe *p);
-int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
-
struct prev_kprobe {
struct kprobe *kp;
unsigned long status;
@@ -43,18 +39,14 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned int kprobe_status;
- struct pt_regs jprobe_saved_regs;
- char jprobes_stack[MAX_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
-void kretprobe_trampoline(void);
+void __kretprobe_trampoline(void);
void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
#else
-static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
-{
-}
-#endif
+#define trap_is_kprobe(address, regs)
+#endif /* CONFIG_KPROBES */
-#endif
+#endif /* _ARC_KPROBES_H */
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 0283e9e44e0d..ba3cb65b5eaa 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -1,28 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
-#ifdef __ASSEMBLY__
+#include <asm/dwarf.h>
-/* Can't use the ENTRY macro in linux/linkage.h
- * gas considers ';' as comment vs. newline
- */
-.macro ARC_ENTRY name
- .global \name
- .align 4
- \name:
+#define ASM_NL ` /* use '`' to mark new line in macro */
+#define __ALIGN .align 4
+#define __ALIGN_STR __stringify(__ALIGN)
+
+#ifdef __ASSEMBLER__
+
+.macro ST2 e, o, off
+#ifdef CONFIG_ARC_HAS_LL64
+ std \e, [sp, \off]
+#else
+ st \e, [sp, \off]
+ st \o, [sp, \off+4]
+#endif
.endm
-.macro ARC_EXIT name
-#define ASM_PREV_SYM_ADDR(name) .-##name
- .size \ name, ASM_PREV_SYM_ADDR(\name)
+.macro LD2 e, o, off
+#ifdef CONFIG_ARC_HAS_LL64
+ ldd \e, [sp, \off]
+#else
+ ld \e, [sp, \off]
+ ld \o, [sp, \off+4]
+#endif
.endm
/* annotation for data we want in DCCM - if enabled in .config */
@@ -44,20 +51,30 @@
#endif
.endm
-#else /* !__ASSEMBLY__ */
+#define ENTRY_CFI(name) \
+ .globl name ASM_NL \
+ ALIGN ASM_NL \
+ name: ASM_NL \
+ CFI_STARTPROC ASM_NL
+
+#define END_CFI(name) \
+ CFI_ENDPROC ASM_NL \
+ .size name, .-name
+
+#else /* !__ASSEMBLER__ */
#ifdef CONFIG_ARC_HAS_ICCM
-#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#define __arcfp_code __section(".text.arcfp")
#else
-#define __arcfp_code __attribute__((__section__(".text")))
+#define __arcfp_code __section(".text")
#endif
#ifdef CONFIG_ARC_HAS_DCCM
-#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#define __arcfp_data __section(".data.arcfp")
#else
-#define __arcfp_data __attribute__((__section__(".data")))
+#define __arcfp_data __section(".data")
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 9998dc846ebb..c4e197059379 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* based on METAG mach/arch.h (which in turn was based on ARM)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_MACH_DESC_H_
@@ -23,11 +20,8 @@
* @dt_compat: Array of device tree 'compatible' strings
* (XXX: although only 1st entry is looked at)
* @init_early: Very early callback [called from setup_arch()]
- * @init_irq: setup external IRQ controllers [called from init_IRQ()]
- * @init_smp: for each CPU (e.g. setup IPI)
+ * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP)
* [(M):init_IRQ(), (o):start_kernel_secondary()]
- * @init_time: platform specific clocksource/clockevent registration
- * [called from time_init()]
* @init_machine: arch initcall level callback (e.g. populate static
* platform devices or parse Devicetree)
* @init_late: Late initcall level callback
@@ -36,13 +30,8 @@
struct machine_desc {
const char *name;
const char **dt_compat;
-
void (*init_early)(void);
- void (*init_irq)(void);
-#ifdef CONFIG_SMP
- void (*init_smp)(unsigned int);
-#endif
- void (*init_time)(void);
+ void (*init_per_cpu)(unsigned int);
void (*init_machine)(void);
void (*init_late)(void);
@@ -51,22 +40,12 @@ struct machine_desc {
/*
* Current machine - only accessible during boot.
*/
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
/*
* Machine type table - also only accessible during boot
*/
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
-#define for_each_machine_desc(p) \
- for (p = __arch_info_begin; p < __arch_info_end; p++)
-
-static inline struct machine_desc *default_machine_desc(void)
-{
- /* the default machine is the last one linked in */
- if (__arch_info_end - 1 < __arch_info_begin)
- return NULL;
- return __arch_info_end - 1;
-}
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
/*
* Set of macros to define architecture features.
@@ -74,14 +53,12 @@ static inline struct machine_desc *default_machine_desc(void)
*/
#define MACHINE_START(_type, _name) \
static const struct machine_desc __mach_desc_##_type \
-__used \
-__attribute__((__section__(".arch.info.init"))) = { \
+__used __section(".arch.info.init") = { \
.name = _name,
#define MACHINE_END \
};
-extern struct machine_desc *setup_machine_fdt(void *dt);
-extern void __init copy_devtree(void);
+extern const struct machine_desc *setup_machine_fdt(void *dt);
#endif
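
For reference, a hypothetical board description built with the macros above (the compatible string and callback are made up):

	#include <linux/init.h>
	#include <asm/mach_desc.h>

	static void __init example_init_machine(void)
	{
		/* populate static platform devices or parse the DT here */
	}

	static const char *example_compat[] __initconst = {
		"snps,example-board",
		NULL,
	};

	MACHINE_START(EXAMPLE, "example-board")
		.dt_compat	= example_compat,
		.init_machine	= example_init_machine,
	MACHINE_END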
diff --git a/arch/arc/include/asm/mmu-arcv2.h b/arch/arc/include/asm/mmu-arcv2.h
new file mode 100644
index 000000000000..5e5482026ac9
--- /dev/null
+++ b/arch/arc/include/asm/mmu-arcv2.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012, 2019-20 Synopsys, Inc. (www.synopsys.com)
+ *
+ * MMUv3 (arc700) / MMUv4 (archs) are software page walked and software managed.
+ * This file contains the TLB access registers and commands
+ */
+
+#ifndef _ASM_ARC_MMU_ARCV2_H
+#define _ASM_ARC_MMU_ARCV2_H
+
+#include <soc/arc/arc_aux.h>
+
+/*
+ * TLB Management regs
+ */
+#define ARC_REG_MMU_BCR 0x06f
+
+#ifdef CONFIG_ARC_MMU_V3
+#define ARC_REG_TLBPD0 0x405
+#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBPD1HI 0 /* Dummy: allows common code */
+#define ARC_REG_TLBINDEX 0x407
+#define ARC_REG_TLBCOMMAND 0x408
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+#else
+#define ARC_REG_TLBPD0 0x460
+#define ARC_REG_TLBPD1 0x461
+#define ARC_REG_TLBPD1HI 0x463
+#define ARC_REG_TLBINDEX 0x464
+#define ARC_REG_TLBCOMMAND 0x465
+#define ARC_REG_PID 0x468
+#define ARC_REG_SCRATCH_DATA0 0x46c
+#endif
+
+/* Bits in MMU PID reg */
+#define __TLB_ENABLE (1 << 31)
+#define __PROG_ENABLE (1 << 30)
+#define MMU_ENABLE (__TLB_ENABLE | __PROG_ENABLE)
+
+/* Bits in TLB Index reg */
+#define TLB_LKUP_ERR 0x80000000
+
+#ifdef CONFIG_ARC_MMU_V3
+#define TLB_DUP_ERR (TLB_LKUP_ERR | 0x00000001)
+#else
+#define TLB_DUP_ERR (TLB_LKUP_ERR | 0x40000000)
+#endif
+
+/*
+ * TLB Commands
+ */
+#define TLBWrite 0x1
+#define TLBRead 0x2
+#define TLBGetIndex 0x3
+#define TLBProbe 0x4
+#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
+#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
+
+#ifdef CONFIG_ARC_MMU_V4
+#define TLBInsertEntry 0x7
+#define TLBDeleteEntry 0x8
+#endif
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
+#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+
+#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
+
+#ifndef __ASSEMBLER__
+
+struct mm_struct;
+extern int pae40_exist_but_not_enab(void);
+
+static inline int is_pae40_enabled(void)
+{
+ return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+}
+
+static inline void mmu_setup_asid(struct mm_struct *mm, unsigned long asid)
+{
+ write_aux_reg(ARC_REG_PID, asid | MMU_ENABLE);
+}
+
+static inline void mmu_setup_pgd(struct mm_struct *mm, void *pgd)
+{
+ /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+#ifdef CONFIG_ISA_ARCV2
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, (unsigned int)pgd);
+#endif
+}
+
+#else
+
+.macro ARC_MMU_REENABLE reg
+ lr \reg, [ARC_REG_PID]
+ or \reg, \reg, MMU_ENABLE
+ sr \reg, [ARC_REG_PID]
+.endm
+
+#endif /* !__ASSEMBLER__ */
+
+#endif
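
A hedged sketch of how the two helpers above are meant to be used at context switch (the real caller is the ASID allocator in <asm/mmu_context.h>; the function and the hw_asid argument here are made up):

	#include <linux/mm_types.h>

	static void example_activate_mm(struct mm_struct *next, unsigned long hw_asid)
	{
		mmu_setup_pgd(next, next->pgd);	/* cache PGD in SCRATCH_DATA0 (ARCv2 only) */
		mmu_setup_asid(next, hw_asid);	/* program 8-bit ASID, keep MMU_ENABLE set */
	}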
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 7c03fe61759c..e3b35ceab582 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -1,67 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H
-#if defined(CONFIG_ARC_MMU_V1)
-#define CONFIG_ARC_MMU_VER 1
-#elif defined(CONFIG_ARC_MMU_V2)
-#define CONFIG_ARC_MMU_VER 2
-#elif defined(CONFIG_ARC_MMU_V3)
-#define CONFIG_ARC_MMU_VER 3
-#endif
-
-/* MMU Management regs */
-#define ARC_REG_MMU_BCR 0x06f
-#define ARC_REG_TLBPD0 0x405
-#define ARC_REG_TLBPD1 0x406
-#define ARC_REG_TLBINDEX 0x407
-#define ARC_REG_TLBCOMMAND 0x408
-#define ARC_REG_PID 0x409
-#define ARC_REG_SCRATCH_DATA0 0x418
-
-/* Bits in MMU PID register */
-#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
-
-/* Error code if probe fails */
-#define TLB_LKUP_ERR 0x80000000
-
-/* TLB Commands */
-#define TLBWrite 0x1
-#define TLBRead 0x2
-#define TLBGetIndex 0x3
-#define TLBProbe 0x4
+#ifndef __ASSEMBLER__
-#if (CONFIG_ARC_MMU_VER >= 2)
-#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
-#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
-#endif
-
-#ifndef __ASSEMBLY__
+#include <linux/threads.h> /* NR_CPUS */
typedef struct {
- unsigned long asid; /* Pvt Addr-Space ID for mm */
-#ifdef CONFIG_ARC_TLB_DBG
- struct task_struct *tsk;
-#endif
+ unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
-#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
-void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
-#else
-#define tlb_paranoid_check(a, b)
-#endif
+struct pt_regs;
+extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *);
-void arc_mmu_init(void);
-extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
-void __init read_decode_mmu_bcr(void);
+#endif
-#endif /* !__ASSEMBLY__ */
+#include <asm/mmu-arcv2.h>
#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 0d71fb11b57c..9963bb1a5733 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* vineetg: May 2011
* -Refactored get_new_mmu_context( ) to only handle live-mm.
* retiring-mm handled in other hooks
@@ -18,111 +15,86 @@
#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H
-#include <asm/arcregs.h>
-#include <asm/tlb.h>
+#include <linux/sched/mm.h>
+#include <asm/tlb.h>
#include <asm-generic/mm_hooks.h>
-/* ARC700 ASID Management
+/* ARC ASID Management
*
- * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
- * with same vaddr (different tasks) to co-exit. This provides for
- * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ * The MMU tags TLB entries with an 8-bit ASID, avoiding the need to flush
+ * the TLB on context-switch.
*
- * Linux assigns each task a unique ASID. A simple round-robin allocation
- * of H/w ASID is done using software tracker @asid_cache.
- * When it reaches max 255, the allocation cycle starts afresh by flushing
- * the entire TLB and wrapping ASID back to zero.
+ * ASID is managed per cpu, so threads of a task running on different CPUs
+ * can have different ASIDs. Global ASID management is only needed if the
+ * hardware supports TLB shootdown and/or a shared TLB across cores, which
+ * ARC doesn't.
*
- * For book-keeping, Linux uses a couple of data-structures:
- * -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- * time of say switch_mm( )
- * -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- * given an ASID, finding the mm struct associated.
+ * Each task is assigned a unique ASID, with a simple round-robin allocator
+ * tracked in @asid_cpu. When the 8-bit value rolls over, a new cycle is
+ * started over from 0, and the TLB is flushed.
*
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cpu (and mm->asid) hold the 8 bit MMU PID in the lower
+ * bits while the remaining 24 bits serve as a cycle/generation indicator;
+ * natural 32 bit unsigned math automagically increments the generation when
+ * the lower 8 bits roll over.
*/
-#define FIRST_ASID 0
-#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
-#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
-#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK 0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK (~MM_CTXT_ASID_MASK)
-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID 0UL
-extern int asid_cache;
+#define asid_mm(mm, cpu) mm->context.asid[cpu]
+#define hw_pid(mm, cpu) (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
+
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu) per_cpu(asid_cache, cpu)
/*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
*/
static inline void get_new_mmu_context(struct mm_struct *mm)
{
- struct mm_struct *prev_owner;
+ const unsigned int cpu = smp_processor_id();
unsigned long flags;
local_irq_save(flags);
/*
- * Relinquish the currently owned ASID (if any).
- * Doing unconditionally saves a cmp-n-branch; for already unused
- * ASID slot, the value was/remains NULL
+ * Move to new ASID if it was not from current alloc-cycle/generation.
+ * This is done by ensuring that the generation bits in both mm->ASID
+ * and cpu's ASID counter are exactly the same.
+ *
+ * Note: Callers needing a new ASID unconditionally, independent of
+ * generation, e.g. local_flush_tlb_mm() for a forking parent,
+ * first need to destroy the context, setting it to an invalid
+ * value.
*/
- asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+ if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
+ goto set_hw;
- /* move to new ASID */
- if (++asid_cache > MAX_ASID) { /* ASID roll-over */
- asid_cache = FIRST_ASID;
- flush_tlb_all();
- }
+ /* move to new ASID and handle rollover */
+ if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
- /*
- * Is next ASID already owned by some-one else (we are stealing it).
- * If so, let the orig owner be aware of this, so when it runs, it
- * asks for a brand new ASID. This would only happen for a long-lived
- * task with ASID from prev allocation cycle (before ASID roll-over).
- *
- * This might look wrong - if we are re-using some other task's ASID,
- * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
- * care of such a case: it ensures that task with ASID from prev alloc
- * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
- * The stealing scenario described here will only happen if that task
- * didn't get a chance to refresh it's ASID - implying stale entries
- * won't exist.
- */
- prev_owner = asid_mm_map[asid_cache];
- if (prev_owner)
- prev_owner->context.asid = NO_ASID;
+ local_flush_tlb_all();
- /* Assign new ASID to tsk */
- asid_mm_map[asid_cache] = mm;
- mm->context.asid = asid_cache;
+ /*
+ * The check above is for rollover of the 8 bit ASID in its 32 bit
+ * container. If the container itself wrapped around, set it to a
+ * non zero "generation" to distinguish it from "no context".
+ */
+ if (!asid_cpu(cpu))
+ asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
+ }
-#ifdef CONFIG_ARC_TLB_DBG
- pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
- " pid:%u, assigned asid:%lu\n",
- (unsigned int)mm, (unsigned int)prev_owner,
- (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
- (mm->context.tsk)->pid, mm->context.asid);
-#endif
+ /* Assign new ASID to tsk */
+ asid_mm(mm, cpu) = asid_cpu(cpu);
- write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+ mmu_setup_asid(mm, hw_pid(mm, cpu));
local_irq_restore(flags);
}
@@ -131,83 +103,72 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
* Initialize the context related info for a new mm_struct
* instance.
*/
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
- mm->context.asid = NO_ASID;
-#ifdef CONFIG_ARC_TLB_DBG
- mm->context.tsk = tsk;
-#endif
+ int i;
+
+ for_each_possible_cpu(i)
+ asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
return 0;
}
+#define destroy_context destroy_context
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ /* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+ local_irq_save(flags);
+ asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+ local_irq_restore(flags);
+}
+
/* Prepare the MMU for task: setup PID reg with allocated ASID
If task doesn't have an ASID (never alloc or stolen, get a new ASID)
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
-#ifndef CONFIG_SMP
- /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
- write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
+ const int cpu = smp_processor_id();
/*
- * Get a new ASID if task doesn't have a valid one. Possible when
- * -task never had an ASID (fresh after fork)
- * -it's ASID was stolen - past an ASID roll-over.
- * -There's a third obscure scenario (if this task is running for the
- * first time afer an ASID rollover), where despite having a valid
- * ASID, we force a get for new ASID - see comments at top.
- *
- * Both the non-alloc scenario and first-use-after-rollover can be
- * detected using the single condition below: NO_ASID = 256
- * while asid_cache is always a valid ASID value (0-255).
+ * Note that the mm_cpumask is "aggregating" only, we don't clear it
+ * for the switched-out task, unlike some other arches.
+ * It is used to enlist cpus for sending TLB flush IPIs; not sending an IPI
+ * to a CPU where the task once ran could cause stale TLB entry re-use,
+ * especially for a multi-threaded task.
+ * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+ * For a non-aggregating mm_cpumask, IPI not sent C1, and if T1
+ * were to re-migrate to C1, it could access the unmapped region
+ * via any existing stale TLB entries.
*/
- if (next->context.asid > asid_cache) {
- get_new_mmu_context(next);
- } else {
- /*
- * XXX: This will never happen given the chks above
- * BUG_ON(next->context.asid > MAX_ASID);
- */
- write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
- }
-
-}
-
-static inline void destroy_context(struct mm_struct *mm)
-{
- unsigned long flags;
-
- local_irq_save(flags);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
- asid_mm_map[mm->context.asid] = NULL;
- mm->context.asid = NO_ASID;
+ mmu_setup_pgd(next, next->pgd);
- local_irq_restore(flags);
+ get_new_mmu_context(next);
}
+/*
+ * activate_mm defaults (in asm-generic) to switch_mm and is called at the
+ * time of execve() to get a new ASID. Note the subtlety here:
+ * get_new_mmu_context() behaves differently here vs. in switch_mm(). Here
+ * it always assigns a new ASID, because mm has an unallocated "initial"
+ * value, while in the latter it moves to a new ASID only if the existing
+ * one is unallocated or from a previous generation.
+ */
+
/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
* for retiring-mm. However destroy_context( ) still needs to do that because
* between mm_release( ) = >deactive_mm( ) and
* mmput => .. => __mmdrop( ) => destroy_context( )
- * there is a good chance that task gets sched-out/in, making it's ASID valid
+ * there is a good chance that task gets sched-out/in, making its ASID valid
* again (this teased me for a whole day).
*/
-#define deactivate_mm(tsk, mm) do { } while (0)
-
-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-#ifndef CONFIG_SMP
- write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
-
- /* Unconditionally get a new ASID */
- get_new_mmu_context(next);
-
-}
-#define enter_lazy_tlb(mm, tsk)
+#include <asm-generic/mmu_context.h>
#endif /* __ASM_ARC_MMU_CONTEXT_H */
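The generation trick above is plain 32-bit unsigned arithmetic. Below is a self-contained user-space model of the allocator for a single CPU, a minimal sketch only: the function and variable names are illustrative, and locking, per-CPU state and the actual TLB flush are elided.

  #include <stdint.h>
  #include <stdio.h>

  #define MM_CTXT_ASID_MASK	0x000000ffU	/* 8 bit hardware PID */
  #define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
  #define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
  #define MM_CTXT_NO_ASID	0U

  static uint32_t asid_cache = MM_CTXT_FIRST_CYCLE;	/* per-CPU allocator state */

  /* Return the mm's ASID+generation, refreshing it if it is stale/unallocated */
  static uint32_t get_mmu_context(uint32_t mm_asid)
  {
  	/* Same generation as the allocator: keep the existing ASID */
  	if (!((mm_asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
  		return mm_asid;

  	/* Move to a new ASID; detect rollover of the low 8 bits */
  	if (!(++asid_cache & MM_CTXT_ASID_MASK)) {
  		/* the real code does local_flush_tlb_all() here */
  		if (!asid_cache)			/* full 32 bit wrap */
  			asid_cache = MM_CTXT_FIRST_CYCLE;
  	}

  	return asid_cache;
  }

  int main(void)
  {
  	uint32_t mm = MM_CTXT_NO_ASID;
  	int i;

  	mm = get_mmu_context(mm);
  	printf("asid=0x%02x gen=0x%06x\n",
  	       (unsigned)(mm & MM_CTXT_ASID_MASK), (unsigned)(mm >> 8));

  	/* 300 other tasks grab ASIDs, forcing a rollover to a new generation */
  	for (i = 0; i < 300; i++)
  		get_mmu_context(MM_CTXT_NO_ASID);

  	mm = get_mmu_context(mm);	/* stale generation -> refreshed */
  	printf("asid=0x%02x gen=0x%06x\n",
  	       (unsigned)(mm & MM_CTXT_ASID_MASK), (unsigned)(mm >> 8));
  	return 0;
  }

Running it shows the task's ASID being re-assigned with a bumped generation once the 8-bit PID space wraps, exactly the condition get_new_mmu_context() tests with the XOR of the cycle bits.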
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 518222bb3f8e..f534a1fef070 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
-
*/
#ifndef _ASM_ARC_MODULE_H
@@ -14,15 +10,12 @@
#include <asm-generic/module.h>
-#ifdef CONFIG_ARC_DW2_UNWIND
struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
void *unw_info;
int unw_sec_idx;
-};
#endif
-
-#define MODULE_PROC_FAMILY "ARC700"
-
-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+ const char *secstr;
+};
#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
deleted file mode 100644
index a2f88ff9f506..000000000000
--- a/arch/arc/include/asm/mutex.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
- * atomic dec based which can "count" any number of lock contenders.
- * This ideally needs to be fixed in core, but for now switching to dec ver.
- */
-#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
-#include <asm-generic/mutex-dec.h>
-#else
-#include <asm-generic/mutex-xchg.h>
-#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 9c8aa41e45c2..9720fe6b2c24 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -1,22 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_PAGE_H
#define __ASM_ARC_PAGE_H
#include <uapi/asm/page.h>
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
-#ifndef __ASSEMBLY__
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#define PAGE_MASK_PHYS PAGE_MASK
-#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
-#define free_user_page(page, addr) free_page(addr)
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
+#ifndef __ASSEMBLER__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
struct vm_area_struct;
@@ -28,82 +34,108 @@ void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma);
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
-#undef STRICT_MM_TYPECHECKS
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+
+#define pgd_val(x) ((x).pgd)
+#define __pgd(x) ((pgd_t) { (x) })
+
+#if CONFIG_PGTABLE_LEVELS > 3
-#ifdef STRICT_MM_TYPECHECKS
-/*
- * These are used to make use of C type-checking..
- */
typedef struct {
+ unsigned long pud;
+} pud_t;
+
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+typedef struct {
+ unsigned long pmd;
+} pmd_t;
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+#endif
+
+typedef struct {
+#ifdef CONFIG_ARC_HAS_PAE40
+ unsigned long long pte;
+#else
unsigned long pte;
+#endif
} pte_t;
-typedef struct {
- unsigned long pgd;
-} pgd_t;
+
+#define pte_val(x) ((x).pte)
+#define __pte(x) ((pte_t) { (x) })
+
typedef struct {
unsigned long pgprot;
} pgprot_t;
-typedef unsigned long pgtable_t;
-
-#define pte_val(x) ((x).pte)
-#define pgd_val(x) ((x).pgd)
-#define pgprot_val(x) ((x).pgprot)
-#define __pte(x) ((pte_t) { (x) })
-#define __pgd(x) ((pgd_t) { (x) })
-#define __pgprot(x) ((pgprot_t) { (x) })
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) })
+#define pte_pgprot(x) __pgprot(pte_val(x))
-#define pte_pgprot(x) __pgprot(pte_val(x))
+typedef struct page *pgtable_t;
-#else /* !STRICT_MM_TYPECHECKS */
+/*
+ * When HIGHMEM is enabled we have holes in the memory map, so we need a
+ * pfn_valid() that takes into account the actual extents of the physical
+ * memory
+ */
+#ifdef CONFIG_HIGHMEM
-typedef unsigned long pte_t;
-typedef unsigned long pgd_t;
-typedef unsigned long pgprot_t;
-typedef unsigned long pgtable_t;
+extern unsigned long arch_pfn_offset;
+#define ARCH_PFN_OFFSET arch_pfn_offset
-#define pte_val(x) (x)
-#define pgd_val(x) (x)
-#define pgprot_val(x) (x)
-#define __pte(x) (x)
-#define __pgprot(x) (x)
-#define pte_pgprot(x) (x)
+extern int pfn_valid(unsigned long pfn);
+#define pfn_valid pfn_valid
-#endif
+#else /* CONFIG_HIGHMEM */
-#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE)
-#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#endif /* CONFIG_HIGHMEM */
/*
* __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
*
* These macros have historically been misnamed
* virt here means link-address/program-address as embedded in object code.
- * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
- * 128th page, and virt_to_page( ) will return the struct page corresp to it.
- * mem_map[ ] is an array of struct page for each page frame in the system
- *
- * Independent of where linux is linked at, link-addr = physical address
- * So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
- * would have been wrong in case kernel is not at 0x8zs
+ * And for ARC, link-addr = physical address
*/
-#define __pa(vaddr) ((unsigned long)vaddr)
-#define __va(paddr) ((void *)((unsigned long)(paddr)))
+#define __pa(vaddr) ((unsigned long)(vaddr))
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
-#define virt_to_page(kaddr) \
- (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds.
+ * As a rule of thumb, only use it in helpers starting with virt_
+ * You have been warned!
+ */
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
/* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
#define WANT_PAGE_VIRTUAL 1
#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
#include <asm-generic/getorder.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
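A standalone demonstration (user-space, not kernel code) of why PAE40 needs the separate PAGE_MASK_PHYS and why the comment above warns against virt_to_pfn() in pte/paddr related macros: masking a 40-bit physical address with the 32-bit PAGE_MASK silently drops bits 39:32. The 8K PAGE_SHIFT is just an assumption for the demo.

  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SHIFT	13					/* 8K pages, for example */
  #define PAGE_MASK	((uint32_t)~((1U << PAGE_SHIFT) - 1))	/* 32-bit wide, as on ARC */
  #define PAGE_MASK_PHYS	(0xff00000000ull | PAGE_MASK)		/* PAE40: keep bits 39:32 */

  int main(void)
  {
  	uint64_t paddr = 0x1234567890ull;	/* a 40-bit physical address */

  	/* The plain 32-bit PAGE_MASK silently drops physical bits 39:32 ... */
  	printf("paddr & PAGE_MASK      = 0x%010llx\n",
  	       (unsigned long long)(paddr & PAGE_MASK));

  	/* ... while PAGE_MASK_PHYS preserves them */
  	printf("paddr & PAGE_MASK_PHYS = 0x%010llx\n",
  	       (unsigned long long)(paddr & PAGE_MASK_PHYS));
  	return 0;
  }

The first line prints 0x0034566000 (top byte lost), the second 0x1234566000, which is the truncation hazard the pgalloc/pgtable comments keep referring to.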
diff --git a/arch/arc/include/asm/pci.h b/arch/arc/include/asm/pci.h
new file mode 100644
index 000000000000..a6858e111764
--- /dev/null
+++ b/arch/arc/include/asm/pci.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef _ASM_ARC_PCI_H
+#define _ASM_ARC_PCI_H
+
+#ifdef __KERNEL__
+#include <linux/ioport.h>
+
+#define PCIBIOS_MIN_IO 0x100
+#define PCIBIOS_MIN_MEM 0x100000
+
+#define pcibios_assign_all_busses() 1
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_PCI_H */
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 115ad96480e6..d5719a260864 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -1,13 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Linux performance counter support for ARC
*
+ * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
*/
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
+/* Max number of counters that PCT block may ever have */
+#define ARC_PERF_MAX_COUNTERS 32
+
+#define ARC_REG_CC_BUILD 0xF6
+#define ARC_REG_CC_INDEX 0x240
+#define ARC_REG_CC_NAME0 0x241
+#define ARC_REG_CC_NAME1 0x242
+
+#define ARC_REG_PCT_BUILD 0xF5
+#define ARC_REG_PCT_COUNTL 0x250
+#define ARC_REG_PCT_COUNTH 0x251
+#define ARC_REG_PCT_SNAPL 0x252
+#define ARC_REG_PCT_SNAPH 0x253
+#define ARC_REG_PCT_CONFIG 0x254
+#define ARC_REG_PCT_CONTROL 0x255
+#define ARC_REG_PCT_INDEX 0x256
+#define ARC_REG_PCT_INT_CNTL 0x25C
+#define ARC_REG_PCT_INT_CNTH 0x25D
+#define ARC_REG_PCT_INT_CTRL 0x25E
+#define ARC_REG_PCT_INT_ACT 0x25F
+
+#define ARC_REG_PCT_CONFIG_USER (1 << 18) /* count in user mode */
+#define ARC_REG_PCT_CONFIG_KERN (1 << 19) /* count in kernel mode */
+
+#define ARC_REG_PCT_CONTROL_CC (1 << 16) /* clear counts */
+#define ARC_REG_PCT_CONTROL_SN (1 << 17) /* snapshot */
+
+struct arc_reg_pct_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int m:8, c:8, r:5, i:1, s:2, v:8;
+#else
+ unsigned int v:8, s:2, i:1, r:5, c:8, m:8;
+#endif
+};
+
+struct arc_reg_cc_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int c:16, r:8, v:8;
+#else
+ unsigned int v:8, r:8, c:16;
+#endif
+};
+
+#define PERF_COUNT_ARC_DCLM (PERF_COUNT_HW_MAX + 0)
+#define PERF_COUNT_ARC_DCSM (PERF_COUNT_HW_MAX + 1)
+#define PERF_COUNT_ARC_ICM (PERF_COUNT_HW_MAX + 2)
+#define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3)
+#define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4)
+#define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5)
+#define PERF_COUNT_ARC_LDC (PERF_COUNT_HW_MAX + 6)
+#define PERF_COUNT_ARC_STC (PERF_COUNT_HW_MAX + 7)
+
+#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 8)
+
+#ifdef CONFIG_PERF_EVENTS
+#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
+#endif
+
#endif /* __ASM_PERF_EVENT_H */
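The PCT counters are up to 64 bits wide and are accessed indirectly: select a counter via ARC_REG_PCT_INDEX, latch a snapshot, then read the two halves. Below is a sketch in the spirit of the ARC PCT perf driver; the function name is illustrative and read_aux_reg()/write_aux_reg() are assumed from asm/arcregs.h.

  /* Illustrative only: read one 64-bit performance counter */
  static unsigned long long sketch_pct_read_counter(int idx)
  {
  	unsigned int tmp;
  	unsigned long long result;

  	/* Select which of the (up to 32) counters subsequent accesses hit */
  	write_aux_reg(ARC_REG_PCT_INDEX, idx);

  	/* Latch a coherent snapshot so the two 32-bit halves can't tear */
  	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
  	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);

  	result  = (unsigned long long)read_aux_reg(ARC_REG_PCT_SNAPH) << 32;
  	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

  	return result;
  }

The snapshot bit is what makes the COUNTL/COUNTH pair safe to read without worrying about the low half wrapping between the two reads.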
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 36a9f20c21a3..dfae070fe8d5 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* vineetg: June 2011
* -"/proc/meminfo | grep PageTables" kept on increasing
* Recently added pgtable dtor was not getting called.
@@ -21,10 +18,10 @@
* vineetg: April 2010
* -Switched pgtable_t from being struct page * to unsigned long
* =Needed so that Page Table allocator (pte_alloc_one) is not forced to
- * to deal with struct page. Thay way in future we can make it allocate
+ * deal with struct page. That way in future we can make it allocate
* multiple PG Tbls in one Page Frame
* =sweet side effect is avoiding calls to ugly page_address( ) from the
- * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate
+ * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate)
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
@@ -34,101 +31,62 @@
#include <linux/mm.h>
#include <linux/log2.h>
+#include <asm-generic/pgalloc.h>
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
- pmd_set(pmd, pte);
+ /*
+ * The cast to long below is OK in the 32-bit PAE40 regime with long long pte.
+ * Despite the "wider" pte, the pte table needs to be in non-PAE low memory
+ * as all higher levels can only hold long pointers.
+ *
+ * The cast itself is needed given the simplistic definition of set_pmd()
+ */
+ set_pmd(pmd, __pmd((unsigned long)pte));
}
-static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
{
- pmd_set(pmd, (pte_t *) ptep);
-}
-
-static inline int __get_order_pgd(void)
-{
- return get_order(PTRS_PER_PGD * 4);
+ set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- int num, num2;
- pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+ pgd_t *ret = __pgd_alloc(mm, 0);
if (ret) {
- num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
- memzero(ret, num * sizeof(pgd_t));
+ int num, num2;
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
num2 = VMALLOC_SIZE / PGDIR_SIZE;
memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
-
- memzero(ret + num + num2,
- (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
-
}
return ret;
}
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- free_pages((unsigned long)pgd, __get_order_pgd());
-}
-
+#if CONFIG_PGTABLE_LEVELS > 3
-/*
- * With software-only page-tables, addr-split for traversal is tweakable and
- * that directly governs how big tables would be at each level.
- * Further, the MMU page size is configurable.
- * Thus we need to programatically assert the size constraint
- * All of this is const math, allowing gcc to do constant folding/propagation.
- */
-
-static inline int __get_order_pte(void)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
{
- return get_order(PTRS_PER_PTE * 4);
+ set_p4d(p4dp, __p4d((unsigned long)pudp));
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
-{
- pte_t *pte;
+#define __pud_free_tlb(tlb, pmd, addr) pud_free((tlb)->mm, pmd)
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
- __get_order_pte());
+#endif
- return pte;
-}
+#if CONFIG_PGTABLE_LEVELS > 2
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
{
- pgtable_t pte_pg;
-
- pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
- if (pte_pg) {
- memzero((void *)pte_pg, PTRS_PER_PTE * 4);
- pgtable_page_ctor(virt_to_page(pte_pg));
- }
-
- return pte_pg;
+ set_pud(pudp, __pud((unsigned long)pmdp));
}
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
-}
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
-{
- pgtable_page_dtor(virt_to_page(ptep));
- free_pages(ptep, __get_order_pte());
-}
+#endif
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
-#define check_pgt_cache() do { } while (0)
-#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
-
#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
new file mode 100644
index 000000000000..4630c5acca05
--- /dev/null
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * page table flags for software walked/managed MMUv3 (ARC700) and MMUv4 (HS)
+ * These correspond to the respective bits in the TLB
+ */
+
+#ifndef _ASM_ARC_PGTABLE_BITS_ARCV2_H
+#define _ASM_ARC_PGTABLE_BITS_ARCV2_H
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_CACHEABLE (1 << 0) /* Cached (H) */
+#else
+#define _PAGE_CACHEABLE 0
+#endif
+
+#define _PAGE_EXECUTE (1 << 1) /* User Execute (H) */
+#define _PAGE_WRITE (1 << 2) /* User Write (H) */
+#define _PAGE_READ (1 << 3) /* User Read (H) */
+#define _PAGE_ACCESSED (1 << 4) /* Accessed (s) */
+#define _PAGE_DIRTY (1 << 5) /* Modified (s) */
+#define _PAGE_SPECIAL (1 << 6)
+#define _PAGE_GLOBAL (1 << 8) /* ASID agnostic (H) */
+#define _PAGE_PRESENT (1 << 9) /* PTE/TLB Valid (H) */
+
+/* We borrow bit 5 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_DIRTY
+
+#ifdef CONFIG_ARC_MMU_V4
+#define _PAGE_HW_SZ (1 << 10) /* Normal/super (H) */
+#else
+#define _PAGE_HW_SZ 0
+#endif
+
+/* Defaults for every user page */
+#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SPECIAL)
+
+/* More Abbreviated helpers */
+#define PAGE_U_NONE __pgprot(___DEF)
+#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R __pgprot(___DEF \
+ | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
+#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL \
+ | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
+
+#define PAGE_SHARED PAGE_U_W_R
+
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+/*
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ * which directly corresponds to PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ * 1. Although ARC700 can do exclusive execute/write protection (meaning R
+ * can be tracked independently of X/W unlike some other CPUs), still to
+ * keep things consistent with other archs:
+ * -Write implies Read: W => R
+ * -Execute implies Read: X => R
+ *
+ * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ * This is to enable COW mechanism
+ */
+ /* xwr */
+#ifndef __ASSEMBLER__
+
+#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
+#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte) (pte_val(pte) & _PAGE_SPECIAL)
+
+#define PTE_BIT_FUNC(fn, op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
+PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite_novma, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
+PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
+PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
+PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ));
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+struct vm_fault;
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr);
+
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <-------------- offset -------------> <--- zero --> E < type ->
+ *
+ * E is the exclusive marker that is not stored in swap entries.
+ * The zero'ed bits include _PAGE_PRESENT.
+ */
+#define __swp_entry(type, off) ((swp_entry_t) \
+ { ((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier" into constituents */
+#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+static inline bool pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+PTE_BIT_FUNC(swp_mkexclusive, |= (_PAGE_SWP_EXCLUSIVE));
+PTE_BIT_FUNC(swp_clear_exclusive, &= ~(_PAGE_SWP_EXCLUSIVE));
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <asm/hugepage.h>
+#endif
+
+#endif /* __ASSEMBLER__ */
+
+#endif
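The swap PTE format documented above is plain bit packing. The following standalone user-space snippet (illustrative macro names, not the kernel's) mirrors __swp_entry()/__swp_type()/__swp_offset() and the exclusive marker.

  #include <stdint.h>
  #include <stdio.h>

  /* Layout from the comment above: <offset> <zeroes incl. PRESENT> <E> <type:5> */
  #define SWP_TYPE_MASK	0x1fu
  #define SWP_OFF_SHIFT	13
  #define SWP_EXCLUSIVE	(1u << 5)	/* reuses the _PAGE_DIRTY position */

  #define swp_entry(type, off)	((((uint32_t)(type)) & SWP_TYPE_MASK) | \
  				 ((uint32_t)(off) << SWP_OFF_SHIFT))
  #define swp_type(x)		((x) & SWP_TYPE_MASK)
  #define swp_offset(x)		((x) >> SWP_OFF_SHIFT)

  int main(void)
  {
  	uint32_t pte = swp_entry(3, 0x4567);	/* swap device 3, slot 0x4567 */

  	pte |= SWP_EXCLUSIVE;			/* mark as exclusively owned */

  	printf("pte=0x%08x type=%u off=0x%x excl=%d\n",
  	       pte, swp_type(pte), swp_offset(pte), !!(pte & SWP_EXCLUSIVE));
  	return 0;
  }

Because the type sits in bits 4:0 and the offset starts at bit 13, bit 9 (_PAGE_PRESENT) stays zero, which is what keeps a swap PTE distinguishable from a present one.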
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
new file mode 100644
index 000000000000..c8f9273372c0
--- /dev/null
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ */
+
+/*
+ * Helpers for implementing paging levels
+ */
+
+#ifndef _ASM_ARC_PGTABLE_LEVELS_H
+#define _ASM_ARC_PGTABLE_LEVELS_H
+
+#if CONFIG_PGTABLE_LEVELS == 2
+
+/*
+ * 2 level paging setup for software walked MMUv3 (ARC700) and MMUv4 (HS)
+ *
+ * [31] 32 bit virtual address [0]
+ * -------------------------------------------------------
+ * | | <---------- PGDIR_SHIFT ----------> |
+ * | | | <-- PAGE_SHIFT --> |
+ * -------------------------------------------------------
+ * | | |
+ * | | --> off in page frame
+ * | ---> index into Page Table
+ * ----> index into Page Directory
+ *
+ * Given the software walk, the vaddr split is arbitrarily set to 11:8:13.
+ * However enabling super pages in a 2 level regime pegs PGDIR_SHIFT to the
+ * super page size.
+ */
+
+#if defined(CONFIG_ARC_HUGEPAGE_16M)
+#define PGDIR_SHIFT 24
+#elif defined(CONFIG_ARC_HUGEPAGE_2M)
+#define PGDIR_SHIFT 21
+#else
+/*
+ * No Super page case
+ * Default value provides 11:8:13 (8K), 10:10:12 (4K)
+ * Limits are imposed by pgtable_t being only PAGE_SIZE long
+ * (so 4K page can only have 1K entries: or 10 bits)
+ */
+#ifdef CONFIG_ARC_PAGE_SIZE_4K
+#define PGDIR_SHIFT 22
+#else
+#define PGDIR_SHIFT 21
+#endif
+
+#endif
+
+#else /* CONFIG_PGTABLE_LEVELS != 2 */
+
+/*
+ * A default 3 level paging testing setup in software walked MMU
+ * MMUv4 (8K page): <4> : <7> : <8> : <13>
+ * A default 4 level paging testing setup in software walked MMU
+ * MMUv4 (8K page): <4> : <3> : <4> : <8> : <13>
+ */
+#define PGDIR_SHIFT 28
+#if CONFIG_PGTABLE_LEVELS > 3
+#define PUD_SHIFT 25
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+#define PMD_SHIFT 21
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS */
+
+#define PGDIR_SIZE BIT(PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+#define PTRS_PER_PGD BIT(32 - PGDIR_SHIFT)
+
+#if CONFIG_PGTABLE_LEVELS > 3
+#define PUD_SIZE BIT(PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+#define PTRS_PER_PUD BIT(PGDIR_SHIFT - PUD_SHIFT)
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+#define PMD_SIZE BIT(PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+#define PTRS_PER_PMD BIT(PUD_SHIFT - PMD_SHIFT)
+#endif
+
+#define PTRS_PER_PTE BIT(PMD_SHIFT - PAGE_SHIFT)
+
+#ifndef __ASSEMBLER__
+
+#if CONFIG_PGTABLE_LEVELS > 3
+#include <asm-generic/pgtable-nop4d.h>
+#elif CONFIG_PGTABLE_LEVELS > 2
+#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/pgtable-nopmd.h>
+#endif
+
+/*
+ * 1st level paging: pgd
+ */
+#define pgd_ERROR(e) \
+ pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+#if CONFIG_PGTABLE_LEVELS > 3
+
+/* In 4 level paging, p4d_* macros work on pgd */
+#define p4d_none(x) (!p4d_val(x))
+#define p4d_bad(x) ((p4d_val(x) & ~PAGE_MASK))
+#define p4d_present(x) (p4d_val(x))
+#define p4d_clear(xp) do { p4d_val(*(xp)) = 0; } while (0)
+#define p4d_pgtable(p4d) ((pud_t *)(p4d_val(p4d) & PAGE_MASK))
+#define p4d_page(p4d) virt_to_page(p4d_pgtable(p4d))
+#define set_p4d(p4dp, p4d) (*(p4dp) = p4d)
+
+/*
+ * 2nd level paging: pud
+ */
+#define pud_ERROR(e) \
+ pr_crit("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+/*
+ * In 3 level paging, pud_* macros work on pgd
+ * In 4 level paging, pud_* macros work on pud
+ */
+#define pud_none(x) (!pud_val(x))
+#define pud_bad(x) ((pud_val(x) & ~PAGE_MASK))
+#define pud_present(x) (pud_val(x))
+#define pud_clear(xp) do { pud_val(*(xp)) = 0; } while (0)
+#define pud_pgtable(pud) ((pmd_t *)(pud_val(pud) & PAGE_MASK))
+#define pud_page(pud) virt_to_page(pud_pgtable(pud))
+#define set_pud(pudp, pud) (*(pudp) = pud)
+
+/*
+ * 3rd level paging: pmd
+ */
+#define pmd_ERROR(e) \
+ pr_crit("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+
+#define pmd_pfn(pmd) ((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#endif
+
+/*
+ * Due to the strange way generic pgtable level folding works, the pmd_* macros
+ * - are valid even for 2 levels (which supposedly only has pgd - pte)
+ * - behave differently for 2 vs. 3
+ * In 2 level paging (pgd -> pte), pmd_* macros work on pgd
+ * In 3+ level paging (pgd -> pmd -> pte), pmd_* macros work on pmd
+ */
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x) (pmd_val(x))
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pmd_pfn(pmd) ((pmd_val(pmd) & PAGE_MASK) >> PAGE_SHIFT)
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
+#define set_pmd(pmdp, pmd) (*(pmdp) = pmd)
+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page(pmd))
+
+/*
+ * 4th level paging: pte
+ */
+#define pte_ERROR(e) \
+ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+
+#define PFN_PTE_SHIFT PAGE_SHIFT
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,ptep) set_pte_at(mm, addr, ptep, __pte(0))
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+#define set_pte(ptep, pte) ((*(ptep)) = (pte))
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
+
+#ifdef CONFIG_ISA_ARCV2
+#define pmd_leaf(x) (pmd_val(x) & _PAGE_HW_SZ)
+#endif
+
+#endif /* !__ASSEMBLER__ */
+
+#endif
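All of the split arithmetic above is compile-time constant math. The standalone snippet below reproduces the documented 4-level default (<4> : <3> : <4> : <8> : <13> with 8K pages); the shift values are just the defaults quoted in the comment, real kernels derive them from Kconfig.

  #include <stdio.h>

  #define BIT(n)		(1UL << (n))

  /* Default 4-level split documented above */
  #define PAGE_SHIFT	13
  #define PMD_SHIFT	21
  #define PUD_SHIFT	25
  #define PGDIR_SHIFT	28

  #define PTRS_PER_PGD	BIT(32 - PGDIR_SHIFT)
  #define PTRS_PER_PUD	BIT(PGDIR_SHIFT - PUD_SHIFT)
  #define PTRS_PER_PMD	BIT(PUD_SHIFT - PMD_SHIFT)
  #define PTRS_PER_PTE	BIT(PMD_SHIFT - PAGE_SHIFT)

  int main(void)
  {
  	printf("pgd:%lu pud:%lu pmd:%lu pte:%lu entries, page:%lu bytes\n",
  	       PTRS_PER_PGD, PTRS_PER_PUD, PTRS_PER_PMD, PTRS_PER_PTE,
  	       BIT(PAGE_SHIFT));
  	printf("each pgd entry covers %lu MB\n", BIT(PGDIR_SHIFT) / (1024 * 1024));
  	return 0;
  }

This prints 16/8/16/256 entries per level with 8K pages, each top-level entry spanning 256 MB of virtual address space.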
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 4749a0eee1cf..bd580e2b62d7 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -1,415 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * vineetg: May 2011
- * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
- * They are semantically the same although in different contexts
- * VALID marks a TLB entry exists and it will only happen if PRESENT
- * - Utilise some unused free bits to confine PTE flags to 12 bits
- * This is a must for 4k pg-sz
- *
- * vineetg: Mar 2011 - changes to accomodate MMU TLB Page Descriptor mods
- * -TLB Locking never really existed, except for initial specs
- * -SILENT_xxx not needed for our port
- * -Per my request, MMU V3 changes the layout of some of the bits
- * to avoid a few shifts in TLB Miss handlers.
- *
- * vineetg: April 2010
- * -PGD entry no longer contains any flags. If empty it is 0, otherwise has
- * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
- *
- * vineetg: April 2010
- * -Switched form 8:11:13 split for page table lookup to 11:8:13
- * -this speeds up page table allocation itself as we now have to memset 1K
- * instead of 8k per page table.
- * -TODO: Right now page table alloc is 8K and rest 7K is unused
- * need to optimise it
- *
- * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H
+#include <linux/bits.h>
+
+#include <asm/pgtable-levels.h>
+#include <asm/pgtable-bits-arcv2.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm-generic/pgtable-nopmd.h>
-
-/**************************************************************************
- * Page Table Flags
- *
- * ARC700 MMU only deals with softare managed TLB entries.
- * Page Tables are purely for Linux VM's consumption and the bits below are
- * suited to that (uniqueness). Hence some are not implemented in the TLB and
- * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
- * seperate PD0 and PD1, which combined forms a translation entry)
- * while for PTE perspective, they are 8 and 9 respectively
- * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
- * (saves some bit shift ops in TLB Miss hdlrs)
- */
-
-#if (CONFIG_ARC_MMU_VER <= 2)
-
-#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
-#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
-#define _PAGE_U_EXECUTE (1<<3) /* Page has user execute perm (H) */
-#define _PAGE_U_WRITE (1<<4) /* Page has user write perm (H) */
-#define _PAGE_U_READ (1<<5) /* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
-#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
-#define _PAGE_GLOBAL (1<<9) /* Page is global (H) */
-#define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */
-#define _PAGE_FILE (1<<10) /* page cache/ swap (S) */
-#define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */
-
-#else
-
-/* PD1 */
-#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
-#define _PAGE_U_EXECUTE (1<<1) /* Page has user execute perm (H) */
-#define _PAGE_U_WRITE (1<<2) /* Page has user write perm (H) */
-#define _PAGE_U_READ (1<<3) /* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
-#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
-#define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */
-
-/* PD0 */
-#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
-#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
-#define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr
- usable for shared TLB entries (H) */
-
-#define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */
-#define _PAGE_FILE (1<<12) /* page cache/ swap (S) */
-
-#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
-#endif
-/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
- _PAGE_GLOBAL | _PAGE_PRESENT)
-
-#ifdef CONFIG_ARC_CACHE_PAGES
-#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
-#else
-#define _PAGE_DEF_CACHEABLE (0)
-#endif
-
-/* Helper for every "user" page
- * -kernel can R/W/X
- * -by default cached, unless config otherwise
- * -present in memory
- */
-#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
-
-#define _PAGE_READ (_PAGE_U_READ | _PAGE_K_READ)
-#define _PAGE_WRITE (_PAGE_U_WRITE | _PAGE_K_WRITE)
-#define _PAGE_EXECUTE (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
-
-/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
-
-/* More Abbrevaited helpers */
-#define PAGE_U_NONE __pgprot(___DEF)
-#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
-#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
-#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
-#define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_EXECUTE)
-
-#define PAGE_SHARED PAGE_U_W_R
-
-/* While kernel runs out of unstrslated space, vmalloc/modules use a chunk of
- * kernel vaddr space - visible in all addr spaces, but kernel mode only
- * Thus Global, all-kernel-access, no-user-access, cached
- */
-#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
-
-/* ioremap */
-#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
-
-/* Masks for actual TLB "PD"s */
-#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
-#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
- _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
- _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
-
-/**************************************************************************
- * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
- *
- * Certain cases have 1:1 mapping
- * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
- * which directly corresponds to PAGE_U_X_R
- *
- * Other rules which cause the divergence from 1:1 mapping
- *
- * 1. Although ARC700 can do exclusive execute/write protection (meaning R
- * can be tracked independet of X/W unlike some other CPUs), still to
- * keep things consistent with other archs:
- * -Write implies Read: W => R
- * -Execute implies Read: X => R
- *
- * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
- * This is to enable COW mechanism
- */
- /* xwr */
-#define __P000 PAGE_U_NONE
-#define __P001 PAGE_U_R
-#define __P010 PAGE_U_R /* Pvt-W => !W */
-#define __P011 PAGE_U_R /* Pvt-W => !W */
-#define __P100 PAGE_U_X_R /* X => R */
-#define __P101 PAGE_U_X_R
-#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
-#define __P111 PAGE_U_X_R /* Pvt-W => !W */
-
-#define __S000 PAGE_U_NONE
-#define __S001 PAGE_U_R
-#define __S010 PAGE_U_W_R /* W => R */
-#define __S011 PAGE_U_W_R
-#define __S100 PAGE_U_X_R /* X => R */
-#define __S101 PAGE_U_X_R
-#define __S110 PAGE_U_X_W_R /* X => R */
-#define __S111 PAGE_U_X_W_R
-
-/****************************************************************
- * Page Table Lookup split
- *
- * We implement 2 tier paging and since this is all software, we are free
- * to customize the span of a PGD / PTE entry to suit us
- *
- * 32 bit virtual address
- * -------------------------------------------------------
- * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
- * -------------------------------------------------------
- * | | |
- * | | --> off in page frame
- * | |
- * | ---> index into Page Table
- * |
- * ----> index into Page Directory
- */
-
-#define BITS_IN_PAGE PAGE_SHIFT
-
-/* Optimal Sizing of Pg Tbl - based on MMU page size */
-#if defined(CONFIG_ARC_PAGE_SIZE_8K)
-#define BITS_FOR_PTE 8
-#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
-#define BITS_FOR_PTE 8
-#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
-#define BITS_FOR_PTE 9
-#endif
-
-#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
-
-#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-#ifdef __ASSEMBLY__
-#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
-#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
-#else
-#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
-#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
-#endif
/*
* Number of entries a user land program use.
* TASK_SIZE is the maximum vaddr that can be used by a userland program.
*/
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-/*
- * No special requirements for lowest virtual address we permit any user space
- * mapping to be mapped at.
- */
-#define FIRST_USER_ADDRESS 0
-
-
-/****************************************************************
- * Bucket load of VM Helpers
- */
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
-#define pte_ERROR(e) \
- pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pgd_ERROR(e) \
- pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
-
-/* find the page descriptor of the Page Tbl ref by PMD entry */
-#define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
-
-/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
-#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
-
-/* In a 2 level sys, setup the PGD entry with PTE value */
-static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
-{
- pmd_val(*pmdp) = (unsigned long)ptep;
-}
-
-#define pte_none(x) (!pte_val(x))
-#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))
-
-#define pmd_none(x) (!pmd_val(x))
-#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
-#define pmd_present(x) (pmd_val(x))
-#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
-
-#define pte_page(x) (mem_map + \
- (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
-
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
- pte; \
-})
-
-/* TBD: Non linear mapping stuff */
-static inline int pte_file(pte_t pte)
-{
- return pte_val(pte) & _PAGE_FILE;
-}
-
-#define PTE_FILE_MAX_BITS 30
-#define pgoff_to_pte(x) __pte(x)
-#define pte_to_pgoff(x) (pte_val(x) >> 2)
-#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-/*
- * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
- * and returns ptr to PTE entry corresponding to @addr
- */
-#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
- __pte_index(addr))
-
-/* No mapping of Page Tables in high mem etc, so following same as above */
-#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
-#define pte_offset_map(dir, addr) pte_offset(dir, addr)
-
-/* Zoo of pte_xxx function */
-#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
-#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
-#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
-#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
-#define pte_special(pte) (0)
-
-#define PTE_BIT_FUNC(fn, op) \
- static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
-
-PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
-PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
-PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
-PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
-PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
-PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
-PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
-PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
-
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
-}
-
-/* Macro to mark a page protection as uncacheable */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- set_pte(ptep, pteval);
-}
-
-/*
- * All kernel related VM pages are in init's mm.
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
-
-/*
- * Macro to quickly access the PGD entry, utlising the fact that some
- * arch may cache the pointer to Page Directory of "current" task
- * in a MMU register
- *
- * Thus task->mm->pgd (3 pointer dereferences, cache misses etc simply
- * becomes read a register
- *
- * ********CAUTION*******:
- * Kernel code might be dealing with some mm_struct of NON "current"
- * Thus use this macro only when you are certain that "current" is current
- * e.g. when dealing with signal frame setup code etc
- */
-#ifndef CONFIG_SMP
-#define pgd_offset_fast(mm, addr) \
-({ \
- pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
- pgd_base + pgd_index(addr); \
-})
-#else
-#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
-#endif
-
-extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t *ptep);
-
-/* Encode swap {type,off} tuple into PTE
- * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
- * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
- */
-#define __swp_entry(type, off) ((swp_entry_t) { \
- ((type) & 0x1f) | ((off) << 13) })
-
-/* Decode a PTE containing swap "identifier "into constituents */
-#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13)
-
-/* NOPs, to keep generic kernel happy */
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#define kern_addr_valid(addr) (1)
-
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#include <asm-generic/pgtable.h>
/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 15334ab66b56..7f7901ac6643 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* vineetg: March 2009
* -Implemented task_pt_regs( )
*
@@ -14,130 +11,87 @@
#ifndef __ASM_ARC_PROCESSOR_H
#define __ASM_ARC_PROCESSOR_H
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
-#include <asm/arcregs.h> /* for STATUS_E1_MASK et all */
#include <asm/ptrace.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
/* Arch specific stuff which needs to be saved per task.
* However these items are not so important so as to earn a place in
* struct thread_info
*/
struct thread_struct {
- unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ struct dsp_callee_regs dsp;
+#endif
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
struct arc_fpu fpu;
#endif
};
-#define INIT_THREAD { \
- .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
-}
+#define INIT_THREAD { }
/* Forward declaration, a strange C thing */
struct task_struct;
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
-/* Free all resources held by a thread. */
-#define release_thread(thread) do { } while (0)
-
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
/*
* A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
* get optimised away by gcc
*/
-#ifdef CONFIG_SMP
-#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
-#else
-#define cpu_relax() do { } while (0)
-#endif
-
-#define copy_segments(tsk, mm) do { } while (0)
-#define release_segments(mm) do { } while (0)
+#define cpu_relax() barrier()
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
-#define KSTK_ESP(tsk) (tsk->thread.ksp)
+#define TSK_K_ESP(tsk) (task_thread_info(tsk)->ksp)
-#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
+#define TSK_K_REG(tsk, off) (*((unsigned long *)(TSK_K_ESP(tsk) + \
sizeof(struct callee_regs) + off)))
-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
-#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
+#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
+
+extern void start_thread(struct pt_regs * regs, unsigned long pc,
+ unsigned long usp);
+
+extern unsigned int __get_wchan(struct task_struct *p);
+
+#endif /* !__ASSEMBLER__ */
/*
- * Do necessary setup to start up a newly executed thread.
+ * Default System Memory Map on ARC
+ *
+ * ---------------------------- (lower 2G, Translated) -------------------------
+ * 0x0000_0000 0x5FFF_FFFF (user vaddr: TASK_SIZE)
+ * 0x6000_0000 0x6FFF_FFFF (reserved gutter between U/K)
+ * 0x7000_0000 0x7FFF_FFFF (kvaddr: vmalloc/modules/pkmap..)
*
- * E1,E2 so that Interrupts are enabled in user mode
- * L set, so Loop inhibited to begin with
- * lp_start and lp_end seeded with bogus non-zero values so to easily catch
- * the ARC700 sr to lp_start hardware bug
+ * PAGE_OFFSET ---------------- (Upper 2G, Untranslated) -----------------------
+ * 0x8000_0000 0xBFFF_FFFF (kernel direct mapped)
+ * 0xC000_0000 0xFFFF_FFFF (peripheral uncached space)
+ * -----------------------------------------------------------------------------
*/
-#define start_thread(_regs, _pc, _usp) \
-do { \
- set_fs(USER_DS); /* reads from user space */ \
- (_regs)->ret = (_pc); \
- /* Interrupts enabled in User Mode */ \
- (_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK \
- | STATUS_E1_MASK | STATUS_E2_MASK; \
- (_regs)->sp = (_usp); \
- /* bogus seed values for debugging */ \
- (_regs)->lp_start = 0x10; \
- (_regs)->lp_end = 0x80; \
-} while (0)
-
-extern unsigned int get_wchan(struct task_struct *p);
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- * Should the PC register be read instead ? This macro does not seem to
- * be used in many places so this wont be all that bad.
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+#define TASK_SIZE 0x60000000
-#endif /* !__ASSEMBLY__ */
+#define VMALLOC_START (PAGE_OFFSET - (CONFIG_ARC_KVADDR_SIZE << 20))
-/* Kernels Virtual memory area.
- * Unlike other architectures(MIPS, sh, cris ) ARC 700 does not have a
- * "kernel translated" region (like KSEG2 in MIPS). So we use a upper part
- * of the translated bottom 2GB for kernel virtual memory and protect
- * these pages from user accesses by disabling Ru, Eu and Wu.
- */
-#define VMALLOC_SIZE (0x10000000) /* 256M */
-#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
-#define VMALLOC_END (PAGE_OFFSET)
+/* 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter (see asm/highmem.h) */
+#define VMALLOC_SIZE ((CONFIG_ARC_KVADDR_SIZE << 20) - PMD_SIZE * 4)
-/* Most of the architectures seem to be keeping some kind of padding between
- * userspace TASK_SIZE and PAGE_OFFSET. i.e TASK_SIZE != PAGE_OFFSET.
- */
-#define USER_KERNEL_GUTTER 0x10000000
-
-/* User address space:
- * On ARC700, CPU allows the entire lower half of 32 bit address space to be
- * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
- * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
- * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
- * Thus total User vaddr space is (0:0x5FFF_FFFF)
- */
-#define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
+#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+
+#define USER_KERNEL_GUTTER (VMALLOC_START - TASK_SIZE)
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
@@ -147,6 +101,4 @@ extern unsigned int get_wchan(struct task_struct *p);
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
-#endif /* __KERNEL__ */
-
#endif /* __ASM_ARC_PROCESSOR_H */
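
The address-space constants introduced above are all derived at build time from PAGE_OFFSET and CONFIG_ARC_KVADDR_SIZE. The standalone sketch below is not kernel code; it assumes the default PAGE_OFFSET of 0x8000_0000, a 256 MB kernel vaddr window and an illustrative 2 MB PMD_SIZE, and simply prints how those pieces partition the translated lower 2 GB:

#include <stdio.h>

/* Assumed values: default PAGE_OFFSET, CONFIG_ARC_KVADDR_SIZE=256 (MB),
 * and an illustrative 2 MB PMD_SIZE */
#define PAGE_OFFSET        0x80000000UL
#define ARC_KVADDR_SIZE    256UL
#define PMD_SIZE           (2UL << 20)

#define TASK_SIZE          0x60000000UL
#define VMALLOC_START      (PAGE_OFFSET - (ARC_KVADDR_SIZE << 20))
#define VMALLOC_SIZE       ((ARC_KVADDR_SIZE << 20) - PMD_SIZE * 4)
#define VMALLOC_END        (VMALLOC_START + VMALLOC_SIZE)
#define USER_KERNEL_GUTTER (VMALLOC_START - TASK_SIZE)

int main(void)
{
	printf("user   : 0x00000000 - 0x%08lx\n", TASK_SIZE - 1);
	printf("gutter : 0x%08lx - 0x%08lx (%lu MB)\n",
	       TASK_SIZE, VMALLOC_START - 1, USER_KERNEL_GUTTER >> 20);
	printf("vmalloc: 0x%08lx - 0x%08lx (fixmap/pkmap carved off the top)\n",
	       VMALLOC_START, VMALLOC_END - 1);
	return 0;
}
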
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
deleted file mode 100644
index 692d0d0789a7..000000000000
--- a/arch/arc/include/asm/prom.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_PROM_H_
-#define _ASM_ARC_PROM_H_
-
-#define HAVE_ARCH_DEVTREE_FIXUPS
-
-#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index c9938e7a7dbd..f6c052af8f4d 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -1,48 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
#ifndef __ASM_ARC_PTRACE_H
#define __ASM_ARC_PTRACE_H
#include <uapi/asm/ptrace.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLER__
-#ifndef __ASSEMBLY__
+typedef union {
+ struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned long state:8, vec:8, cause:8, param:8;
+#else
+ unsigned long param:8, cause:8, vec:8, state:8;
+#endif
+ };
+ unsigned long full;
+} ecr_reg;
/* THE pt_regs: Defines how regs are saved during entry into kernel */
+#ifdef CONFIG_ISA_ARCOMPACT
struct pt_regs {
/* Real registers */
- long bta; /* bta_l1, bta_l2, erbta */
- long lp_start;
- long lp_end;
- long lp_count;
- long status32; /* status32_l1, status32_l2, erstatus */
- long ret; /* ilink1, ilink2 or eret */
- long blink;
- long fp;
- long r26; /* gp */
- long r12;
- long r11;
- long r10;
- long r9;
- long r8;
- long r7;
- long r6;
- long r5;
- long r4;
- long r3;
- long r2;
- long r1;
- long r0;
- long sp; /* user/kernel sp depending on where we came from */
- long orig_r0;
+ unsigned long bta; /* bta_l1, bta_l2, erbta */
+
+ unsigned long lp_start, lp_end, lp_count;
+
+ unsigned long status32; /* status32_l1, status32_l2, erstatus */
+ unsigned long ret; /* ilink1, ilink2 or eret */
+ unsigned long blink;
+ unsigned long fp;
+ unsigned long r26; /* gp */
+
+ unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
+ unsigned long sp; /* User/Kernel depending on where we came from */
+ unsigned long orig_r0;
/*
* To distinguish bet excp, syscall, irq
@@ -51,40 +51,59 @@ struct pt_regs {
* Last word used by Linux for extra state mgmt (syscall-restart)
* For interrupts, use artificial ECR values to note current prio-level
*/
- union {
- struct {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned long state:8, ecr_vec:8,
- ecr_cause:8, ecr_param:8;
+ ecr_reg ecr;
+};
+
+struct callee_regs {
+ unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+};
+
+#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
+
#else
- unsigned long ecr_param:8, ecr_cause:8,
- ecr_vec:8, state:8;
+
+struct pt_regs {
+
+ unsigned long orig_r0;
+
+ ecr_reg ecr; /* Exception Cause Reg */
+
+ unsigned long bta; /* erbta */
+
+ unsigned long fp;
+ unsigned long r30;
+ unsigned long r12;
+ unsigned long r26; /* gp */
+
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
+#endif
+#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
+ unsigned long DSP_CTRL;
#endif
- };
- unsigned long event;
- };
- long user_r25;
-};
+ unsigned long sp; /* user/kernel sp depending on entry */
+
+ /*------- Below list auto saved by h/w -----------*/
+ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+
+ unsigned long blink;
+ unsigned long lp_end, lp_start, lp_count;
-/* Callee saved registers - need to be saved only when you are scheduled out */
+ unsigned long ei, ldi, jli;
+
+ unsigned long ret;
+ unsigned long status32;
+};
struct callee_regs {
- long r25;
- long r24;
- long r23;
- long r22;
- long r21;
- long r20;
- long r19;
- long r18;
- long r17;
- long r16;
- long r15;
- long r14;
- long r13;
+ unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
+#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
+
+#endif
+
#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
@@ -103,13 +122,13 @@ struct callee_regs {
/* return 1 if PC in delay slot */
#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
-#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
-#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
+#define in_syscall(regs) ((regs->ecr.vec == ECR_V_TRAP) && !regs->ecr.param)
+#define in_brkpt_trap(regs) ((regs->ecr.vec == ECR_V_TRAP) && regs->ecr.param)
#define STATE_SCALL_RESTARTED 0x01
-#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
-#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
+#define syscall_wont_restart(regs) (regs->ecr.state |= STATE_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->ecr.state & STATE_SCALL_RESTARTED)
#define current_pt_regs() \
({ \
@@ -121,9 +140,38 @@ struct callee_regs {
static inline long regs_return_value(struct pt_regs *regs)
{
- return regs->r0;
+ return (long)regs->r0;
}
-#endif /* !__ASSEMBLY__ */
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ instruction_pointer(regs) = val;
+}
+
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+extern int syscall_trace_enter(struct pt_regs *);
+extern void syscall_trace_exit(struct pt_regs *);
+
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_PTRACE_H */
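
The ecr_reg union added above replaces the old open-coded bitfields, so helpers such as in_syscall() now read ecr.vec/ecr.param directly. A small user-space sketch of the same decoding (little-endian layout only, fields narrowed to 32 bits for a 64-bit host; the 0x25 trap vector is an assumed value for illustration):

#include <stdio.h>

/* Standalone copy of the little-endian ecr_reg layout shown above */
typedef union {
	struct {
		unsigned int param:8, cause:8, vec:8, state:8;
	};
	unsigned int full;
} ecr_reg;

#define ECR_V_TRAP 0x25		/* assumed vector value, for illustration only */

int main(void)
{
	ecr_reg ecr = { .full = 0x00250000 };	/* vec = 0x25, cause = param = 0 */

	/* mirrors in_syscall(): a TRAP with param == 0 denotes a syscall */
	if (ecr.vec == ECR_V_TRAP && !ecr.param)
		printf("syscall trap (vec=0x%x)\n", ecr.vec);

	return 0;
}
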
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
index 6fc1159dfefe..860b4fd67a54 100644
--- a/arch/arc/include/asm/sections.h
+++ b/arch/arc/include/asm/sections.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SECTIONS_H
@@ -11,8 +8,6 @@
#include <asm-generic/sections.h>
-extern char _int_vec_base_lds[];
extern char __arc_dccm_base[];
-extern char __dtb_start[];
#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
deleted file mode 100644
index da2c45979817..000000000000
--- a/arch/arc/include/asm/segment.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASMARC_SEGMENT_H
-#define __ASMARC_SEGMENT_H
-
-#ifndef __ASSEMBLY__
-
-typedef unsigned long mm_segment_t;
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(0)
-#define USER_DS MAKE_MM_SEG(TASK_SIZE)
-
-#define segment_eq(a, b) ((a) == (b))
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
index 602b0970a764..83062c8b97ad 100644
--- a/arch/arc/include/asm/serial.h
+++ b/arch/arc/include/asm/serial.h
@@ -1,35 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SERIAL_H
#define _ASM_ARC_SERIAL_H
/*
- * early-8250 requires BASE_BAUD to be defined and includes this header.
- * We put in a typical value:
- * (core clk / 16) - i.e. UART samples 16 times per sec.
- * Athough in multi-platform-image this might not work, specially if the
- * clk driving the UART is different.
- * We can't use DeviceTree as this is typically for early serial.
+ * early 8250 (now earlycon) requires BASE_BAUD to be defined in this header.
+ * However to still determine it dynamically (for multi-platform images)
+ * we do this in a helper by parsing the FDT early
*/
-#include <asm/clk.h>
+extern unsigned int __init arc_early_base_baud(void);
-#define BASE_BAUD (arc_get_core_freq() / 16)
-
-/*
- * This is definitely going to break early 8250 consoles on multi-platform
- * images but hey, it won't add any code complexity for a debug feature of
- * one broken driver.
- */
-#ifdef CONFIG_ARC_PLAT_TB10X
-#undef BASE_BAUD
-#define BASE_BAUD (arc_get_core_freq() / 16 / 3)
-#endif
+#define BASE_BAUD arc_early_base_baud()
#endif /* _ASM_ARC_SERIAL_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 229e50681497..1c6db599e1fc 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#ifndef __ASMARC_SETUP_H
-#define __ASMARC_SETUP_H
+#ifndef __ASM_ARC_SETUP_H
+#define __ASM_ARC_SETUP_H
#include <linux/types.h>
@@ -23,15 +20,26 @@ struct id_to_str {
const char *str;
};
-struct cpuinfo_data {
- struct id_to_str info;
- int up_range;
-};
-
extern int root_mountflags, end_mem;
-extern int running_on_hw;
-void __init setup_processor(void);
+void setup_processor(void);
void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
+
+/* Helpers used in arc_*_mumbojumbo routines */
+#define IS_AVAIL1(v, s) ((v) ? s : "")
+#define IS_DISABLED_RUN(v) ((v) ? "" : "(disabled) ")
+#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
+
+extern void arc_mmu_init(void);
+extern int arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+
+extern void arc_cache_init(void);
+extern int arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+
+extern void __init handle_uboot_args(void);
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
index fffeecc04270..719112af0f41 100644
--- a/arch/arc/include/asm/shmparam.h
+++ b/arch/arc/include/asm/shmparam.h
@@ -1,15 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ARC_ASM_SHMPARAM_H
#define __ARC_ASM_SHMPARAM_H
-/* Handle upto 2 cache bins */
+/* Handle up to 2 cache bins */
#define SHMLBA (2 * PAGE_SIZE)
/* Enforce SHMLBA in shmat */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index c4fb211dcd25..990f834909f0 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_SMP_H
@@ -30,40 +27,57 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
* APIs provided by arch SMP code to rest of arch code
*/
extern void __init smp_init_cpus(void);
-extern void __init first_lines_of_secondary(void);
+extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
+extern void arc_platform_smp_wait_to_boot(int);
+extern void start_kernel_secondary(void);
/*
* API expected BY platform smp code (FROM arch smp code)
*
* smp_ipi_irq_setup:
- * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ * Takes @cpu and @hwirq to which the arch-common ISR is hooked up
*/
-extern int smp_ipi_irq_setup(int cpu, int irq);
+extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);
/*
* struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
*
* @info: SoC SMP specific info for /proc/cpuinfo etc
+ * @init_early_smp: An SMP specific h/w block can init itself
+ * Could be common across platforms so not covered by
+ * mach_desc->init_early()
+ * @init_per_cpu: Called for each core so SMP h/w block driver can do
+ * any needed setup per cpu (e.g. IPI request)
* @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
- * @ipi_send: To send IPI to a @cpumask
- * @ips_clear: To clear IPI received by @cpu at @irq
+ * @ipi_send: To send IPI to a @cpu
+ * @ipi_clear: To clear IPI received at @irq
*/
struct plat_smp_ops {
const char *info;
+ void (*init_early_smp)(void);
+ void (*init_per_cpu)(int cpu);
void (*cpu_kick)(int cpu, unsigned long pc);
- void (*ipi_send)(void *callmap);
- void (*ipi_clear)(int cpu, int irq);
+ void (*ipi_send)(int cpu);
+ void (*ipi_clear)(int irq);
};
/* TBD: stop exporting it for direct population by platform */
extern struct plat_smp_ops plat_smp_ops;
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_init_cpus(void) {}
+static inline const char *arc_platform_smp_cpuinfo(void)
+{
+ return "";
+}
+
+#endif /* !CONFIG_SMP */
/*
* ARC700 doesn't support atomic Read-Modify-Write ops.
- * Originally Interrupts had to be disabled around code to gaurantee atomicity.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
* The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
* based on retry-if-irq-in-atomic (with hardware assist).
* However despite these, we provide the IRQ disabling variant
@@ -71,8 +85,8 @@ extern struct plat_smp_ops plat_smp_ops;
* (1) These insn were introduced only in 4.10 release. So for older released
* support needed.
*
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
- * gaurantted by the platform (not something which core handles).
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ * guaranteed by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
*
@@ -93,7 +107,6 @@ extern struct plat_smp_ops plat_smp_ops;
#include <asm/spinlock.h>
extern arch_spinlock_t smp_atomic_ops_lock;
-extern arch_spinlock_t smp_bitops_lock;
#define atomic_ops_lock(flags) do { \
local_irq_save(flags); \
@@ -105,24 +118,11 @@ extern arch_spinlock_t smp_bitops_lock;
local_irq_restore(flags); \
} while (0)
-#define bitops_lock(flags) do { \
- local_irq_save(flags); \
- arch_spin_lock(&smp_bitops_lock); \
-} while (0)
-
-#define bitops_unlock(flags) do { \
- arch_spin_unlock(&smp_bitops_lock); \
- local_irq_restore(flags); \
-} while (0)
-
#else /* !CONFIG_SMP */
#define atomic_ops_lock(flags) local_irq_save(flags)
#define atomic_ops_unlock(flags) local_irq_restore(flags)
-#define bitops_lock(flags) local_irq_save(flags)
-#define bitops_unlock(flags) local_irq_restore(flags)
-
#endif /* !CONFIG_SMP */
#endif /* !CONFIG_ARC_HAS_LLSC */
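
For context on how the reworked plat_smp_ops is consumed: a platform populates the table directly (no registration helper, per the TBD note above), and typically hooks up its IPI in init_per_cpu via smp_ipi_irq_setup(). The sketch below is hypothetical; all my_* names and MY_IPI_HWIRQ are invented for illustration.

/* Hypothetical platform glue for the ops table declared above */
#include <linux/init.h>
#include <asm/smp.h>

#define MY_IPI_HWIRQ	19	/* made-up hwirq feeding the common IPI ISR */

static void my_ipi_send(int cpu)	{ /* poke invented per-cpu mailbox */ }
static void my_ipi_clear(int irq)	{ /* ack the mailbox interrupt */ }
static void my_cpu_kick(int cpu, unsigned long pc) { /* start @cpu at @pc */ }

static void my_init_per_cpu(int cpu)
{
	/* per the doc above: per-cpu setup such as hooking up the IPI */
	smp_ipi_irq_setup(cpu, MY_IPI_HWIRQ);
}

static void __init my_platform_smp_setup(void)	/* e.g. from mach_desc->init_early() */
{
	plat_smp_ops.info	  = "my-soc inter-core unit";
	plat_smp_ops.init_per_cpu = my_init_per_cpu;
	plat_smp_ops.cpu_kick	  = my_cpu_kick;
	plat_smp_ops.ipi_send	  = my_ipi_send;
	plat_smp_ops.ipi_clear	  = my_ipi_clear;
}
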
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..192871608925 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_SPINLOCK_H
@@ -14,61 +11,296 @@
#include <asm/barrier.h>
#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
- do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+#ifdef CONFIG_ARC_HAS_LLSC
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ unsigned int val;
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ /*
+ * ACQUIRE barrier to ensure load/store after taking the lock
+ * don't "bleed-up" out of the critical section (leak-in is allowed)
+ * http://www.spinics.net/lists/kernel/msg2010409.html
+ *
+ * ARCv2 only has load-load, store-store and all-all barrier
+ * thus need the full all-all barrier
+ */
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int val, got_it = 0;
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[slock]] \n"
+ " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
+ " scond %[LOCKED], [%[slock]] \n" /* acquire */
+ " bnz 1b \n"
+ " mov %[got_it], 1 \n"
+ "4: \n"
+ " \n"
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [slock] "r" (&(lock->slock)),
+ [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_mb();
+
+ WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ *
+ * if (rw->counter > 0) {
+ * rw->counter--;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
+ " sub %[val], %[val], 1 \n" /* reader lock */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
+ " sub %[val], %[val], 1 \n" /* counter-- */
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ *
+ * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ * rw->counter = 0;
+ * ret = 1;
+ * }
+ */
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned int val, got_it = 0;
+
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
+ " mov %[val], %[WR_LOCKED] \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n" /* retry if collided with someone */
+ " mov %[got_it], 1 \n"
+ " \n"
+ "4: ; --- done --- \n"
+
+ : [val] "=&r" (val),
+ [got_it] "+&r" (got_it)
+ : [rwlock] "r" (&(rw->counter)),
+ [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
+ [WR_LOCKED] "ir" (0)
+ : "memory", "cc");
+
+ smp_mb();
+
+ return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned int val;
+
+ smp_mb();
+
+ /*
+ * rw->counter++;
+ */
+ __asm__ __volatile__(
+ "1: llock %[val], [%[rwlock]] \n"
+ " add %[val], %[val], 1 \n"
+ " scond %[val], [%[rwlock]] \n"
+ " bnz 1b \n"
+ " \n"
+ : [val] "=&r" (val)
+ : [rwlock] "r" (&(rw->counter))
+ : "memory", "cc");
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ smp_mb();
+
+ WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
+
+ /*
+ * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
+ * for ACQ and REL semantics respectively. However EX based spinlocks
+ * need the extra smp_mb to workaround a hardware quirk.
+ */
+ smp_mb();
__asm__ __volatile__(
"1: ex %0, [%1] \n"
" breq %0, %2, 1b \n"
- : "+&r" (tmp)
+ : "+&r" (val)
: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
: "memory");
+
+ smp_mb();
}
+/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
+
+ smp_mb();
__asm__ __volatile__(
"1: ex %0, [%1] \n"
- : "+r" (tmp)
+ : "+r" (val)
: "r"(&(lock->slock))
: "memory");
- return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+ smp_mb();
+
+ return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ /*
+ * RELEASE barrier: given the instructions avail on ARCv2, full barrier
+ * is the only option
+ */
+ smp_mb();
+
+ /*
+ * EX is not really required here, a simple STore of 0 suffices.
+ * However this causes tasklist livelocks in SystemC based SMP virtual
+ * platforms where the systemc core scheduler uses EX as a cue for
+ * moving to next core. Do a git log of this file for details
+ */
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r" (val)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ /*
+ * see pairing version/comment in arch_spin_lock above
+ */
smp_mb();
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
*
* The spinlock itself is contained in @counter and access to it is
* serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
+ unsigned long flags;
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
/*
@@ -81,8 +313,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
- smp_mb();
return ret;
}
@@ -90,7 +322,9 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
+ unsigned long flags;
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
/*
@@ -104,6 +338,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
ret = 1;
}
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
return ret;
}
@@ -122,23 +357,26 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
rw->counter++;
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
arch_spin_lock(&(rw->lock_mutex));
rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
arch_spin_unlock(&(rw->lock_mutex));
+ local_irq_restore(flags);
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
+#endif
#endif /* __ASM_SPINLOCK_H */
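
For readers less fluent in ARC asm, the llock/scond fast path above boils down to a compare-and-swap retry loop with acquire ordering on success, and a plain store plus full barrier on unlock. A user-space approximation with C11 atomics (the lock encodings are illustrative; the real values come from spinlock_types.h):

#include <stdatomic.h>

#define MY_UNLOCKED	0u	/* illustrative encodings, not the kernel's */
#define MY_LOCKED	1u

static void sketch_spin_lock(atomic_uint *slock)
{
	unsigned int expected;

	for (;;) {
		expected = MY_UNLOCKED;
		/* scond only succeeds if nobody raced us since the llock */
		if (atomic_compare_exchange_weak_explicit(slock, &expected,
				MY_LOCKED, memory_order_acquire,
				memory_order_relaxed))
			break;
		/* "breq ... 1b": observed locked (or lost the race), retry */
	}
}

static void sketch_spin_unlock(atomic_uint *slock)
{
	/* kernel does smp_mb() then WRITE_ONCE(); release ordering here */
	atomic_store_explicit(slock, MY_UNLOCKED, memory_order_release);
}
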
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 8276bfd61704..7cd0373998a7 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_SPINLOCK_TYPES_H
@@ -20,13 +17,15 @@ typedef struct {
#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
/*
- * Unlocked: 0x01_00_00_00
- * Read lock(s): 0x00_FF_00_00 to say 0x01
- * Write lock: 0x0, but only possible if prior value "unlocked" 0x0100_0000
+ * Unlocked : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000
*/
typedef struct {
volatile unsigned int counter;
+#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t lock_mutex;
+#endif
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
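
A quick worked example of the reader/writer encoding documented above: readers decrement from 0x0100_0000, and a writer may only proceed from the fully unlocked value, storing 0.

#include <assert.h>

#define RW_UNLOCKED 0x01000000u		/* __ARCH_RW_LOCK_UNLOCKED__ */

int main(void)
{
	unsigned int counter = RW_UNLOCKED;

	counter--;			/* 1st reader in  -> 0x00FFFFFF */
	counter--;			/* 2nd reader in  -> 0x00FFFFFE */
	assert(counter == 0x00FFFFFEu);

	counter += 2;			/* both readers done -> unlocked again */
	assert(counter == RW_UNLOCKED);

	counter = 0;			/* writer takes it, only from RW_UNLOCKED */
	return 0;
}
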
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644
index 000000000000..4c50fb003df0
--- /dev/null
+++ b/arch/arc/include/asm/stacktrace.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk: NULL for current task, specific task otherwise
+ * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ * If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ * use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn: Callback invoked for each frame unwound
+ * Returns 0 to continue unwinding, -1 to stop
+ * @arg: Arg to callback
+ *
+ * Returns the address of first function in stack
+ *
+ * Semantics:
+ * - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL
+ * - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL
+ * - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+ struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *),
+ void *arg);
+
+#endif /* __ASM_STACKTRACE_H */
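
A hypothetical consumer of arc_unwind_core(), matching the kernel-doc above: the callback returns 0 to keep unwinding and -1 to stop. The print_frame()/dump_current_stack() names and the fixed depth are invented for the example.

/* Hypothetical synchronous unwind of the current context (tsk/regs == NULL) */
#include <linux/printk.h>
#include <asm/stacktrace.h>

static int print_frame(unsigned int pc, void *arg)
{
	unsigned int *remaining = arg;

	pr_info("  frame pc: %pS\n", (void *)(unsigned long)pc);

	return --(*remaining) ? 0 : -1;		/* stop after a fixed depth */
}

static void dump_current_stack(void)
{
	unsigned int depth = 16;

	arc_unwind_core(NULL, NULL, print_frame, &depth);
}
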
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
index 87676c8f1412..3182ea9dcdde 100644
--- a/arch/arc/include/asm/string.h
+++ b/arch/arc/include/asm/string.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* vineetg: May 2011
* -We had half-optimised memset/memcpy, got better versions of those
* -Added memcmp, strchr, strcpy, strcmp, strlen
@@ -17,8 +14,6 @@
#include <linux/types.h>
-#ifdef __KERNEL__
-
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMCMP
@@ -36,5 +31,4 @@ extern char *strcpy(char *dest, const char *src);
extern int strcmp(const char *cs, const char *ct);
extern __kernel_size_t strlen(const char *);
-#endif /* __KERNEL__ */
#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
index 1b171ab5fec0..5806106a65f9 100644
--- a/arch/arc/include/asm/switch_to.h
+++ b/arch/arc/include/asm/switch_to.h
@@ -1,38 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SWITCH_TO_H
#define _ASM_ARC_SWITCH_TO_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/sched.h>
-
-#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
-
-extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
-#define ARC_FPU_PREV(p, n) fpu_save_restore(p, n)
-#define ARC_FPU_NEXT(t)
-
-#else
-
-#define ARC_FPU_PREV(p, n)
-#define ARC_FPU_NEXT(n)
-
-#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+#include <asm/dsp-impl.h>
+#include <asm/fpu.h>
struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
#define switch_to(prev, next, last) \
do { \
- ARC_FPU_PREV(prev, next); \
+ dsp_save_restore(prev, next); \
+ fpu_save_restore(prev, next); \
last = __switch_to(prev, next);\
- ARC_FPU_NEXT(next); \
mb(); \
} while (0)
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 29de09804306..728d625a10f1 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -1,19 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SYSCALL_H
#define _ASM_ARC_SYSCALL_H 1
+#include <uapi/linux/audit.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <asm/unistd.h>
#include <asm/ptrace.h> /* in_syscall() */
+extern void *sys_call_table[];
+
static inline long
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
@@ -24,6 +24,17 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
}
static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr)
+{
+ /*
+ * Unlike syscall_get_nr(), syscall_set_nr() can be called only when
+ * the target task is stopped for tracing on entering syscall, so
+ * there is no need to have the same check syscall_get_nr() has.
+ */
+ regs->r8 = nr;
+}
+
+static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
regs->r0 = regs->orig_r0;
@@ -55,12 +66,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
*/
static inline void
syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, unsigned long *args)
+ unsigned long *args)
{
unsigned long *inside_ptregs = &(regs->r0);
- inside_ptregs -= i;
-
- BUG_ON((i + n) > 6);
+ unsigned int n = 6;
+ unsigned int i = 0;
while (n--) {
args[i++] = (*inside_ptregs);
@@ -68,4 +78,28 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
}
}
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *args)
+{
+ unsigned long *inside_ptregs = &regs->r0;
+ unsigned int n = 6;
+ unsigned int i = 0;
+
+ while (n--) {
+ *inside_ptregs = args[i++];
+ inside_ptregs--;
+ }
+}
+
+static inline int
+syscall_get_arch(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_ISA_ARCOMPACT)
+ ? (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+ ? AUDIT_ARCH_ARCOMPACTBE : AUDIT_ARCH_ARCOMPACT)
+ : (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+ ? AUDIT_ARCH_ARCV2BE : AUDIT_ARCH_ARCV2);
+}
+
#endif
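
The rewritten argument helpers above always move all six arguments, walking the pt_regs slots downward from r0. A hypothetical tracing-side caller (show_syscall() is invented) would use them like this:

/* Hypothetical tracer-side use of the helpers above */
#include <linux/printk.h>
#include <asm/syscall.h>

static void show_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	syscall_get_arguments(task, regs, args);	/* fills args[0..5] from r0..r5 */

	pr_debug("sys %ld (%lx %lx %lx %lx %lx %lx)\n", nr,
		 args[0], args[1], args[2], args[3], args[4], args[5]);
}
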
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index dd785befe7fd..c3f4714a4f5c 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -1,27 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_SYSCALLS_H
#define _ASM_ARC_SYSCALLS_H 1
-#ifdef __KERNEL__
-
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
int sys_clone_wrapper(int, int, int, int, int);
+int sys_clone3_wrapper(void *, size_t);
int sys_cacheflush(uint32_t, uint32_t, uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
+int sys_arc_usr_cmpxchg(int *, int, int);
#include <asm-generic/syscalls.h>
-#endif /* __KERNEL__ */
-
#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 2d50a4cdd7f3..255d2c774219 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Vineetg: Oct 2009
* No need for ARC specific thread_info allocator (kmalloc/free). This is
* anyways one page allocation, thus slab alloc can be short-circuited and
@@ -16,8 +13,6 @@
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
-#ifdef __KERNEL__
-
#include <asm/page.h>
#ifdef CONFIG_16KSTACKS
@@ -27,11 +22,11 @@
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SHIFT (PAGE_SHIFT << THREAD_SIZE_ORDER)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/thread_info.h>
-#include <asm/segment.h>
/*
* low level task data that entry.S needs immediate access to
@@ -42,45 +37,32 @@
*/
struct thread_info {
unsigned long flags; /* low level flags */
- int preempt_count; /* 0 => preemptable, <0 => BUG */
- struct task_struct *task; /* main task structure */
- mm_segment_t addr_limit; /* thread address space */
- struct exec_domain *exec_domain;/* execution domain */
- __u32 cpu; /* current CPU */
+ unsigned long ksp; /* kernel mode stack top in __switch_to */
+ int preempt_count; /* 0 => preemptible, <0 => BUG */
+ int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
- struct restart_block restart_block;
+ struct task_struct *task; /* main task structure */
};
/*
- * macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
+ * initialize thread_info for any @tsk
+ * - this is not related to init_task per se
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
static inline __attribute_const__ struct thread_info *current_thread_info(void)
{
register unsigned long sp asm("sp");
return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
-#endif /* !__ASSEMBLY__ */
-
-#define PREEMPT_ACTIVE 0x10000000
+#endif /* !__ASSEMBLER__ */
/*
* thread information flags
@@ -94,28 +76,31 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */
#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
-
/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 16
+#define TIF_SYSCALL_TRACEPOINT 17 /* syscall tracepoint instrumentation */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
#define _TIF_MEMDIE (1<<TIF_MEMDIE)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME)
+ _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL)
+
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
- * syscall, so all that reamins to be tested is _TIF_WORK_MASK
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
*/
-#endif /* __KERNEL__ */
-
#endif /* _ASM_THREAD_INFO_H */
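
current_thread_info() above relies only on the kernel stack being THREAD_SIZE-aligned: masking the low bits of sp lands on the base of the stack area where struct thread_info lives. A user-space illustration, assuming an 8 KB THREAD_SIZE and a made-up stack pointer value:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE	(8 * 1024)	/* assumed: 4K pages, order-1 stacks */

int main(void)
{
	uintptr_t sp = 0x9f80a5c4u;	/* hypothetical kernel-mode sp */
	uintptr_t ti = sp & ~((uintptr_t)THREAD_SIZE - 1);

	/* thread_info sits at the bottom of the THREAD_SIZE-aligned stack */
	printf("sp=0x%08lx -> thread_info @ 0x%08lx\n",
	       (unsigned long)sp, (unsigned long)ti);
	return 0;
}
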
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
index 0a82960a75e9..48b3482bc97f 100644
--- a/arch/arc/include/asm/timex.h
+++ b/arch/arc/include/asm/timex.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_TIMEX_H
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
deleted file mode 100644
index 8a1ec96012ae..000000000000
--- a/arch/arc/include/asm/tlb-mmu1.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_TLB_MMU_V1_H__
-#define __ASM_TLB_MMU_V1_H__
-
-#include <asm/mmu.h>
-
-#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
-
-.macro TLB_WRITE_HEURISTICS
-
-#define JH_HACK1
-#undef JH_HACK2
-#undef JH_HACK3
-
-#ifdef JH_HACK3
-; Calculate set index for 2-way MMU
-; -avoiding use of GetIndex from MMU
-; and its unpleasant LFSR pseudo-random sequence
-;
-; r1 = TLBPD0 from TLB_RELOAD above
-;
-; -- jh_ex_way_set not cleared on startup
-; didn't want to change setup.c
-; hence extra instruction to clean
-;
-; -- should be in cache since in same line
-; as r0/r1 saves above
-;
-ld r0,[jh_ex_way_sel] ; victim pointer
-and r0,r0,1 ; clean
-xor.f r0,r0,1 ; flip
-st r0,[jh_ex_way_sel] ; store back
-asr r0,r1,12 ; get set # <<1, note bit 12=R=0
-or.nz r0,r0,1 ; set way bit
-and r0,r0,0xff ; clean
-sr r0,[ARC_REG_TLBINDEX]
-#endif
-
-#ifdef JH_HACK2
-; JH hack #2
-; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
-; Slower in thrash case (where it matters) because more code is executed
-; Inefficient due to two-register paradigm of this miss handler
-;
-/* r1 = data TLBPD0 at this point */
-lr r0,[eret] /* instruction address */
-xor r0,r0,r1 /* compare set # */
-and.f r0,r0,0x000fe000 /* 2-way MMU mask */
-bne 88f /* not in same set - no need to probe */
-
-lr r0,[eret] /* instruction address */
-and r0,r0,PAGE_MASK /* VPN of instruction address */
-; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
-and r1,r1,0xff /* Data ASID */
-or r0,r0,r1 /* Instruction address + Data ASID */
-
-lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/
-sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
-sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
-lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
-sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */
-
-xor r0,r0,1 /* flip bottom bit of data index */
-b.d 89f
-sr r0,[ARC_REG_TLBINDEX] /* and put it back */
-88:
-sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
-89:
-#endif
-
-#ifdef JH_HACK1
-;
-; Always checks whether instruction will be kicked out by dtlb miss
-;
-mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3
-lr r0,[eret] /* instruction address */
-and r0,r0,PAGE_MASK /* VPN of instruction address */
-bmsk r1,r3,7 /* Data ASID, bits 7-0 */
-or_s r0,r0,r1 /* Instruction address + Data ASID */
-
-sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
-sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
-lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
-sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */
-
-sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
-lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */
-cmp r0,r1 /* if no match on indices, go around */
-xor.eq r1,r1,1 /* flip bottom bit of data index */
-sr r1,[ARC_REG_TLBINDEX] /* and put it back */
-#endif
-
-.endm
-
-#endif
-
-#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index a9db5f62aaf3..975b35d3738d 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -1,46 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_TLB_H
#define _ASM_ARC_TLB_H
-#define tlb_flush(tlb) \
-do { \
- if (tlb->fullmm) \
- flush_tlb_mm((tlb)->mm); \
-} while (0)
-
-/*
- * This pair is called at time of munmap/exit to flush cache and TLB entries
- * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
- * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
- *
- * Note, read http://lkml.org/lkml/2004/1/15/6
- */
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-#define tlb_start_vma(tlb, vma)
-#else
-#define tlb_start_vma(tlb, vma) \
-do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while(0)
-#endif
-
-#define tlb_end_vma(tlb, vma) \
-do { \
- if (!tlb->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, ptep, address)
-
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index b2f9bc7f68c8..992a2837a53f 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_ARC_TLBFLUSH__
@@ -17,12 +14,29 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#endif
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
-
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e)
+#endif
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+#endif
+#endif /* CONFIG_SMP */
#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..1e8809ea000a 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* vineetg: June 2010
* -__clear_user( ) called multiple times during elf load was byte loop
* converted to do as much word clear as possible.
@@ -24,39 +21,8 @@
#ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H
-#include <linux/sched.h>
-#include <asm/errno.h>
#include <linux/string.h> /* for generic string functions */
-
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-
-/*
- * Algorthmically, for __user_ok() we want do:
- * (start < TASK_SIZE) && (start+len < TASK_SIZE)
- * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
- * emitted directly in code.
- *
- * This can however be rewritten as follows:
- * (len <= TASK_SIZE) && (start+len < TASK_SIZE)
- *
- * Because it essentially checks if buffer end is within limit and @len is
- * non-ngeative, which implies that buffer start will be within limit too.
- *
- * The reason for rewriting being, for majorit yof cases, @len is generally
- * compile time constant, causing first sub-expression to be compile time
- * subsumed.
- *
- * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
- * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
- * would already have been done at this call site for __kernel_ok()
- *
- */
-#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
- (((addr)+(sz)) <= get_fs()))
-#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
- likely(__user_ok((addr), (sz))))
-
/*********** Single byte/hword/word copies ******************/
#define __get_user_fn(sz, u, k) \
@@ -83,7 +49,10 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
- "3: mov %0, %3\n" \
+ "3: # return -EFAULT\n" \
+ " mov %0, %3\n" \
+ " # zero out dst ptr\n" \
+ " mov %1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
@@ -101,7 +70,11 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
- "3: mov %0, %3\n" \
+ "3: # return -EFAULT\n" \
+ " mov %0, %3\n" \
+ " # zero out dst ptr\n" \
+ " mov %1, 0\n" \
+ " mov %R1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
@@ -163,7 +136,7 @@
static inline unsigned long
-__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
long res = 0;
char val;
@@ -173,8 +146,9 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
if (n == 0)
return 0;
- /* unaligned */
- if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+ /* fallback for unaligned access when hardware doesn't support */
+ if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
+ (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
@@ -202,7 +176,7 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -389,11 +363,8 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
return res;
}
-extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
- unsigned long n);
-
static inline unsigned long
-__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
long res = 0;
char val;
@@ -403,8 +374,9 @@ __arc_copy_to_user(void __user *to, const void *from, unsigned long n)
if (n == 0)
return 0;
- /* unaligned */
- if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+ /* fallback for unaligned access when hardware doesn't support */
+ if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
+ (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
@@ -431,7 +403,7 @@ __arc_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -614,7 +586,7 @@ __arc_copy_to_user(void __user *to, const void *from, unsigned long n)
return res;
}
-static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
long res = n;
unsigned char *d_char = to;
@@ -651,101 +623,16 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
- : "lp_count", "lp_start", "lp_end", "memory");
-
- return res;
-}
-
-static inline long
-__arc_strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res = count;
- char val;
- unsigned int hw_count;
-
- if (count == 0)
- return 0;
-
- __asm__ __volatile__(
- " lp 2f \n"
- "1: ldb.ab %3, [%2, 1] \n"
- " breq.d %3, 0, 2f \n"
- " stb.ab %3, [%1, 1] \n"
- "2: sub %0, %6, %4 \n"
- "3: ;nop \n"
- " .section .fixup, \"ax\" \n"
- " .align 4 \n"
- "4: mov %0, %5 \n"
- " j 3b \n"
- " .previous \n"
- " .section __ex_table, \"a\" \n"
- " .align 4 \n"
- " .word 1b, 4b \n"
- " .previous \n"
- : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
- : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
- : "memory");
-
- return res;
-}
-
-static inline long __arc_strnlen_user(const char __user *s, long n)
-{
- long res, tmp1, cnt;
- char val;
-
- __asm__ __volatile__(
- " mov %2, %1 \n"
- "1: ldb.ab %3, [%0, 1] \n"
- " breq.d %3, 0, 2f \n"
- " sub.f %2, %2, 1 \n"
- " bnz 1b \n"
- " sub %2, %2, 1 \n"
- "2: sub %0, %1, %2 \n"
- "3: ;nop \n"
- " .section .fixup, \"ax\" \n"
- " .align 4 \n"
- "4: mov %0, 0 \n"
- " j 3b \n"
- " .previous \n"
- " .section __ex_table, \"a\" \n"
- " .align 4 \n"
- " .word 1b, 4b \n"
- " .previous \n"
- : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
- : "0"(s), "1"(n)
- : "memory");
+ : "lp_count", "memory");
return res;
}
-#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
-#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
-#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
-#define __clear_user(d, n) __arc_clear_user(d, n)
-#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
-#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
-#else
-extern long arc_copy_from_user_noinline(void *to, const void __user * from,
- unsigned long n);
-extern long arc_copy_to_user_noinline(void __user *to, const void *from,
- unsigned long n);
-extern unsigned long arc_clear_user_noinline(void __user *to,
- unsigned long n);
-extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
- long count);
-extern long arc_strnlen_user_noinline(const char __user *src, long n);
-
-#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
-#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
-#define __clear_user(d, n) arc_clear_user_noinline(d, n)
-#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
-#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
-#endif
+#define __clear_user __clear_user
#include <asm-generic/uaccess.h>
-extern int fixup_exception(struct pt_regs *regs);
-
#endif
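
With INLINE_COPY_TO_USER / INLINE_COPY_FROM_USER defined, the generic linux/uaccess.h wrappers are emitted as inlines around the arch raw_copy_to_user()/raw_copy_from_user() above, so ordinary callers keep using copy_to_user()/copy_from_user() unchanged. A minimal caller sketch follows; the struct and function names are illustrative and not part of this patch:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_sample {
	u32 seq;
	u32 value;
};

/* Copy one sample out to userspace; resolves down to raw_copy_to_user(). */
static long demo_read_sample(void __user *ubuf, const struct demo_sample *s)
{
	/* copy_to_user() returns the number of bytes it could NOT copy */
	if (copy_to_user(ubuf, s, sizeof(*s)))
		return -EFAULT;
	return 0;
}
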
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
deleted file mode 100644
index 60702f3751d2..000000000000
--- a/arch/arc/include/asm/unaligned.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_UNALIGNED_H
-#define _ASM_ARC_UNALIGNED_H
-
-/* ARC700 can't handle unaligned Data accesses. */
-
-#include <asm-generic/unaligned.h>
-#include <asm/ptrace.h>
-
-#ifdef CONFIG_ARC_MISALIGN_ACCESS
-int misaligned_fixup(unsigned long address, struct pt_regs *regs,
- struct callee_regs *cregs);
-#else
-static inline int
-misaligned_fixup(unsigned long address, struct pt_regs *regs,
- struct callee_regs *cregs)
-{
- return 0;
-}
-#endif
-
-#endif /* _ASM_ARC_UNALIGNED_H */
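
The deleted header was only a thin wrapper: it pulled in the generic unaligned accessors and declared misaligned_fixup(). Callers keep using the generic helpers directly; a hedged illustration follows (the packet layout and function name are made up, and the include path depends on the tree version: <asm/unaligned.h> historically, <linux/unaligned.h> in newer trees):

#include <linux/types.h>
#include <asm/unaligned.h>	/* <linux/unaligned.h> in newer trees */

/* Read a 32-bit little-endian field at a byte offset that need not be aligned. */
static u32 demo_parse_len(const u8 *pkt)
{
	return get_unaligned_le32(pkt + 2);
}
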
diff --git a/arch/arc/include/asm/unistd.h b/arch/arc/include/asm/unistd.h
new file mode 100644
index 000000000000..211c230d88d6
--- /dev/null
+++ b/arch/arc/include/asm/unistd.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_ARC_UNISTD_H
+#define _ASM_ARC_UNISTD_H
+
+#include <uapi/asm/unistd.h>
+
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define NR_syscalls __NR_syscalls
+
+#endif
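
The __ARCH_WANT_* selections opt in to the generic clone/fork/vfork and stat64 syscall plumbing, and NR_syscalls is what bounds the syscall table. ARC's real dispatch lives in assembly; the C rendering below is only an illustration of how NR_syscalls is typically used (the table type and function name are made up):

#include <linux/errno.h>
#include <asm/unistd.h>

typedef long (*syscall_fn)(long, long, long, long, long, long);
extern syscall_fn sys_call_table[];

static long demo_dispatch(unsigned int nr, long a0, long a1, long a2,
			  long a3, long a4, long a5)
{
	/* reject out-of-range numbers before indexing the table */
	if (nr >= NR_syscalls)
		return -ENOSYS;
	return sys_call_table[nr](a0, a1, a2, a3, a4, a5);
}
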
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
index 7ca628b6ee2a..e95a20453a17 100644
--- a/arch/arc/include/asm/unwind.h
+++ b/arch/arc/include/asm/unwind.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASM_ARC_UNWIND_H
@@ -112,7 +109,6 @@ struct unwind_frame_info {
extern int arc_unwind(struct unwind_frame_info *frame);
extern void arc_unwind_init(void);
-extern void arc_unwind_setup(void);
extern void *unwind_add_table(struct module *module, const void *table_start,
unsigned long table_size);
extern void unwind_remove_table(void *handle, int init_only);
@@ -152,9 +148,6 @@ static inline void arc_unwind_init(void)
{
}
-static inline void arc_unwind_setup(void)
-{
-}
#define unwind_add_table(a, b, c)
#define unwind_remove_table(a, b)
diff --git a/arch/arc/include/asm/vermagic.h b/arch/arc/include/asm/vermagic.h
new file mode 100644
index 000000000000..a10257d2c62c
--- /dev/null
+++ b/arch/arc/include/asm/vermagic.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC "ARC700"
+
+#endif /* _ASM_VERMAGIC_H */
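
MODULE_ARCH_VERMAGIC is folded into VERMAGIC_STRING by include/linux/vermagic.h, so every module built against this tree carries an "ARC700" tag that the loader compares at insertion time. A simplified sketch of that check (the real module loader is more lenient about some components):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/vermagic.h>

/* Simplified: accept a module only if its vermagic matches ours exactly. */
static bool demo_vermagic_ok(const char *mod_vermagic)
{
	return strcmp(mod_vermagic, VERMAGIC_STRING) == 0;
}
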
diff --git a/arch/arc/include/asm/vmalloc.h b/arch/arc/include/asm/vmalloc.h
new file mode 100644
index 000000000000..973095aad665
--- /dev/null
+++ b/arch/arc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARC_VMALLOC_H
+#define _ASM_ARC_VMALLOC_H
+
+#endif /* _ASM_ARC_VMALLOC_H */