Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/amd_nb.h            |   1
-rw-r--r--  arch/x86/include/asm/asm.h               |   6
-rw-r--r--  arch/x86/include/asm/clocksource.h       |  20
-rw-r--r--  arch/x86/include/asm/cpu.h               |  12
-rw-r--r--  arch/x86/include/asm/cpu_device_id.h     | 132
-rw-r--r--  arch/x86/include/asm/cpufeatures.h       |   5
-rw-r--r--  arch/x86/include/asm/dwarf2.h            |   4
-rw-r--r--  arch/x86/include/asm/efi.h               |  23
-rw-r--r--  arch/x86/include/asm/futex.h             |  99
-rw-r--r--  arch/x86/include/asm/intel-family.h      |  17
-rw-r--r--  arch/x86/include/asm/irq.h               |   2
-rw-r--r--  arch/x86/include/asm/kprobes.h           |   1
-rw-r--r--  arch/x86/include/asm/mce.h               |   9
-rw-r--r--  arch/x86/include/asm/mshyperv.h          |   4
-rw-r--r--  arch/x86/include/asm/msr-index.h         |   9
-rw-r--r--  arch/x86/include/asm/mwait.h             |   2
-rw-r--r--  arch/x86/include/asm/perf_event.h        |  15
-rw-r--r--  arch/x86/include/asm/pgtable.h           |  13
-rw-r--r--  arch/x86/include/asm/pgtable_types.h     |   2
-rw-r--r--  arch/x86/include/asm/pkeys.h             |   5
-rw-r--r--  arch/x86/include/asm/preempt.h           |   8
-rw-r--r--  arch/x86/include/asm/processor.h         |  13
-rw-r--r--  arch/x86/include/asm/sections.h          |  20
-rw-r--r--  arch/x86/include/asm/set_memory.h        |   2
-rw-r--r--  arch/x86/include/asm/sigframe.h          |   6
-rw-r--r--  arch/x86/include/asm/sighandling.h       |   7
-rw-r--r--  arch/x86/include/asm/syscall.h           |  16
-rw-r--r--  arch/x86/include/asm/syscall_wrapper.h   | 287
-rw-r--r--  arch/x86/include/asm/syscalls.h          |  34
-rw-r--r--  arch/x86/include/asm/thread_info.h       |  14
-rw-r--r--  arch/x86/include/asm/topology.h          |  25
-rw-r--r--  arch/x86/include/asm/traps.h             |  17
-rw-r--r--  arch/x86/include/asm/uaccess.h           | 233
-rw-r--r--  arch/x86/include/asm/uaccess_32.h        |  27
-rw-r--r--  arch/x86/include/asm/uaccess_64.h        | 108
-rw-r--r--  arch/x86/include/asm/unistd.h            |   7
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h         |   2
-rw-r--r--  arch/x86/include/asm/vdso/clocksource.h  |  10
-rw-r--r--  arch/x86/include/asm/vdso/gettimeofday.h |   6
-rw-r--r--  arch/x86/include/asm/vdso/processor.h    |  23
-rw-r--r--  arch/x86/include/asm/vdso/vsyscall.h     |  15
-rw-r--r--  arch/x86/include/asm/vgtod.h             |  12
42 files changed, 570 insertions(+), 703 deletions(-)
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 1ae4e5791afa..c7df20e78b09 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -12,7 +12,6 @@ struct amd_nb_bus_dev_range {
u8 dev_limit;
};
-extern const struct pci_device_id amd_nb_misc_ids[];
extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
extern bool early_is_amd_nb(u32 value);
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index cd339b88d5d4..0f63585edf5f 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -138,9 +138,6 @@
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
-# define _ASM_EXTABLE_EX(from, to) \
- _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
-
# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
_ASM_ALIGN ; \
@@ -166,9 +163,6 @@
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
-# define _ASM_EXTABLE_EX(from, to) \
- _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
-
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index dc4cfc888d6d..dc9dc7b3911a 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -4,14 +4,18 @@
#ifndef _ASM_X86_CLOCKSOURCE_H
#define _ASM_X86_CLOCKSOURCE_H
-#define VCLOCK_NONE 0 /* No vDSO clock available. */
-#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
-#define VCLOCK_PVCLOCK 2 /* vDSO should use vread_pvclock. */
-#define VCLOCK_HVCLOCK 3 /* vDSO should use vread_hvclock. */
-#define VCLOCK_MAX 3
+#include <asm/vdso/clocksource.h>
-struct arch_clocksource_data {
- int vclock_mode;
-};
+extern unsigned int vclocks_used;
+
+static inline bool vclock_was_used(int vclock)
+{
+ return READ_ONCE(vclocks_used) & (1U << vclock);
+}
+
+static inline void vclocks_set_used(unsigned int which)
+{
+ WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << which));
+}
#endif /* _ASM_X86_CLOCKSOURCE_H */
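
For illustration (not part of the patch): the new helpers keep a bitmask of every vDSO clock mode that has ever been enabled, so registration paths mark their mode used and the vsyscall update path can skip modes that never were. A minimal sketch, assuming the VDSO_CLOCKMODE_TSC constant from the new asm/vdso/clocksource.h:

	/* Sketch: mark the TSC vDSO clock mode as used at registration... */
	static void example_mark_tsc_used(void)
	{
		vclocks_set_used(VDSO_CLOCKMODE_TSC);
	}

	/* ...and only update vDSO data for modes that were ever used. */
	static bool example_tsc_needs_vdso_update(void)
	{
		return vclock_was_used(VDSO_CLOCKMODE_TSC);
	}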
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index adc6cc86b062..ff6f3ca649b3 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -40,4 +40,16 @@ int mwait_usable(const struct cpuinfo_x86 *);
unsigned int x86_family(unsigned int sig);
unsigned int x86_model(unsigned int sig);
unsigned int x86_stepping(unsigned int sig);
+#ifdef CONFIG_CPU_SUP_INTEL
+extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
+extern void switch_to_sld(unsigned long tifn);
+extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
+#else
+static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
+static inline void switch_to_sld(unsigned long tifn) {}
+static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
+{
+ return false;
+}
+#endif
#endif /* _ASM_X86_CPU_H */
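
For illustration (not part of the patch): a hedged sketch of how the #AC trap handler is expected to consume handle_user_split_lock(); the real handler lives in arch/x86/kernel/traps.c and may differ in detail:

	dotraplinkage void example_do_alignment_check(struct pt_regs *regs,
						      long error_code)
	{
		/* true: a user split lock was warned about and rate-limited,
		 * so the task may continue with detection disabled */
		if (handle_user_split_lock(regs, error_code))
			return;

		/* otherwise deliver SIGBUS as before */
	}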
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index 31c379c1da41..cf3d621c6892 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -5,9 +5,139 @@
/*
* Declare drivers belonging to specific x86 CPUs
* Similar in spirit to pci_device_id and related PCI functions
+ *
+ * The wildcard initializers are in mod_devicetable.h because
+ * file2alias needs them. Sigh.
*/
-
#include <linux/mod_devicetable.h>
+/* Get the INTEL_FAM* model defines */
+#include <asm/intel-family.h>
+/* And the X86_VENDOR_* ones */
+#include <asm/processor.h>
+
+/* Centaur FAM6 models */
+#define X86_CENTAUR_FAM6_C7_A 0xa
+#define X86_CENTAUR_FAM6_C7_D 0xd
+#define X86_CENTAUR_FAM6_NANO 0xf
+
+/**
+ * X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Base macro for CPU matching
+ * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@_vendor
+ * @_family: The family number or X86_FAMILY_ANY
+ * @_model: The model number, model constant or X86_MODEL_ANY
+ * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY
+ * @_data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * Use only if you need all selectors. Otherwise use one of the shorter
+ * macros of the X86_MATCH_* family. If there is no matching shorthand
+ * macro, consider adding one. If you really need to wrap one of the macros
+ * into another macro at the usage site for good reasons, then please
+ * start this local macro with X86_MATCH to allow easy grepping.
+ */
+#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(_vendor, _family, _model, \
+ _feature, _data) { \
+ .vendor = X86_VENDOR_##_vendor, \
+ .family = _family, \
+ .model = _model, \
+ .feature = _feature, \
+ .driver_data = (unsigned long) _data \
+}
+
+/**
+ * X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature
+ * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@vendor
+ * @family: The family number or X86_FAMILY_ANY
+ * @feature: A X86_FEATURE bit
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, \
+ X86_MODEL_ANY, feature, data)
+
+/**
+ * X86_MATCH_VENDOR_FEATURE - Macro for matching vendor and CPU feature
+ * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@vendor
+ * @feature: A X86_FEATURE bit
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FEATURE(vendor, feature, data) \
+ X86_MATCH_VENDOR_FAM_FEATURE(vendor, X86_FAMILY_ANY, feature, data)
+
+/**
+ * X86_MATCH_FEATURE - Macro for matching a CPU feature
+ * @feature: A X86_FEATURE bit
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_FEATURE(feature, data) \
+ X86_MATCH_VENDOR_FEATURE(ANY, feature, data)
+
+/**
+ * X86_MATCH_VENDOR_FAM_MODEL - Match vendor, family and model
+ * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@vendor
+ * @family: The family number or X86_FAMILY_ANY
+ * @model: The model number, model constant or X86_MODEL_ANY
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, \
+ X86_FEATURE_ANY, data)
+
+/**
+ * X86_MATCH_VENDOR_FAM - Match vendor and family
+ * @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@vendor
+ * @family: The family number or X86_FAMILY_ANY
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * All other missing arguments to X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM(vendor, family, data) \
+ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data)
+
+/**
+ * X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model
+ * @model: The model name without the INTEL_FAM6_ prefix or ANY
+ * The model name is expanded to INTEL_FAM6_@model internally
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * The vendor is set to INTEL, the family to 6 and all other missing
+ * arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are set to wildcards.
+ *
+ * See X86_MATCH_VENDOR_FAM_MODEL_FEATURE() for further information.
+ */
+#define X86_MATCH_INTEL_FAM6_MODEL(model, data) \
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)
/*
* Match specific microcode revisions.
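
For illustration (not part of the patch): a hypothetical driver match table built from the new macros, looked up with the existing x86_match_cpu(); skl_data is a made-up payload:

	static const struct x86_cpu_id example_cpu_ids[] = {
		X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_data),
		X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
		X86_MATCH_FEATURE(X86_FEATURE_SPLIT_LOCK_DETECT, NULL),
		{}
	};

	static int example_probe(void)
	{
		const struct x86_cpu_id *m = x86_match_cpu(example_cpu_ids);

		if (!m)
			return -ENODEV;
		/* m->driver_data carries the per-match payload */
		return 0;
	}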
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f3327cb56edf..db189945e9b0 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -217,7 +217,7 @@
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
@@ -285,6 +285,7 @@
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -299,6 +300,7 @@
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
+#define X86_FEATURE_AMD_PPIN (13*32+23) /* Protected Processor Inventory Number */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
@@ -367,6 +369,7 @@
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* "" IA32_CORE_CAPABILITIES MSR */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
/*
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index ae391f609840..f71a0cce9373 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -42,8 +42,8 @@
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* The latter we currently just discard since we don't do DWARF
* unwinding at runtime. So only the offline DWARF information is
- * useful to anyone. Note we should not use this directive if
- * vmlinux.lds.S gets changed so it doesn't discard .eh_frame.
+ * useful to anyone. Note we should not use this directive if we
+ * ever decide to enable DWARF unwinding at runtime.
*/
.cfi_sections .debug_frame
#else
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 86169a24b0d8..cdcf48d52a12 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -10,6 +10,8 @@
#include <asm/mmu_context.h>
#include <linux/build_bug.h>
+extern unsigned long efi_fw_vendor, efi_config_table;
+
/*
* We map the EFI regions needed for runtime services non-contiguously,
* with preserved alignment on virtual addresses starting from -4G down
@@ -34,8 +36,6 @@ static inline bool efi_have_uv1_memmap(void)
#define EFI32_LOADER_SIGNATURE "EL32"
#define EFI64_LOADER_SIGNATURE "EL64"
-#define MAX_CMDLINE_ADDRESS UINT_MAX
-
#define ARCH_EFI_IRQ_FLAGS_MASK X86_EFLAGS_IF
/*
@@ -180,7 +180,6 @@ extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);
struct efi_setup_data {
u64 fw_vendor;
- u64 runtime;
u64 tables;
u64 smbios;
u64 reserved[8];
@@ -219,7 +218,8 @@ extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
unsigned long descriptor_size,
u32 descriptor_version,
- efi_memory_desc_t *virtual_map);
+ efi_memory_desc_t *virtual_map,
+ unsigned long systab_phys);
/* arch specific definitions used by the stub code */
@@ -270,6 +270,11 @@ static inline void *efi64_zero_upper(void *p)
return p;
}
+static inline u32 efi64_convert_status(efi_status_t status)
+{
+ return (u32)(status | (u64)status >> 32);
+}
+
#define __efi64_argmap_free_pages(addr, size) \
((addr), 0, (size))
@@ -285,11 +290,21 @@ static inline void *efi64_zero_upper(void *p)
#define __efi64_argmap_locate_protocol(protocol, reg, interface) \
((protocol), (reg), efi64_zero_upper(interface))
+#define __efi64_argmap_locate_device_path(protocol, path, handle) \
+ ((protocol), (path), efi64_zero_upper(handle))
+
+#define __efi64_argmap_exit(handle, status, size, data) \
+ ((handle), efi64_convert_status(status), (size), (data))
+
/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func) \
((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus), \
efi64_zero_upper(dev), efi64_zero_upper(func))
+/* LoadFile */
+#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf) \
+ ((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))
+
/*
* The macros below handle the plumbing for the argument mapping. To add a
* mapping for a specific EFI method, simply define a macro
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index 13c83fe97988..f9c00110a69a 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -12,76 +12,103 @@
#include <asm/processor.h>
#include <asm/smap.h>
-#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
- asm volatile("\t" ASM_STAC "\n" \
- "1:\t" insn "\n" \
- "2:\t" ASM_CLAC "\n" \
+#define unsafe_atomic_op1(insn, oval, uaddr, oparg, label) \
+do { \
+ int oldval = 0, ret; \
+ asm volatile("1:\t" insn "\n" \
+ "2:\n" \
"\t.section .fixup,\"ax\"\n" \
"3:\tmov\t%3, %1\n" \
"\tjmp\t2b\n" \
"\t.previous\n" \
_ASM_EXTABLE_UA(1b, 3b) \
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
- : "i" (-EFAULT), "0" (oparg), "1" (0))
+ : "i" (-EFAULT), "0" (oparg), "1" (0)); \
+ if (ret) \
+ goto label; \
+ *oval = oldval; \
+} while(0)
-#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
- asm volatile("\t" ASM_STAC "\n" \
- "1:\tmovl %2, %0\n" \
- "\tmovl\t%0, %3\n" \
+
+#define unsafe_atomic_op2(insn, oval, uaddr, oparg, label) \
+do { \
+ int oldval = 0, ret, tem; \
+ asm volatile("1:\tmovl %2, %0\n" \
+ "2:\tmovl\t%0, %3\n" \
"\t" insn "\n" \
- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
- "\tjnz\t1b\n" \
- "3:\t" ASM_CLAC "\n" \
+ "3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
+ "\tjnz\t2b\n" \
+ "4:\n" \
"\t.section .fixup,\"ax\"\n" \
- "4:\tmov\t%5, %1\n" \
- "\tjmp\t3b\n" \
+ "5:\tmov\t%5, %1\n" \
+ "\tjmp\t4b\n" \
"\t.previous\n" \
- _ASM_EXTABLE_UA(1b, 4b) \
- _ASM_EXTABLE_UA(2b, 4b) \
+ _ASM_EXTABLE_UA(1b, 5b) \
+ _ASM_EXTABLE_UA(3b, 5b) \
: "=&a" (oldval), "=&r" (ret), \
"+m" (*uaddr), "=&r" (tem) \
- : "r" (oparg), "i" (-EFAULT), "1" (0))
+ : "r" (oparg), "i" (-EFAULT), "1" (0)); \
+ if (ret) \
+ goto label; \
+ *oval = oldval; \
+} while(0)
-static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
u32 __user *uaddr)
{
- int oldval = 0, ret, tem;
-
- pagefault_disable();
+ if (!user_access_begin(uaddr, sizeof(u32)))
+ return -EFAULT;
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
+ unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
- uaddr, oparg);
+ unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
+ uaddr, oparg, Efault);
break;
case FUTEX_OP_OR:
- __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
+ unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
+ unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
+ unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
break;
default:
- ret = -ENOSYS;
+ user_access_end();
+ return -ENOSYS;
}
-
- pagefault_enable();
-
- if (!ret)
- *oval = oldval;
-
- return ret;
+ user_access_end();
+ return 0;
+Efault:
+ user_access_end();
+ return -EFAULT;
}
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
- return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
+ int ret = 0;
+
+ if (!user_access_begin(uaddr, sizeof(u32)))
+ return -EFAULT;
+ asm volatile("\n"
+ "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+ "2:\n"
+ "\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE_UA(1b, 3b)
+ : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+ : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "memory"
+ );
+ user_access_end();
+ *uval = oldval;
+ return ret;
}
#endif
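
For illustration (not part of the patch): the unsafe_atomic_op*() macros jump to a caller-supplied label on fault instead of threading an error code through, which is what lets arch_futex_atomic_op_inuser() bracket the whole switch with a single user_access_begin()/user_access_end() pair. A hedged sketch of the generic-side calling pattern (kernel/futex.c does roughly this):

	static int example_futex_op(u32 __user *uaddr, int op, int oparg)
	{
		int oldval, ret;

		if (!access_ok(uaddr, sizeof(u32)))
			return -EFAULT;

		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		return oldval;	/* the caller compares oldval against cmparg */
	}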
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 4981c293f926..8f1e94f29a16 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -35,6 +35,9 @@
* The #define line may optionally include a comment including platform names.
*/
+/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
+#define INTEL_FAM6_ANY X86_MODEL_ANY
+
#define INTEL_FAM6_CORE_YONAH 0x0E
#define INTEL_FAM6_CORE2_MEROM 0x0F
@@ -118,17 +121,7 @@
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
-/* Useful macros */
-#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) \
-{ \
- .vendor = X86_VENDOR_INTEL, \
- .family = _family, \
- .model = _model, \
- .feature = X86_FEATURE_ANY, \
- .driver_data = (kernel_ulong_t)&_driver_data \
-}
-
-#define INTEL_CPU_FAM6(_model, _driver_data) \
- INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)
+/* Family 5 */
+#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index a176f6165d85..72fba0eeeb30 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,7 +36,7 @@ extern void native_init_IRQ(void);
extern void handle_irq(struct irq_desc *desc, struct pt_regs *regs);
-extern __visible unsigned int do_IRQ(struct pt_regs *regs);
+extern __visible void do_IRQ(struct pt_regs *regs);
extern void init_ISA_irqs(void);
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 95b1f053bd96..073eb7ad2f56 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -36,6 +36,7 @@ typedef u8 kprobe_opcode_t;
/* optinsn template addresses */
extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_clac[];
extern __visible kprobe_opcode_t optprobe_template_val[];
extern __visible kprobe_opcode_t optprobe_template_call[];
extern __visible kprobe_opcode_t optprobe_template_end[];
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4359b955e0b7..f9cea081c05b 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -102,7 +102,7 @@
#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
-#define MCE_LOG_LEN 32
+#define MCE_LOG_MIN_LEN 32U
#define MCE_LOG_SIGNATURE "MACHINECHECK"
/* AMD Scalable MCA */
@@ -135,11 +135,11 @@
*/
struct mce_log_buffer {
char signature[12]; /* "MACHINECHECK" */
- unsigned len; /* = MCE_LOG_LEN */
+ unsigned len; /* = elements in .mce_entry[] */
unsigned next;
unsigned flags;
unsigned recordlen; /* length of struct mce */
- struct mce entry[MCE_LOG_LEN];
+ struct mce entry[];
};
enum mce_notifier_prios {
@@ -238,9 +238,6 @@ extern void mce_disable_bank(int bank);
/*
* Exception handler
*/
-
-/* Call the installed machine check handler for this CPU setup. */
-extern void (*machine_check_vector)(struct pt_regs *, long error_code);
void do_machine_check(struct pt_regs *, long);
/*
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 6b79515abb82..edc2c581704a 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -46,7 +46,9 @@ typedef int (*hyperv_fill_flush_list_func)(
#define hv_set_reference_tsc(val) \
wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
#define hv_set_clocksource_vdso(val) \
- ((val).archdata.vclock_mode = VCLOCK_HVCLOCK)
+ ((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
+#define hv_enable_vdso_clocksource() \
+ vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
#define hv_get_raw_timer() rdtsc_ordered()
void hyperv_callback_vector(void);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index d5e517d1c3dd..12c9684d59ba 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -41,6 +41,10 @@
/* Intel MSRs. Some also available on other CPUs */
+#define MSR_TEST_CTRL 0x00000033
+#define MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT 29
+#define MSR_TEST_CTRL_SPLIT_LOCK_DETECT BIT(MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT)
+
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
@@ -70,6 +74,11 @@
*/
#define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U)
+/* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */
+#define MSR_IA32_CORE_CAPS 0x000000cf
+#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT 5
+#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT BIT(MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT)
+
#define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)
#define NHM_C1_AUTO_DEMOTE (1UL << 26)
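
For illustration (not part of the patch): a hedged sketch of how the split-lock series is expected to set the new MSR_TEST_CTRL bit on CPUs advertising X86_FEATURE_SPLIT_LOCK_DETECT:

	static void example_enable_split_lock_detect(void)
	{
		u64 test_ctrl;

		if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
			return;

		rdmsrl(MSR_TEST_CTRL, test_ctrl);
		test_ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
		wrmsrl(MSR_TEST_CTRL, test_ctrl);
	}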
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 9d5252c9685c..b809f117f3f4 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -23,6 +23,8 @@
#define MWAITX_MAX_LOOPS ((u32)-1)
#define MWAITX_DISABLE_CSTATES 0xf0
+u32 get_umwait_control_msr(void);
+
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
{
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 29964b0e1075..e855e9cf2c37 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -50,11 +50,22 @@
#define AMD64_L3_SLICE_SHIFT 48
#define AMD64_L3_SLICE_MASK \
- ((0xFULL) << AMD64_L3_SLICE_SHIFT)
+ (0xFULL << AMD64_L3_SLICE_SHIFT)
+#define AMD64_L3_SLICEID_MASK \
+ (0x7ULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_THREAD_SHIFT 56
#define AMD64_L3_THREAD_MASK \
- ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+ (0xFFULL << AMD64_L3_THREAD_SHIFT)
+#define AMD64_L3_F19H_THREAD_MASK \
+ (0x3ULL << AMD64_L3_THREAD_SHIFT)
+
+#define AMD64_L3_EN_ALL_CORES BIT_ULL(47)
+#define AMD64_L3_EN_ALL_SLICES BIT_ULL(46)
+
+#define AMD64_L3_COREID_SHIFT 42
+#define AMD64_L3_COREID_MASK \
+ (0x7ULL << AMD64_L3_COREID_SHIFT)
#define X86_RAW_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_EVENT | \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7e118660bbd9..afda66a6d325 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -595,12 +595,6 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
__pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
-static inline pud_t pud_mknotpresent(pud_t pud)
-{
- return pfn_pud(pud_pfn(pud),
- __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
-}
-
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -627,12 +621,15 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return __pmd(val);
}
-/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+/*
+ * mprotect needs to preserve PAT and encryption bits when updating
+ * vm_page_prot
+ */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
- pgprotval_t addbits = pgprot_val(newprot);
+ pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
return __pgprot(preservebits | addbits);
}
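
Worked example (not part of the patch) of why addbits must be masked: with _PAGE_ENC now in _PAGE_CHG_MASK, the encryption bit comes from oldprot only, and a conflicting value in newprot can no longer be ORed in:

	pgprot_t oldp = __pgprot(_PAGE_PRESENT | _PAGE_ENC);
	pgprot_t newp = __pgprot(_PAGE_PRESENT | _PAGE_RW);	/* no _PAGE_ENC */

	/* preservebits = _PAGE_ENC, addbits = _PAGE_PRESENT | _PAGE_RW */
	pgprot_t out = pgprot_modify(oldp, newp);
	/* pgprot_val(out) == _PAGE_PRESENT | _PAGE_RW | _PAGE_ENC */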
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0239998d8cdc..65c2ecd730c5 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -118,7 +118,7 @@
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 19b137f1b3be..2ff9b98812b7 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -4,6 +4,11 @@
#define ARCH_DEFAULT_PKEY 0
+/*
+ * If more than 16 keys are ever supported, a thorough audit
+ * will be necessary to ensure that the types that store key
+ * numbers and masks have sufficient capacity.
+ */
#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 3d4cb83a8828..69485ca13665 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -103,14 +103,14 @@ static __always_inline bool should_resched(int preempt_offset)
}
#ifdef CONFIG_PREEMPTION
- extern asmlinkage void ___preempt_schedule(void);
+ extern asmlinkage void preempt_schedule_thunk(void);
# define __preempt_schedule() \
- asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
+ asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
extern asmlinkage void preempt_schedule(void);
- extern asmlinkage void ___preempt_schedule_notrace(void);
+ extern asmlinkage void preempt_schedule_notrace_thunk(void);
# define __preempt_schedule_notrace() \
- asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
+ asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
extern asmlinkage void preempt_schedule_notrace(void);
#endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 09705ccc393c..3bcf27caf6c9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -26,6 +26,7 @@ struct vm86;
#include <asm/fpu/types.h>
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
+#include <asm/vdso/processor.h>
#include <linux/personality.h>
#include <linux/cache.h>
@@ -541,7 +542,6 @@ struct thread_struct {
mm_segment_t addr_limit;
unsigned int sig_on_uaccess_err:1;
- unsigned int uaccess_err:1; /* uaccess failed */
/* Floating point and extended processor state */
struct fpu fpu;
@@ -677,17 +677,6 @@ static inline unsigned int cpuid_edx(unsigned int op)
return edx;
}
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static __always_inline void rep_nop(void)
-{
- asm volatile("rep; nop" ::: "memory");
-}
-
-static __always_inline void cpu_relax(void)
-{
- rep_nop();
-}
-
/*
* This function forces the icache and prefetched instruction stream to
* catch up with reality in two very specific cases:
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 036c360910c5..a6e8373a5170 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_SECTIONS_H
#define _ASM_X86_SECTIONS_H
+#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+
#include <asm-generic/sections.h>
#include <asm/extable.h>
@@ -14,4 +16,22 @@ extern char __end_rodata_hpage_align[];
extern char __end_of_kernel_reserve[];
+extern unsigned long _brk_start, _brk_end;
+
+static inline bool arch_is_kernel_initmem_freed(unsigned long addr)
+{
+ /*
+ * If _brk_start has not been cleared, brk allocation is incomplete,
+ * and we can not make assumptions about its use.
+ */
+ if (_brk_start)
+ return 0;
+
+ /*
+ * After brk allocation is complete, space between _brk_end and _end
+ * is available for allocation.
+ */
+ return addr >= _brk_end && addr < (unsigned long)&_end;
+}
+
#endif /* _ASM_X86_SECTIONS_H */
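
For illustration (not part of the patch): with the arch override in place, generic code can reject pointers into freed init/brk memory, e.g. a hedged sketch:

	if (arch_is_kernel_initmem_freed((unsigned long)ptr))
		pr_warn("%px points into freed init/brk memory\n", ptr);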
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 64c3dce374e5..950532ccbc4a 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -46,6 +46,8 @@ int set_memory_4k(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
int set_memory_np_noalias(unsigned long addr, int numpages);
+int set_memory_nonglobal(unsigned long addr, int numpages);
+int set_memory_global(unsigned long addr, int numpages);
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index f176114c04d4..84eab2724875 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -33,11 +33,7 @@ struct sigframe_ia32 {
* legacy application accessing/modifying it.
*/
struct _fpstate_32 fpstate_unused;
-#ifdef CONFIG_IA32_EMULATION
- unsigned int extramask[_COMPAT_NSIG_WORDS-1];
-#else /* !CONFIG_IA32_EMULATION */
- unsigned long extramask[_NSIG_WORDS-1];
-#endif /* CONFIG_IA32_EMULATION */
+ unsigned int extramask[1];
char retcode[8];
/* fp state follows here */
};
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
index 2fcbd6f33ef7..65e667279e0f 100644
--- a/arch/x86/include/asm/sighandling.h
+++ b/arch/x86/include/asm/sighandling.h
@@ -14,12 +14,5 @@
X86_EFLAGS_CF | X86_EFLAGS_RF)
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
-int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
- struct pt_regs *regs, unsigned long mask);
-
-
-#ifdef CONFIG_X86_X32_ABI
-asmlinkage long sys32_x32_rt_sigreturn(void);
-#endif
#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 8db3fdb6102e..7cbf733d11af 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -13,23 +13,14 @@
#include <uapi/linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
-#include <asm/asm-offsets.h> /* For NR_syscalls */
#include <asm/thread_info.h> /* for TS_COMPAT */
#include <asm/unistd.h>
-#ifdef CONFIG_X86_64
-typedef asmlinkage long (*sys_call_ptr_t)(const struct pt_regs *);
-#else
-typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
- unsigned long, unsigned long,
- unsigned long, unsigned long);
-#endif /* CONFIG_X86_64 */
+typedef long (*sys_call_ptr_t)(const struct pt_regs *);
extern const sys_call_ptr_t sys_call_table[];
#if defined(CONFIG_X86_32)
#define ia32_sys_call_table sys_call_table
-#define __NR_syscall_compat_max __NR_syscall_max
-#define IA32_NR_syscalls NR_syscalls
#endif
#if defined(CONFIG_IA32_EMULATION)
@@ -168,6 +159,11 @@ static inline int syscall_get_arch(struct task_struct *task)
task->thread_info.status & TS_COMPAT)
? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
}
+
+void do_syscall_64(unsigned long nr, struct pt_regs *regs);
+void do_int80_syscall_32(struct pt_regs *regs);
+long do_fast_syscall_32(struct pt_regs *regs);
+
#endif /* CONFIG_X86_32 */
#endif /* _ASM_X86_SYSCALL_H */
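
For illustration (not part of the patch): with the unified pt_regs-based prototype, 64-bit dispatch reduces to one indirect call. A hedged sketch of the shape of do_syscall_64() in arch/x86/entry/common.c:

	void example_do_syscall_64(unsigned long nr, struct pt_regs *regs)
	{
		if (likely(nr < NR_syscalls)) {
			nr = array_index_nospec(nr, NR_syscalls);
			regs->ax = sys_call_table[nr](regs);
		}
	}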
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index e2389ce9bf58..a84333adeef2 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -8,6 +8,50 @@
struct pt_regs;
+extern long __x64_sys_ni_syscall(const struct pt_regs *regs);
+extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+
+/*
+ * Instead of the generic __SYSCALL_DEFINEx() definition, the x86 version takes
+ * struct pt_regs *regs as the only argument of the syscall stub(s) named as:
+ * __x64_sys_*() - 64-bit native syscall
+ * __ia32_sys_*() - 32-bit native syscall or common compat syscall
+ * __ia32_compat_sys_*() - 32-bit compat syscall
+ * __x32_compat_sys_*() - 64-bit X32 compat syscall
+ *
+ * The registers are decoded according to the ABI:
+ * 64-bit: RDI, RSI, RDX, R10, R8, R9
+ * 32-bit: EBX, ECX, EDX, ESI, EDI, EBP
+ *
+ * The stub then passes the decoded arguments to the __se_sys_*() wrapper to
+ * perform sign-extension (omitted for zero-argument syscalls). Finally the
+ * arguments are passed to the __do_sys_*() function which is the actual
+ * syscall. These wrappers are marked as inline so the compiler can optimize
+ * the functions where appropriate.
+ *
+ * Example assembly (slightly re-ordered for better readability):
+ *
+ * <__x64_sys_recv>: <-- syscall with 4 parameters
+ * callq <__fentry__>
+ *
+ * mov 0x70(%rdi),%rdi <-- decode regs->di
+ * mov 0x68(%rdi),%rsi <-- decode regs->si
+ * mov 0x60(%rdi),%rdx <-- decode regs->dx
+ * mov 0x38(%rdi),%rcx <-- decode regs->r10
+ *
+ * xor %r9d,%r9d <-- clear %r9
+ * xor %r8d,%r8d <-- clear %r8
+ *
+ * callq __sys_recvfrom <-- do the actual work in __sys_recvfrom()
+ * which takes 6 arguments
+ *
+ * cltq <-- extend return value to 64-bit
+ * retq <-- return
+ *
+ * This approach avoids leaking random user-provided register content down
+ * the call chain.
+ */
+
/* Mapping of registers to parameters for syscalls on x86-64 and x32 */
#define SC_X86_64_REGS_TO_ARGS(x, ...) \
__MAP(x,__SC_ARGS \
@@ -21,68 +65,96 @@ struct pt_regs;
,,(unsigned int)regs->dx,,(unsigned int)regs->si \
,,(unsigned int)regs->di,,(unsigned int)regs->bp)
-#ifdef CONFIG_IA32_EMULATION
-/*
- * For IA32 emulation, we need to handle "compat" syscalls *and* create
- * additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
- * ia32 regs in the proper order for shared or "common" syscalls. As some
- * syscalls may not be implemented, we need to expand COND_SYSCALL in
- * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
- * case as well.
- */
-#define __IA32_COMPAT_SYS_STUB0(x, name) \
- asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs);\
- ALLOW_ERROR_INJECTION(__ia32_compat_sys_##name, ERRNO); \
- asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs)\
+#define __SYS_STUB0(abi, name) \
+ long __##abi##_##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__##abi##_##name, ERRNO); \
+ long __##abi##_##name(const struct pt_regs *regs) \
+ __alias(__do_##name);
+
+#define __SYS_STUBx(abi, name, ...) \
+ long __##abi##_##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__##abi##_##name, ERRNO); \
+ long __##abi##_##name(const struct pt_regs *regs) \
{ \
- return __se_compat_sys_##name(); \
+ return __se_##name(__VA_ARGS__); \
}
-#define __IA32_COMPAT_SYS_STUBx(x, name, ...) \
- asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs);\
- ALLOW_ERROR_INJECTION(__ia32_compat_sys##name, ERRNO); \
- asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs)\
+#define __COND_SYSCALL(abi, name) \
+ __weak long __##abi##_##name(const struct pt_regs *__unused) \
{ \
- return __se_compat_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
+ return sys_ni_syscall(); \
}
+#define __SYS_NI(abi, name) \
+ SYSCALL_ALIAS(__##abi##_##name, sys_ni_posix_timers);
+
+#ifdef CONFIG_X86_64
+#define __X64_SYS_STUB0(name) \
+ __SYS_STUB0(x64, sys_##name)
+
+#define __X64_SYS_STUBx(x, name, ...) \
+ __SYS_STUBx(x64, sys##name, \
+ SC_X86_64_REGS_TO_ARGS(x, __VA_ARGS__))
+
+#define __X64_COND_SYSCALL(name) \
+ __COND_SYSCALL(x64, sys_##name)
+
+#define __X64_SYS_NI(name) \
+ __SYS_NI(x64, sys_##name)
+#else /* CONFIG_X86_64 */
+#define __X64_SYS_STUB0(name)
+#define __X64_SYS_STUBx(x, name, ...)
+#define __X64_COND_SYSCALL(name)
+#define __X64_SYS_NI(name)
+#endif /* CONFIG_X86_64 */
+
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+#define __IA32_SYS_STUB0(name) \
+ __SYS_STUB0(ia32, sys_##name)
+
#define __IA32_SYS_STUBx(x, name, ...) \
- asmlinkage long __ia32_sys##name(const struct pt_regs *regs); \
- ALLOW_ERROR_INJECTION(__ia32_sys##name, ERRNO); \
- asmlinkage long __ia32_sys##name(const struct pt_regs *regs) \
- { \
- return __se_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
- }
+ __SYS_STUBx(ia32, sys##name, \
+ SC_IA32_REGS_TO_ARGS(x, __VA_ARGS__))
+
+#define __IA32_COND_SYSCALL(name) \
+ __COND_SYSCALL(ia32, sys_##name)
+#define __IA32_SYS_NI(name) \
+ __SYS_NI(ia32, sys_##name)
+#else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+#define __IA32_SYS_STUB0(name)
+#define __IA32_SYS_STUBx(x, name, ...)
+#define __IA32_COND_SYSCALL(name)
+#define __IA32_SYS_NI(name)
+#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+
+#ifdef CONFIG_IA32_EMULATION
/*
- * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
- * named __ia32_sys_*()
+ * For IA32 emulation, we need to handle "compat" syscalls *and* create
+ * additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
+ * ia32 regs in the proper order for shared or "common" syscalls. As some
+ * syscalls may not be implemented, we need to expand COND_SYSCALL in
+ * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
+ * case as well.
*/
+#define __IA32_COMPAT_SYS_STUB0(name) \
+ __SYS_STUB0(ia32, compat_sys_##name)
-#define SYSCALL_DEFINE0(sname) \
- SYSCALL_METADATA(_##sname, 0); \
- asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
- ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
- SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
- asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
+#define __IA32_COMPAT_SYS_STUBx(x, name, ...) \
+ __SYS_STUBx(ia32, compat_sys##name, \
+ SC_IA32_REGS_TO_ARGS(x, __VA_ARGS__))
-#define COND_SYSCALL(name) \
- asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \
- { \
- return sys_ni_syscall(); \
- } \
- asmlinkage __weak long __ia32_sys_##name(const struct pt_regs *__unused)\
- { \
- return sys_ni_syscall(); \
- }
+#define __IA32_COMPAT_COND_SYSCALL(name) \
+ __COND_SYSCALL(ia32, compat_sys_##name)
-#define SYS_NI(name) \
- SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); \
- SYSCALL_ALIAS(__ia32_sys_##name, sys_ni_posix_timers)
+#define __IA32_COMPAT_SYS_NI(name) \
+ __SYS_NI(ia32, compat_sys_##name)
#else /* CONFIG_IA32_EMULATION */
+#define __IA32_COMPAT_SYS_STUB0(name)
#define __IA32_COMPAT_SYS_STUBx(x, name, ...)
-#define __IA32_SYS_STUBx(x, fullname, name, ...)
+#define __IA32_COMPAT_COND_SYSCALL(name)
+#define __IA32_COMPAT_SYS_NI(name)
#endif /* CONFIG_IA32_EMULATION */
@@ -92,25 +164,23 @@ struct pt_regs;
* of the x86-64-style parameter ordering of x32 syscalls. The syscalls common
* with x86_64 obviously do not need such care.
*/
-#define __X32_COMPAT_SYS_STUB0(x, name, ...) \
- asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs);\
- ALLOW_ERROR_INJECTION(__x32_compat_sys_##name, ERRNO); \
- asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs)\
- { \
- return __se_compat_sys_##name();\
- }
+#define __X32_COMPAT_SYS_STUB0(name) \
+ __SYS_STUB0(x32, compat_sys_##name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...) \
- asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs);\
- ALLOW_ERROR_INJECTION(__x32_compat_sys##name, ERRNO); \
- asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs)\
- { \
- return __se_compat_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
- }
+ __SYS_STUBx(x32, compat_sys##name, \
+ SC_X86_64_REGS_TO_ARGS(x, __VA_ARGS__))
+
+#define __X32_COMPAT_COND_SYSCALL(name) \
+ __COND_SYSCALL(x32, compat_sys_##name)
+#define __X32_COMPAT_SYS_NI(name) \
+ __SYS_NI(x32, compat_sys_##name)
#else /* CONFIG_X86_X32 */
-#define __X32_COMPAT_SYS_STUB0(x, name)
+#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
+#define __X32_COMPAT_COND_SYSCALL(name)
+#define __X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_X86_X32 */
@@ -121,15 +191,12 @@ struct pt_regs;
* of them.
*/
#define COMPAT_SYSCALL_DEFINE0(name) \
- static long __se_compat_sys_##name(void); \
- static inline long __do_compat_sys_##name(void); \
- __IA32_COMPAT_SYS_STUB0(x, name) \
- __X32_COMPAT_SYS_STUB0(x, name) \
- static long __se_compat_sys_##name(void) \
- { \
- return __do_compat_sys_##name(); \
- } \
- static inline long __do_compat_sys_##name(void)
+ static long \
+ __do_compat_sys_##name(const struct pt_regs *__unused); \
+ __IA32_COMPAT_SYS_STUB0(name) \
+ __X32_COMPAT_SYS_STUB0(name) \
+ static long \
+ __do_compat_sys_##name(const struct pt_regs *__unused)
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
@@ -148,58 +215,19 @@ struct pt_regs;
* kernel/time/posix-stubs.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
- cond_syscall(__ia32_compat_sys_##name); \
- cond_syscall(__x32_compat_sys_##name)
+ __IA32_COMPAT_COND_SYSCALL(name) \
+ __X32_COMPAT_COND_SYSCALL(name)
#define COMPAT_SYS_NI(name) \
- SYSCALL_ALIAS(__ia32_compat_sys_##name, sys_ni_posix_timers); \
- SYSCALL_ALIAS(__x32_compat_sys_##name, sys_ni_posix_timers)
+ __IA32_COMPAT_SYS_NI(name) \
+ __X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_COMPAT */
-
-/*
- * Instead of the generic __SYSCALL_DEFINEx() definition, this macro takes
- * struct pt_regs *regs as the only argument of the syscall stub named
- * __x64_sys_*(). It decodes just the registers it needs and passes them on to
- * the __se_sys_*() wrapper performing sign extension and then to the
- * __do_sys_*() function doing the actual job. These wrappers and functions
- * are inlined (at least in very most cases), meaning that the assembly looks
- * as follows (slightly re-ordered for better readability):
- *
- * <__x64_sys_recv>: <-- syscall with 4 parameters
- * callq <__fentry__>
- *
- * mov 0x70(%rdi),%rdi <-- decode regs->di
- * mov 0x68(%rdi),%rsi <-- decode regs->si
- * mov 0x60(%rdi),%rdx <-- decode regs->dx
- * mov 0x38(%rdi),%rcx <-- decode regs->r10
- *
- * xor %r9d,%r9d <-- clear %r9
- * xor %r8d,%r8d <-- clear %r8
- *
- * callq __sys_recvfrom <-- do the actual work in __sys_recvfrom()
- * which takes 6 arguments
- *
- * cltq <-- extend return value to 64-bit
- * retq <-- return
- *
- * This approach avoids leaking random user-provided register content down
- * the call chain.
- *
- * If IA32_EMULATION is enabled, this macro generates an additional wrapper
- * named __ia32_sys_*() which decodes the struct pt_regs *regs according
- * to the i386 calling convention (bx, cx, dx, si, di, bp).
- */
#define __SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long __x64_sys##name(const struct pt_regs *regs); \
- ALLOW_ERROR_INJECTION(__x64_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
- asmlinkage long __x64_sys##name(const struct pt_regs *regs) \
- { \
- return __se_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
- } \
+ __X64_SYS_STUBx(x, name, __VA_ARGS__) \
__IA32_SYS_STUBx(x, name, __VA_ARGS__) \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
@@ -217,33 +245,28 @@ struct pt_regs;
* SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
* macros to work correctly.
*/
-#ifndef SYSCALL_DEFINE0
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
- asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
- ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
- asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
-#endif
-
-#ifndef COND_SYSCALL
-#define COND_SYSCALL(name) \
- asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \
- { \
- return sys_ni_syscall(); \
- }
-#endif
+ static long __do_sys_##sname(const struct pt_regs *__unused); \
+ __X64_SYS_STUB0(sname) \
+ __IA32_SYS_STUB0(sname) \
+ static long __do_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name) \
+ __X64_COND_SYSCALL(name) \
+ __IA32_COND_SYSCALL(name)
-#ifndef SYS_NI
-#define SYS_NI(name) SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers);
-#endif
+#define SYS_NI(name) \
+ __X64_SYS_NI(name) \
+ __IA32_SYS_NI(name)
/*
* For VSYSCALLS, we need to declare these three syscalls with the new
* pt_regs-based calling convention for in-kernel use.
*/
-asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs);
-asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs);
-asmlinkage long __x64_sys_time(const struct pt_regs *regs);
+long __x64_sys_getcpu(const struct pt_regs *regs);
+long __x64_sys_gettimeofday(const struct pt_regs *regs);
+long __x64_sys_time(const struct pt_regs *regs);
#endif /* _ASM_X86_SYSCALL_WRAPPER_H */
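
For illustration (not part of the patch): a hedged sketch of what SYSCALL_DEFINE1(example, int, fd) now expands to on a CONFIG_X86_64 kernel without IA32 emulation, with the sign-extension plumbing simplified:

	long __x64_sys_example(const struct pt_regs *regs)
	{
		return __se_sys_example(regs->di);	/* decode one register */
	}

	static long __se_sys_example(long fd)
	{
		return __do_sys_example((int)fd);	/* narrow/sign-extend */
	}

	static inline long __do_sys_example(int fd)
	{
		/* the syscall body as written at the definition site */
		return 0;
	}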
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 91b7b6e1a115..6714a358235d 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -8,42 +8,8 @@
#ifndef _ASM_X86_SYSCALLS_H
#define _ASM_X86_SYSCALLS_H
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <linux/signal.h>
-#include <linux/types.h>
-
/* Common in X86_32 and X86_64 */
/* kernel/ioport.c */
long ksys_ioperm(unsigned long from, unsigned long num, int turn_on);
-#ifdef CONFIG_X86_32
-/*
- * These definitions are only valid on pure 32-bit systems; x86-64 uses a
- * different syscall calling convention
- */
-asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
-asmlinkage long sys_iopl(unsigned int);
-
-/* kernel/ldt.c */
-asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
-
-/* kernel/signal.c */
-asmlinkage long sys_rt_sigreturn(void);
-
-/* kernel/tls.c */
-asmlinkage long sys_set_thread_area(struct user_desc __user *);
-asmlinkage long sys_get_thread_area(struct user_desc __user *);
-
-/* X86_32 only */
-
-/* kernel/signal.c */
-asmlinkage long sys_sigreturn(void);
-
-/* kernel/vm86_32.c */
-struct vm86_struct;
-asmlinkage long sys_vm86old(struct vm86_struct __user *);
-asmlinkage long sys_vm86(unsigned long, unsigned long);
-
-#endif /* CONFIG_X86_32 */
#endif /* _ASM_X86_SYSCALLS_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index cf4327986e98..8de8ceccb8bc 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -92,7 +92,7 @@ struct thread_info {
#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* IA32 compatibility process */
-#define TIF_NOHZ 19 /* in adaptive nohz mode */
+#define TIF_SLD 18 /* Restore split lock detection on context switch */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
@@ -122,7 +122,7 @@ struct thread_info {
#define _TIF_NOCPUID (1 << TIF_NOCPUID)
#define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_IA32 (1 << TIF_IA32)
-#define _TIF_NOHZ (1 << TIF_NOHZ)
+#define _TIF_SLD (1 << TIF_SLD)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
@@ -133,19 +133,15 @@ struct thread_info {
#define _TIF_X32 (1 << TIF_X32)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
-/*
- * work to do in syscall_trace_enter(). Also includes TIF_NOHZ for
- * enter_from_user_mode()
- */
+/* Work to do before invoking the actual syscall. */
#define _TIF_WORK_SYSCALL_ENTRY \
(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_NOHZ)
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE \
(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP | \
- _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
+ _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE | _TIF_SLD)
/*
* Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4b14d2318251..79d8d5496330 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -193,4 +193,29 @@ static inline void sched_clear_itmt_support(void)
}
#endif /* CONFIG_SCHED_MC_PRIO */
+#ifdef CONFIG_SMP
+#include <asm/cpufeature.h>
+
+DECLARE_STATIC_KEY_FALSE(arch_scale_freq_key);
+
+#define arch_scale_freq_invariant() static_branch_likely(&arch_scale_freq_key)
+
+DECLARE_PER_CPU(unsigned long, arch_freq_scale);
+
+static inline long arch_scale_freq_capacity(int cpu)
+{
+ return per_cpu(arch_freq_scale, cpu);
+}
+#define arch_scale_freq_capacity arch_scale_freq_capacity
+
+extern void arch_scale_freq_tick(void);
+#define arch_scale_freq_tick arch_scale_freq_tick
+
+extern void arch_set_max_freq_ratio(bool turbo_disabled);
+#else
+static inline void arch_set_max_freq_ratio(bool turbo_disabled)
+{
+}
+#endif
+
#endif /* _ASM_X86_TOPOLOGY_H */
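
For illustration (not part of the patch): the frequency-invariance hooks let the scheduler scale raw utilization by the current-to-maximum frequency ratio. A hedged consumer-side sketch, assuming the usual SCHED_CAPACITY_SHIFT fixed-point convention:

	static unsigned long example_freq_invariant_util(unsigned long util,
							 int cpu)
	{
		/* arch_freq_scale approximates
		 * SCHED_CAPACITY_SCALE * f_cur / f_max and is refreshed
		 * from APERF/MPERF deltas in arch_scale_freq_tick(). */
		return (util * arch_scale_freq_capacity(cpu))
				>> SCHED_CAPACITY_SHIFT;
	}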
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index ffa0dc8a535e..c26a7e1d8a2c 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -76,27 +76,24 @@ dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long err
dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code);
-#ifdef CONFIG_X86_64
-dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long address);
-asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
-asmlinkage __visible notrace
-struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
-void __init trap_init(void);
-#endif
dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code);
dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code);
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code);
dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code);
-#ifdef CONFIG_X86_MCE
-dotraplinkage void do_machine_check(struct pt_regs *regs, long error_code);
-#endif
dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code);
#endif
dotraplinkage void do_mce(struct pt_regs *regs, long error_code);
+#ifdef CONFIG_X86_64
+asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
+asmlinkage __visible notrace
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
+void __init trap_init(void);
+#endif
+
static inline int get_si_code(unsigned long condition)
{
if (condition & DR_STEP)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 61d93f062a36..c8247a84244b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -193,23 +193,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
: : "A" (x), "r" (addr) \
: : label)
-#define __put_user_asm_ex_u64(x, addr) \
- asm volatile("\n" \
- "1: movl %%eax,0(%1)\n" \
- "2: movl %%edx,4(%1)\n" \
- "3:" \
- _ASM_EXTABLE_EX(1b, 2b) \
- _ASM_EXTABLE_EX(2b, 3b) \
- : : "A" (x), "r" (addr))
-
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
__put_user_goto(x, ptr, "q", "", "er", label)
-#define __put_user_asm_ex_u64(x, addr) \
- __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
@@ -289,31 +278,6 @@ do { \
} \
} while (0)
-/*
- * This doesn't do __uaccess_begin/end - the exception handling
- * around it must do that.
- */
-#define __put_user_size_ex(x, ptr, size) \
-do { \
- __chk_user_ptr(ptr); \
- switch (size) { \
- case 1: \
- __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
- break; \
- case 2: \
- __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
- break; \
- case 4: \
- __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
- break; \
- case 8: \
- __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
- break; \
- default: \
- __put_user_bad(); \
- } \
-} while (0)
-
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) \
({ \
@@ -335,12 +299,9 @@ do { \
"i" (errret), "0" (retval)); \
})
-#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
-#define __get_user_asm_ex_u64(x, ptr) \
- __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif
#define __get_user_size(x, ptr, size, retval, errret) \
@@ -378,53 +339,6 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
-#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile("\n" \
- "1: mov"itype" %2,%"rtype"1\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %3,%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
- : "=r" (err), ltype(x) \
- : "m" (__m(addr)), "i" (errret), "0" (err))
-
-/*
- * This doesn't do __uaccess_begin/end - the exception handling
- * around it must do that.
- */
-#define __get_user_size_ex(x, ptr, size) \
-do { \
- __chk_user_ptr(ptr); \
- switch (size) { \
- case 1: \
- __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
- break; \
- case 2: \
- __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
- break; \
- case 4: \
- __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
- break; \
- case 8: \
- __get_user_asm_ex_u64(x, ptr); \
- break; \
- default: \
- (x) = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:xor"itype" %"rtype"0,%"rtype"0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_EX(1b, 3b) \
- : ltype(x) : "m" (__m(addr)))
-
#define __put_user_nocheck(x, ptr, size) \
({ \
__label__ __pu_label; \
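 
 __put_user_nocheck(), whose opening lines appear in the context above, is
 the label-based consumer that made __put_user_size_ex() redundant: on a
 fault the extable entry jumps straight to __pu_label inside the macro.
 Roughly (a sketch; the exact temporaries vary between kernel versions):
 
 #define __put_user_nocheck(x, ptr, size)                        \
 ({                                                              \
         __label__ __pu_label;                                   \
         int __pu_err = -EFAULT;                                 \
         __typeof__(*(ptr)) __pu_val = (x);                      \
         __uaccess_begin();                                      \
         __put_user_size(__pu_val, (ptr), (size), __pu_label);   \
         __pu_err = 0;                                           \
 __pu_label:                                                     \
         __uaccess_end();                                        \
         __builtin_expect(__pu_err, 0);                          \
 })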
@@ -480,29 +394,6 @@ struct __large_struct { unsigned long buf[100]; };
retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
} while (0)
-#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
- "2:\n" \
- _ASM_EXTABLE_EX(1b, 2b) \
- : : ltype(x), "m" (__m(addr)))
-
-/*
- * uaccess_try and catch
- */
-#define uaccess_try do { \
- current->thread.uaccess_err = 0; \
- __uaccess_begin(); \
- barrier();
-
-#define uaccess_try_nospec do { \
- current->thread.uaccess_err = 0; \
- __uaccess_begin_nospec(); \
-
-#define uaccess_catch(err) \
- __uaccess_end(); \
- (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
-} while (0)
-
/**
* __get_user - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
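 
 The uaccess_try/uaccess_catch machinery deleted above parked faults in
 current->thread.uaccess_err instead of reporting them per access, which is
 exactly the state the removed ex_handler_ext maintained. For reference, a
 hypothetical caller in the old style (save_ip_sp_old_style is an
 illustrative name, not a kernel function):
 
 static int save_ip_sp_old_style(struct sigcontext __user *sc,
                                 struct pt_regs *regs)
 {
         int err = 0;
 
         put_user_try {
                 put_user_ex(regs->ip, &sc->ip); /* fault recorded, not returned */
                 put_user_ex(regs->sp, &sc->sp);
         } put_user_catch(err);                  /* err |= -EFAULT if anything faulted */
 
         return err;
 }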
@@ -552,28 +443,6 @@ struct __large_struct { unsigned long buf[100]; };
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-/*
- * {get|put}_user_try and catch
- *
- * get_user_try {
- * get_user_ex(...);
- * } get_user_catch(err)
- */
-#define get_user_try uaccess_try_nospec
-#define get_user_catch(err) uaccess_catch(err)
-
-#define get_user_ex(x, ptr) do { \
- unsigned long __gue_val; \
- __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
- (x) = (__force __typeof__(*(ptr)))__gue_val; \
-} while (0)
-
-#define put_user_try uaccess_try
-#define put_user_catch(err) uaccess_catch(err)
-
-#define put_user_ex(x, ptr) \
- __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
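 
 The replacement for the try/catch blocks is the
 user_access_begin()/unsafe_put_user()/user_access_end() pattern, where
 every access names an explicit error label. A minimal sketch of the same
 operation in the new style (setup_ip_sp is an illustrative name):
 
 static int setup_ip_sp(struct sigcontext __user *sc, struct pt_regs *regs)
 {
         if (!user_access_begin(sc, sizeof(*sc)))        /* access_ok() + STAC */
                 return -EFAULT;
         unsafe_put_user(regs->ip, &sc->ip, Efault);     /* jumps to Efault on fault */
         unsafe_put_user(regs->sp, &sc->sp, Efault);
         user_access_end();                              /* CLAC */
         return 0;
 
 Efault:
         user_access_end();
         return -EFAULT;
 }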
@@ -584,99 +453,6 @@ extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
-extern void __cmpxchg_wrong_size(void)
- __compiletime_error("Bad argument size for cmpxchg");
-
-#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
-({ \
- int __ret = 0; \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __uaccess_begin_nospec(); \
- switch (size) { \
- case 1: \
- { \
- asm volatile("\n" \
- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
- "2:\n" \
- "\t.section .fixup, \"ax\"\n" \
- "3:\tmov %3, %0\n" \
- "\tjmp 2b\n" \
- "\t.previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
- : "i" (-EFAULT), "q" (__new), "1" (__old) \
- : "memory" \
- ); \
- break; \
- } \
- case 2: \
- { \
- asm volatile("\n" \
- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
- "2:\n" \
- "\t.section .fixup, \"ax\"\n" \
- "3:\tmov %3, %0\n" \
- "\tjmp 2b\n" \
- "\t.previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
- : "i" (-EFAULT), "r" (__new), "1" (__old) \
- : "memory" \
- ); \
- break; \
- } \
- case 4: \
- { \
- asm volatile("\n" \
- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
- "2:\n" \
- "\t.section .fixup, \"ax\"\n" \
- "3:\tmov %3, %0\n" \
- "\tjmp 2b\n" \
- "\t.previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
- : "i" (-EFAULT), "r" (__new), "1" (__old) \
- : "memory" \
- ); \
- break; \
- } \
- case 8: \
- { \
- if (!IS_ENABLED(CONFIG_X86_64)) \
- __cmpxchg_wrong_size(); \
- \
- asm volatile("\n" \
- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
- "2:\n" \
- "\t.section .fixup, \"ax\"\n" \
- "3:\tmov %3, %0\n" \
- "\tjmp 2b\n" \
- "\t.previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
- : "i" (-EFAULT), "r" (__new), "1" (__old) \
- : "memory" \
- ); \
- break; \
- } \
- default: \
- __cmpxchg_wrong_size(); \
- } \
- __uaccess_end(); \
- *(uval) = __old; \
- __ret; \
-})
-
-#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
-({ \
- access_ok((ptr), sizeof(*(ptr))) ? \
- __user_atomic_cmpxchg_inatomic((uval), (ptr), \
- (old), (new), sizeof(*(ptr))) : \
- -EFAULT; \
-})
-
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
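 
 The only in-tree user of user_atomic_cmpxchg_inatomic() was the futex
 code, which carries its own futex_atomic_cmpxchg_inatomic() in
 asm/futex.h, so the generic interface goes away. For illustration, a
 hypothetical caller of the removed macro (cmpxchg_user_u32 is an invented
 name):
 
 static int cmpxchg_user_u32(u32 __user *uaddr, u32 oldval, u32 newval,
                             u32 *curval)
 {
         int ret;
 
         pagefault_disable();    /* "inatomic": no faulting allowed */
         ret = user_atomic_cmpxchg_inatomic(curval, uaddr, oldval, newval);
         pagefault_enable();
 
         return ret;             /* 0 on success, -EFAULT on fault */
 }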
@@ -695,15 +471,6 @@ extern struct movsl_mask {
#endif
/*
- * We rely on the nested NMI work to allow atomic faults from the NMI path; the
- * nested NMI paths are careful to preserve CR2.
- *
- * Caller must use pagefault_enable/disable, or run in interrupt context,
- * and also do a uaccess_ok() check
- */
-#define __copy_from_user_nmi __copy_from_user_inatomic
-
-/*
* The "unsafe" user accesses aren't really "unsafe", but the naming
* is a big fat warning: you have to not only do the access_ok()
* checking before using them, but you have to surround them with the
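 
 With the __copy_from_user_nmi alias gone, copy_from_user_nmi() (still
 declared further up in this header) exists only as an out-of-line
 function. Its shape, as a rough sketch of arch/x86/lib/usercopy.c around
 this time:
 
 unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 {
         unsigned long ret;
 
         if (__range_not_ok(from, n, TASK_SIZE))
                 return n;
         if (!nmi_uaccess_okay())        /* e.g. NMI hit mid CR3 switch */
                 return n;
 
         /* Disable page faults so behaviour is consistent in any context. */
         pagefault_disable();
         ret = __copy_from_user_inatomic(to, from, n);
         pagefault_enable();
 
         return ret;                     /* bytes *not* copied */
 }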
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index ba2dc1930630..388a40660c7b 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -23,33 +23,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
static __always_inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
- switch (n) {
- case 1:
- ret = 0;
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u8 *)to, from, ret,
- "b", "b", "=q", 1);
- __uaccess_end();
- return ret;
- case 2:
- ret = 0;
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u16 *)to, from, ret,
- "w", "w", "=r", 2);
- __uaccess_end();
- return ret;
- case 4:
- ret = 0;
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u32 *)to, from, ret,
- "l", "k", "=r", 4);
- __uaccess_end();
- return ret;
- }
- }
return __copy_user_ll(to, (__force const void *)from, n);
}
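 
 Every 32-bit copy now funnels through __copy_user_ll(), constant size or
 not. For orientation, the generic wrapper that eventually lands here is
 roughly (sketch of include/linux/uaccess.h):
 
 static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
         if (likely(check_copy_size(to, n, false)))
                 n = _copy_from_user(to, from, n); /* access_ok() + raw_copy_from_user() */
         return n;                                 /* bytes not copied */
 }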
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 5cd1caa8bc65..bc10e3dc64fe 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -65,117 +65,13 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len)
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
- int ret = 0;
-
- if (!__builtin_constant_p(size))
- return copy_user_generic(dst, (__force void *)src, size);
- switch (size) {
- case 1:
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
- ret, "b", "b", "=q", 1);
- __uaccess_end();
- return ret;
- case 2:
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
- ret, "w", "w", "=r", 2);
- __uaccess_end();
- return ret;
- case 4:
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
- ret, "l", "k", "=r", 4);
- __uaccess_end();
- return ret;
- case 8:
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
- ret, "q", "", "=r", 8);
- __uaccess_end();
- return ret;
- case 10:
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
- ret, "q", "", "=r", 10);
- if (likely(!ret))
- __get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
- (u16 __user *)(8 + (char __user *)src),
- ret, "w", "w", "=r", 2);
- __uaccess_end();
- return ret;
- case 16:
- __uaccess_begin_nospec();
- __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
- ret, "q", "", "=r", 16);
- if (likely(!ret))
- __get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
- (u64 __user *)(8 + (char __user *)src),
- ret, "q", "", "=r", 8);
- __uaccess_end();
- return ret;
- default:
- return copy_user_generic(dst, (__force void *)src, size);
- }
+ return copy_user_generic(dst, (__force void *)src, size);
}
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
- int ret = 0;
-
- if (!__builtin_constant_p(size))
- return copy_user_generic((__force void *)dst, src, size);
- switch (size) {
- case 1:
- __uaccess_begin();
- __put_user_asm(*(u8 *)src, (u8 __user *)dst,
- ret, "b", "b", "iq", 1);
- __uaccess_end();
- return ret;
- case 2:
- __uaccess_begin();
- __put_user_asm(*(u16 *)src, (u16 __user *)dst,
- ret, "w", "w", "ir", 2);
- __uaccess_end();
- return ret;
- case 4:
- __uaccess_begin();
- __put_user_asm(*(u32 *)src, (u32 __user *)dst,
- ret, "l", "k", "ir", 4);
- __uaccess_end();
- return ret;
- case 8:
- __uaccess_begin();
- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
- ret, "q", "", "er", 8);
- __uaccess_end();
- return ret;
- case 10:
- __uaccess_begin();
- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
- ret, "q", "", "er", 10);
- if (likely(!ret)) {
- asm("":::"memory");
- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
- ret, "w", "w", "ir", 2);
- }
- __uaccess_end();
- return ret;
- case 16:
- __uaccess_begin();
- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
- ret, "q", "", "er", 16);
- if (likely(!ret)) {
- asm("":::"memory");
- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
- ret, "q", "", "er", 8);
- }
- __uaccess_end();
- return ret;
- default:
- return copy_user_generic((__force void *)dst, src, size);
- }
+ return copy_user_generic((__force void *)dst, src, size);
}
static __always_inline __must_check
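 
 Both fixed-size ladders collapse into copy_user_generic(), which is
 defined earlier in this header and picks its implementation at boot via
 alternatives. Roughly (a simplified sketch):
 
 static __always_inline __must_check unsigned long
 copy_user_generic(void *to, const void *from, unsigned len)
 {
         unsigned ret;
 
         /*
          * ERMS CPUs get copy_user_enhanced_fast_string, REP_GOOD CPUs
          * copy_user_generic_string, everything else the unrolled copy.
          */
         alternative_call_2(copy_user_generic_unrolled,
                            copy_user_generic_string, X86_FEATURE_REP_GOOD,
                            copy_user_enhanced_fast_string, X86_FEATURE_ERMS,
                            ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                        "=d" (len)),
                            "1" (to), "2" (from), "3" (len)
                            : "memory", "rcx", "r8", "r9", "r10", "r11");
         return ret;
 }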
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index a7dd080749ce..c1c3d31b15c0 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -13,10 +13,13 @@
# define __ARCH_WANT_SYS_OLD_MMAP
# define __ARCH_WANT_SYS_OLD_SELECT
+# define __NR_ia32_syscall_max __NR_syscall_max
+
# else
# include <asm/unistd_64.h>
# include <asm/unistd_64_x32.h>
+# include <asm/unistd_32_ia32.h>
# define __ARCH_WANT_SYS_TIME
# define __ARCH_WANT_SYS_UTIME
# define __ARCH_WANT_COMPAT_SYS_PREADV64
@@ -26,6 +29,10 @@
# endif
+# define NR_syscalls (__NR_syscall_max + 1)
+# define X32_NR_syscalls (__NR_x32_syscall_max + 1)
+# define IA32_NR_syscalls (__NR_ia32_syscall_max + 1)
+
# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
# define __ARCH_WANT_OLD_STAT
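 
 Defining NR_syscalls and friends next to the __NR_*_max values lets the
 entry code bounds-check syscall numbers without extra headers. The heart
 of the 64-bit dispatcher is roughly this fragment (modelled on
 do_syscall_64):
 
         if (likely(nr < NR_syscalls)) {
                 nr = array_index_nospec(nr, NR_syscalls); /* Spectre-v1 clamp */
                 regs->ax = sys_call_table[nr](regs);
         }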
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 7803114aa140..13687bf0e0a9 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -858,4 +858,6 @@ static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
return 1;
}
+void uv_bau_message_interrupt(struct pt_regs *regs);
+
#endif /* _ASM_X86_UV_UV_BAU_H */
diff --git a/arch/x86/include/asm/vdso/clocksource.h b/arch/x86/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..119ac8612d89
--- /dev/null
+++ b/arch/x86/include/asm/vdso/clocksource.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_CLOCKSOURCE_H
+#define __ASM_VDSO_CLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_TSC, \
+ VDSO_CLOCKMODE_PVCLOCK, \
+ VDSO_CLOCKMODE_HVCLOCK
+
+#endif /* __ASM_VDSO_CLOCKSOURCE_H */
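 
 VDSO_ARCH_CLOCKMODES is spliced by the generic <vdso/clocksource.h> into
 one enum shared by the kernel and the vDSO, replacing the per-arch
 VCLOCK_* numbers. The generic side looks roughly like this:
 
 enum vdso_clock_mode {
         VDSO_CLOCKMODE_NONE,
 #ifdef CONFIG_GENERIC_GETTIMEOFDAY
         VDSO_ARCH_CLOCKMODES,           /* TSC, PVCLOCK, HVCLOCK on x86 */
 #endif
         VDSO_CLOCKMODE_MAX,
         VDSO_CLOCKMODE_TIMENS = INT_MAX /* time namespace indicator */
 };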
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 6ee1f7dba34b..9a6dc9b4ec99 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -243,7 +243,7 @@ static u64 vread_hvclock(void)
static inline u64 __arch_get_hw_counter(s32 clock_mode)
{
- if (clock_mode == VCLOCK_TSC)
+ if (likely(clock_mode == VDSO_CLOCKMODE_TSC))
return (u64)rdtsc_ordered();
/*
* For any memory-mapped vclock type, we need to make sure that gcc
@@ -252,13 +252,13 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode)
* question isn't enabled, which will segfault. Hence the barriers.
*/
#ifdef CONFIG_PARAVIRT_CLOCK
- if (clock_mode == VCLOCK_PVCLOCK) {
+ if (clock_mode == VDSO_CLOCKMODE_PVCLOCK) {
barrier();
return vread_pvclock();
}
#endif
#ifdef CONFIG_HYPERV_TIMER
- if (clock_mode == VCLOCK_HVCLOCK) {
+ if (clock_mode == VDSO_CLOCKMODE_HVCLOCK) {
barrier();
return vread_hvclock();
}
diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..57b1a7034c64
--- /dev/null
+++ b/arch/x86/include/asm/vdso/processor.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static __always_inline void rep_nop(void)
+{
+ asm volatile("rep; nop" ::: "memory");
+}
+
+static __always_inline void cpu_relax(void)
+{
+ rep_nop();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
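 
 Splitting cpu_relax() into its own header lets vDSO code busy-wait
 without pulling in all of asm/processor.h. Typical usage, as a sketch
 (wait_for_flag is an illustrative name):
 
 static void wait_for_flag(int *ready)
 {
         /* PAUSE lowers power and yields pipeline resources to the SMT sibling. */
         while (!READ_ONCE(*ready))
                 cpu_relax();
 }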
diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h
index 0026ab2123ce..be199a9b2676 100644
--- a/arch/x86/include/asm/vdso/vsyscall.h
+++ b/arch/x86/include/asm/vdso/vsyscall.h
@@ -10,8 +10,6 @@
#include <asm/vgtod.h>
#include <asm/vvar.h>
-int vclocks_used __read_mostly;
-
DEFINE_VVAR(struct vdso_data, _vdso_data);
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
@@ -23,19 +21,6 @@ struct vdso_data *__x86_get_k_vdso_data(void)
}
#define __arch_get_k_vdso_data __x86_get_k_vdso_data
-static __always_inline
-int __x86_get_clock_mode(struct timekeeper *tk)
-{
- int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
-
- /* Mark the new vclock used. */
- BUILD_BUG_ON(VCLOCK_MAX >= 32);
- WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode));
-
- return vclock_mode;
-}
-#define __arch_get_clock_mode __x86_get_clock_mode
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index a2638c6124ed..7aa38b2ad8a9 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -2,6 +2,11 @@
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H
+/*
+ * This check is required to prevent ARCH=um from including
+ * unwanted headers.
+ */
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
#include <linux/compiler.h>
#include <asm/clocksource.h>
#include <vdso/datapage.h>
@@ -14,11 +19,6 @@ typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
-
-extern int vclocks_used;
-static inline bool vclock_was_used(int vclock)
-{
- return READ_ONCE(vclocks_used) & (1 << vclock);
-}
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
#endif /* _ASM_X86_VGTOD_H */