Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/bug.h | 2
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 2
-rw-r--r--  arch/arm64/include/asm/bug.h | 2
-rw-r--r--  arch/blackfin/include/asm/bug.h | 4
-rw-r--r--  arch/blackfin/include/asm/flat.h | 3
-rw-r--r--  arch/blackfin/kernel/flat.c | 4
-rw-r--r--  arch/h8300/include/asm/flat.h | 2
-rw-r--r--  arch/m68k/include/asm/flat.h | 3
-rw-r--r--  arch/mn10300/include/asm/bug.h | 2
-rw-r--r--  arch/parisc/include/asm/bug.h | 6
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/radix.h | 1
-rw-r--r--  arch/powerpc/include/asm/bug.h | 8
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 7
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 28
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S | 15
-rw-r--r--  arch/powerpc/mm/mem.c | 1
-rw-r--r--  arch/powerpc/mm/pgtable-hash64.c | 44
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 28
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 8
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 2
-rw-r--r--  arch/s390/include/asm/bug.h | 4
-rw-r--r--  arch/sh/include/asm/bug.h | 4
-rw-r--r--  arch/sparc/include/asm/trap_block.h | 1
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 12
-rw-r--r--  arch/sparc/kernel/smp_64.c | 185
-rw-r--r--  arch/sparc/kernel/sun4v_ivec.S | 15
-rw-r--r--  arch/sparc/kernel/traps_64.c | 1
-rw-r--r--  arch/x86/Kconfig.debug | 1
-rw-r--r--  arch/x86/boot/Makefile | 5
-rw-r--r--  arch/x86/configs/i386_defconfig | 3
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 3
-rw-r--r--  arch/x86/events/core.c | 4
-rw-r--r--  arch/x86/events/intel/core.c | 164
-rw-r--r--  arch/x86/events/intel/cstate.c | 26
-rw-r--r--  arch/x86/events/intel/ds.c | 22
-rw-r--r--  arch/x86/events/intel/lbr.c | 4
-rw-r--r--  arch/x86/events/perf_event.h | 2
-rw-r--r--  arch/x86/include/asm/bug.h | 4
-rw-r--r--  arch/x86/include/asm/io.h | 4
-rw-r--r--  arch/x86/include/asm/kprobes.h | 8
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 2
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 16
-rw-r--r--  arch/x86/include/asm/processor.h | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 8
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 1
-rw-r--r--  arch/x86/kernel/devicetree.c | 3
-rw-r--r--  arch/x86/kvm/Kconfig | 2
-rw-r--r--  arch/x86/kvm/hyperv.c | 7
-rw-r--r--  arch/x86/kvm/vmx.c | 46
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/math-emu/Makefile | 4
-rw-r--r--  arch/x86/math-emu/fpu_emu.h | 2
-rw-r--r--  arch/x86/math-emu/reg_compare.c | 16
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_max7315.c | 6
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 29
58 files changed, 573 insertions, 223 deletions
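
One change recurs across every bug.h hunk below: the __bug_table section flags flip from "a" to "aw". The table has to be writable because the generic BUG machinery now updates entries in place at runtime to give warnings once-only semantics. A minimal sketch of that mechanism, with the layout and flag values assumed from include/asm-generic/bug.h (illustrative, not the exact lib/bug.c code):

    struct bug_entry {
        signed int     bug_addr_disp;   /* relative-pointer config */
        signed int     file_disp;
        unsigned short line;
        unsigned short flags;
    };

    #define BUGFLAG_ONCE (1 << 1)
    #define BUGFLAG_DONE (1 << 2)

    static void report_bug_once(struct bug_entry *bug)
    {
        if ((bug->flags & BUGFLAG_ONCE) && (bug->flags & BUGFLAG_DONE))
            return;                    /* already reported once */

        /* This in-place store is why the section needs "w":
         * the entry itself lives in __bug_table. */
        bug->flags |= BUGFLAG_DONE;
        /* ... print the backtrace / warning ... */
    }
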
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 4e6e88a6b2f4..2244a94ed9c9 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -37,7 +37,7 @@ do { \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
- ".pushsection __bug_table,\"a\"\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
".align 2\n" \
"3:\t.word 1b, 2b\n" \
"\t.hword " #__line ", 0\n" \
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d69bebf697e7..74504b154256 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
void (*dma_unmap_area)(const void *, size_t, int);
void (*dma_flush_range)(const void *, const void *);
-};
+} __no_randomize_layout;
/*
* Select the calling method
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index 366448eb0fb7..a02a57186f56 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -36,7 +36,7 @@
#ifdef CONFIG_GENERIC_BUG
#define __BUG_ENTRY(flags) \
- ".pushsection __bug_table,\"a\"\n\t" \
+ ".pushsection __bug_table,\"aw\"\n\t" \
".align 2\n\t" \
"0: .long 1f - 0b\n\t" \
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
index 8d9b1eba89c4..76b2e82ee730 100644
--- a/arch/blackfin/include/asm/bug.h
+++ b/arch/blackfin/include/asm/bug.h
@@ -21,7 +21,7 @@
#define _BUG_OR_WARN(flags) \
asm volatile( \
"1: .hword %0\n" \
- " .section __bug_table,\"a\",@progbits\n" \
+ " .section __bug_table,\"aw\",@progbits\n" \
"2: .long 1b\n" \
" .long %1\n" \
" .short %2\n" \
@@ -38,7 +38,7 @@
#define _BUG_OR_WARN(flags) \
asm volatile( \
"1: .hword %0\n" \
- " .section __bug_table,\"a\",@progbits\n" \
+ " .section __bug_table,\"aw\",@progbits\n" \
"2: .long 1b\n" \
" .short %1\n" \
" .org 2b + %2\n" \
diff --git a/arch/blackfin/include/asm/flat.h b/arch/blackfin/include/asm/flat.h
index 296d7f56fbfd..f1d6ba7afbf2 100644
--- a/arch/blackfin/include/asm/flat.h
+++ b/arch/blackfin/include/asm/flat.h
@@ -44,8 +44,7 @@ flat_get_relocate_addr (unsigned long relval)
return relval & 0x03ffffff; /* Mask out top 6 bits */
}
-static inline int flat_set_persistent(unsigned long relval,
- unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
{
int type = (relval >> 26) & 7;
if (type == 3) {
diff --git a/arch/blackfin/kernel/flat.c b/arch/blackfin/kernel/flat.c
index d29ab6a2e909..8ebc54daaa8e 100644
--- a/arch/blackfin/kernel/flat.c
+++ b/arch/blackfin/kernel/flat.c
@@ -32,7 +32,7 @@ unsigned long bfin_get_addr_from_rp(u32 *ptr,
break;
case FLAT_BFIN_RELOC_TYPE_32_BIT:
- pr_debug("*ptr = %lx", get_unaligned(ptr));
+ pr_debug("*ptr = %x", get_unaligned(ptr));
val = get_unaligned(ptr);
break;
@@ -77,7 +77,7 @@ void bfin_put_addr_at_rp(u32 *ptr, u32 addr, u32 relval)
case FLAT_BFIN_RELOC_TYPE_32_BIT:
put_unaligned(addr, ptr);
- pr_debug("new ptr =%lx", get_unaligned(ptr));
+ pr_debug("new ptr =%x", get_unaligned(ptr));
break;
}
}
diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h
index 18d024251738..7e0bd6fa1532 100644
--- a/arch/h8300/include/asm/flat.h
+++ b/arch/h8300/include/asm/flat.h
@@ -24,7 +24,7 @@ static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
u32 *addr, u32 *persistent)
{
u32 val = get_unaligned((__force u32 *)rp);
- if (!(flags & FLAT_FLAG_GOTPIC)
+ if (!(flags & FLAT_FLAG_GOTPIC))
val &= 0x00ffffff;
*addr = val;
return 0;
diff --git a/arch/m68k/include/asm/flat.h b/arch/m68k/include/asm/flat.h
index 48b62790fe70..b2a41f5b3890 100644
--- a/arch/m68k/include/asm/flat.h
+++ b/arch/m68k/include/asm/flat.h
@@ -30,8 +30,7 @@ static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
}
#define flat_get_relocate_addr(rel) (rel)
-static inline int flat_set_persistent(unsigned long relval,
- unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
{
return 0;
}
diff --git a/arch/mn10300/include/asm/bug.h b/arch/mn10300/include/asm/bug.h
index aa6a38886391..811414fb002d 100644
--- a/arch/mn10300/include/asm/bug.h
+++ b/arch/mn10300/include/asm/bug.h
@@ -21,7 +21,7 @@ do { \
asm volatile( \
" syscall 15 \n" \
"0: \n" \
- " .section __bug_table,\"a\" \n" \
+ " .section __bug_table,\"aw\" \n" \
" .long 0b,%0,%1 \n" \
" .previous \n" \
: \
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index d2742273a685..07ea467f22fc 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -27,7 +27,7 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
@@ -50,7 +50,7 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
@@ -64,7 +64,7 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b\n" \
"\t.short %c0\n" \
"\t.org 2b+%c1\n" \
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0ce513f2926f..36fc7bfe9e11 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -91,6 +91,7 @@ static inline int hash__pgd_bad(pgd_t pgd)
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
+extern void hash__mark_initmem_nx(void);
#endif
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index c0737c86a362..d1da415e283c 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1192,5 +1192,6 @@ static inline const int pud_pfn(pud_t pud)
BUILD_BUG();
return 0;
}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 487709ff6875..544440b5aff3 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -118,6 +118,7 @@
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void radix__mark_rodata_ro(void);
+extern void radix__mark_initmem_nx(void);
#endif
static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 0151af6c2a50..87fcc1948817 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -18,7 +18,7 @@
#include <asm/asm-offsets.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
.macro EMIT_BUG_ENTRY addr,file,line,flags
- .section __bug_table,"a"
+ .section __bug_table,"aw"
5001: PPC_LONG \addr, 5002f
.short \line, \flags
.org 5001b+BUG_ENTRY_SIZE
@@ -29,7 +29,7 @@
.endm
#else
.macro EMIT_BUG_ENTRY addr,file,line,flags
- .section __bug_table,"a"
+ .section __bug_table,"aw"
5001: PPC_LONG \addr
.short \flags
.org 5001b+BUG_ENTRY_SIZE
@@ -42,14 +42,14 @@
sizeof(struct bug_entry), respectively */
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
- ".section __bug_table,\"a\"\n" \
+ ".section __bug_table,\"aw\"\n" \
"2:\t" PPC_LONG "1b, %0\n" \
"\t.short %1, %2\n" \
".org 2b+%3\n" \
".previous\n"
#else
#define _EMIT_BUG_ENTRY \
- ".section __bug_table,\"a\"\n" \
+ ".section __bug_table,\"aw\"\n" \
"2:\t" PPC_LONG "1b\n" \
"\t.short %2\n" \
".org 2b+%3\n" \
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dd01212935ac..afae9a336136 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -80,6 +80,13 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mark_initmem_nx(void);
+#else
+static inline void mark_initmem_nx(void) { }
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e6d8354d79ef..9029afd1fa2a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -824,7 +824,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
* r3 volatile parameter and return value for status
* r4-r10 volatile input and output value
* r11 volatile hypercall number and output value
- * r12 volatile
+ * r12 volatile input and output value
* r13-r31 nonvolatile
* LR nonvolatile
* CTR volatile
@@ -834,25 +834,26 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
* Other registers nonvolatile
*
* The intersection of volatile registers that don't contain possible
- * inputs is: r12, cr0, xer, ctr. We may use these as scratch regs
- * upon entry without saving.
+ * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
+ * without saving, though xer is not a good idea to use, as hardware may
+ * interpret some bits so it may be costly to change them.
*/
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* There is a little bit of juggling to get syscall and hcall
- * working well. Save r10 in ctr to be restored in case it is a
- * hcall.
+ * working well. Save r13 in ctr to avoid using SPRG scratch
+ * register.
*
* Userspace syscalls have already saved the PPR, hcalls must save
* it before setting HMT_MEDIUM.
*/
#define SYSCALL_KVMTEST \
- mr r12,r13; \
+ mtctr r13; \
GET_PACA(r13); \
- mtctr r10; \
+ std r10,PACA_EXGEN+EX_R10(r13); \
KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
HMT_MEDIUM; \
- mr r9,r12; \
+ mfctr r9;
#else
#define SYSCALL_KVMTEST \
@@ -935,8 +936,8 @@ EXC_VIRT_END(system_call, 0x4c00, 0x100)
* This is a hcall, so register convention is as above, with these
* differences:
* r13 = PACA
- * r12 = orig r13
- * ctr = orig r10
+ * ctr = orig r13
+ * orig r10 saved in PACA
*/
TRAMP_KVM_BEGIN(do_kvm_0xc00)
/*
@@ -944,14 +945,13 @@ TRAMP_KVM_BEGIN(do_kvm_0xc00)
* HMT_MEDIUM. That allows the KVM code to save that value into the
* guest state (it is the guest's PPR value).
*/
- OPT_GET_SPR(r0, SPRN_PPR, CPU_FTR_HAS_PPR)
+ OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
HMT_MEDIUM
- OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r0, CPU_FTR_HAS_PPR)
+ OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
mfctr r10
- SET_SCRATCH0(r12)
+ SET_SCRATCH0(r10)
std r9,PACA_EXGEN+EX_R9(r13)
mfcr r9
- std r10,PACA_EXGEN+EX_R10(r13)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
#endif
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 5adb390e773b..516ebef905c0 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -30,6 +30,7 @@
* Use unused space in the interrupt stack to save and restore
* registers for winkle support.
*/
+#define _MMCR0 GPR0
#define _SDR1 GPR3
#define _PTCR GPR3
#define _RPR GPR4
@@ -272,6 +273,14 @@ power_enter_stop:
b pnv_wakeup_noloss
.Lhandle_esl_ec_set:
+ /*
+ * POWER9 DD2 can incorrectly set PMAO when waking up after a
+ * state-loss idle. Saving and restoring MMCR0 over idle is a
+ * workaround.
+ */
+ mfspr r4,SPRN_MMCR0
+ std r4,_MMCR0(r1)
+
/*
* Check if the requested state is a deep idle state.
*/
@@ -450,10 +459,14 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
pnv_restore_hyp_resource_arch300:
/*
* Workaround for POWER9, if we lost resources, the ERAT
- * might have been mixed up and needs flushing.
+ * might have been mixed up and needs flushing. We also need
+ * to reload MMCR0 (see comment above).
*/
blt cr3,1f
PPC_INVALIDATE_ERAT
+ ld r1,PACAR1(r13)
+ ld r4,_MMCR0(r1)
+ mtspr SPRN_MMCR0,r4
1:
/*
* POWER ISA 3. Use PSSCR to determine if we
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8541f18694a4..46b4e67d2372 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -402,6 +402,7 @@ void __init mem_init(void)
void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
+ mark_initmem_nx();
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 188b4107584d..443a2c66a304 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -425,33 +425,51 @@ int hash__has_transparent_hugepage(void)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_STRICT_KERNEL_RWX
-void hash__mark_rodata_ro(void)
+static bool hash__change_memory_range(unsigned long start, unsigned long end,
+ unsigned long newpp)
{
- unsigned long start = (unsigned long)_stext;
- unsigned long end = (unsigned long)__init_begin;
unsigned long idx;
unsigned int step, shift;
- unsigned long newpp = PP_RXXX;
shift = mmu_psize_defs[mmu_linear_psize].shift;
step = 1 << shift;
- start = ((start + step - 1) >> shift) << shift;
- end = (end >> shift) << shift;
+ start = ALIGN_DOWN(start, step);
+ end = ALIGN(end, step); // aligns up
- pr_devel("marking ro start %lx, end %lx, step %x\n",
- start, end, step);
+ if (start >= end)
+ return false;
- if (start == end) {
- pr_warn("could not set rodata ro, relocate the start"
- " of the kernel to a 0x%x boundary\n", step);
- return;
- }
+ pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
+ start, end, newpp, step);
for (idx = start; idx < end; idx += step)
/* Not sure if we can do much with the return value */
mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
mmu_kernel_ssize);
+ return true;
+}
+
+void hash__mark_rodata_ro(void)
+{
+ unsigned long start, end;
+
+ start = (unsigned long)_stext;
+ end = (unsigned long)__init_begin;
+
+ WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
+}
+
+void hash__mark_initmem_nx(void)
+{
+ unsigned long start, end, pp;
+
+ start = (unsigned long)__init_begin;
+ end = (unsigned long)__init_end;
+
+ pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
+
+ WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif
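
The refactored hash__change_memory_range() also swaps the open-coded shift rounding for ALIGN_DOWN()/ALIGN() (linux/kernel.h) and flips the direction: the old code rounded start up and end down, which could shrink the range to nothing; the new code rounds outward. A worked example, assuming a 16MB linear-mapping page (shift == 24):

    unsigned long step  = 1UL << 24;            /* 0x1000000 */
    unsigned long start = 0xc000000001200000UL;
    unsigned long end   = 0xc000000002200000UL;

    start = ALIGN_DOWN(start, step);    /* -> 0xc000000001000000 */
    end   = ALIGN(end, step);           /* -> 0xc000000003000000 */

    /* The old rounding would have produced start == end ==
     * 0xc000000002000000 here: an empty range, hence the old
     * "could not set rodata ro" warning path. */
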
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 8c13e4282308..5cc50d47ce3f 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -112,10 +112,9 @@ set_the_pte:
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void radix__mark_rodata_ro(void)
+void radix__change_memory_range(unsigned long start, unsigned long end,
+ unsigned long clear)
{
- unsigned long start = (unsigned long)_stext;
- unsigned long end = (unsigned long)__init_begin;
unsigned long idx;
pgd_t *pgdp;
pud_t *pudp;
@@ -125,7 +124,8 @@ void radix__mark_rodata_ro(void)
start = ALIGN_DOWN(start, PAGE_SIZE);
end = PAGE_ALIGN(end); // aligns up
- pr_devel("marking ro start %lx, end %lx\n", start, end);
+ pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
+ start, end, clear);
for (idx = start; idx < end; idx += PAGE_SIZE) {
pgdp = pgd_offset_k(idx);
@@ -147,11 +147,29 @@ void radix__mark_rodata_ro(void)
if (!ptep)
continue;
update_the_pte:
- radix__pte_update(&init_mm, idx, ptep, _PAGE_WRITE, 0, 0);
+ radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
}
radix__flush_tlb_kernel_range(start, end);
}
+
+void radix__mark_rodata_ro(void)
+{
+ unsigned long start, end;
+
+ start = (unsigned long)_stext;
+ end = (unsigned long)__init_begin;
+
+ radix__change_memory_range(start, end, _PAGE_WRITE);
+}
+
+void radix__mark_initmem_nx(void)
+{
+ unsigned long start = (unsigned long)__init_begin;
+ unsigned long end = (unsigned long)__init_end;
+
+ radix__change_memory_range(start, end, _PAGE_EXEC);
+}
#endif /* CONFIG_STRICT_KERNEL_RWX */
static inline void __meminit print_mapping(unsigned long start,
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 5c0b795d656c..0736e94c7615 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -505,4 +505,12 @@ void mark_rodata_ro(void)
else
hash__mark_rodata_ro();
}
+
+void mark_initmem_nx(void)
+{
+ if (radix_enabled())
+ radix__mark_initmem_nx();
+ else
+ hash__mark_initmem_nx();
+}
#endif
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 9b87abb178f0..cad6b57ce494 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -78,7 +78,7 @@ void opal_configure_cores(void)
* ie. Host hash supports hash guests
* Host radix supports hash/radix guests
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (early_cpu_has_feature(CPU_FTR_ARCH_300)) {
reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
if (early_radix_enabled())
reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;
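
The cpu_has_feature() -> early_cpu_has_feature() switch is about boot ordering: opal_configure_cores() runs before jump labels are patched, and cpu_has_feature() is implemented with static keys on powerpc, so its answer is not trustworthy that early. The early_ variant does a plain mask test (simplified sketch; see arch/powerpc/include/asm/cpu_has_feature.h for the real definitions):

    static inline bool early_cpu_has_feature(unsigned long feature)
    {
        /* direct load and mask: safe at any point in boot */
        return !!(cur_cpu_spec->cpu_features & feature);
    }

    /* cpu_has_feature(), by contrast, routes through a static key
     * per feature for speed, and those keys are only initialized
     * once jump-label patching has run. */
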
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 1bbd9dbfe4e0..ce9cc123988b 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -14,7 +14,7 @@
".section .rodata.str,\"aMS\",@progbits,1\n" \
"2: .asciz \""__FILE__"\"\n" \
".previous\n" \
- ".section __bug_table,\"a\"\n" \
+ ".section __bug_table,\"aw\"\n" \
"3: .long 1b-3b,2b-3b\n" \
" .short %0,%1\n" \
" .org 3b+%2\n" \
@@ -30,7 +30,7 @@
asm volatile( \
"0: j 0b+2\n" \
"1:\n" \
- ".section __bug_table,\"a\"\n" \
+ ".section __bug_table,\"aw\"\n" \
"2: .long 1b-2b\n" \
" .short %0\n" \
" .org 2b+%1\n" \
diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
index c9828f785ca0..5b5086367639 100644
--- a/arch/sh/include/asm/bug.h
+++ b/arch/sh/include/asm/bug.h
@@ -24,14 +24,14 @@
*/
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t.long 1b, %O1\n" \
"\t.short %O2, %O3\n" \
"\t.org 2b+%O4\n" \
"\t.popsection\n"
#else
#define _EMIT_BUG_ENTRY \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t.long 1b\n" \
"\t.short %O3\n" \
"\t.org 2b+%O4\n" \
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index ec9c04de3664..ff05992dae7a 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
void init_cur_cpu_trap(struct thread_info *);
void setup_tba(void);
extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
unsigned long real_hard_smp_processor_id(void);
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 24f21c726dfa..f10e2f712394 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -673,12 +673,14 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
static int dma_4v_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
- u64 dma_addr_mask;
+ u64 dma_addr_mask = iommu->dma_addr_mask;
- if (device_mask > DMA_BIT_MASK(32) && iommu->atu)
- dma_addr_mask = iommu->atu->dma_addr_mask;
- else
- dma_addr_mask = iommu->dma_addr_mask;
+ if (device_mask > DMA_BIT_MASK(32)) {
+ if (iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ return 0;
+ }
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
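
The rewritten test closes a real hole, not just a restructure: previously, a device requesting a mask wider than 32 bits on a system without an ATU fell through to the 32-bit IOMMU mask, and because a 64-bit device_mask contains all of those bits, the final comparison still returned 1. A condensed sketch of the new policy (DMA_BIT_MASK() is the standard linux/dma-mapping.h macro):

    static int dma_mask_ok(u64 device_mask, u64 iommu_mask,
                           const u64 *atu_mask)  /* NULL when no ATU */
    {
        u64 dma_addr_mask = iommu_mask;

        if (device_mask > DMA_BIT_MASK(32)) {
            if (!atu_mask)
                return 0;              /* >32-bit DMA requires the ATU */
            dma_addr_mask = *atu_mask;
        }
        return (device_mask & dma_addr_mask) == dma_addr_mask;
    }
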
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index fdf31040a7dc..3218bc43302e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -622,22 +622,48 @@ retry:
}
}
-/* Multi-cpu list version. */
+#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
+#define MONDO_USEC_WAIT_MIN 2
+#define MONDO_USEC_WAIT_MAX 100
+#define MONDO_RETRY_LIMIT 500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to the 'cnt' cpus listed in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * it until every cpu has received it, or until the remaining cpus
+ * are truly stuck, unable to receive the mondo, and we time out.
+ * Occasionally a target cpu strand is borrowed briefly by the
+ * hypervisor to perform guest service, such as PCIe error handling.
+ * Accounting for that service time, an overall wait of 1 second is
+ * reasonable for a single cpu.
+ * Two in-between mondo-check wait times are defined: 2 usec for a
+ * quick turnaround on a single cpu, and up to 100 usec for large cpu
+ * counts. Delivering a mondo to many cpus can take longer, so we
+ * extend the retry budget as long as the target cpus keep making
+ * forward progress.
+ */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
- int retries, this_cpu, prev_sent, i, saw_cpu_error;
+ int this_cpu, tot_cpus, prev_sent, i, rem;
+ int usec_wait, retries, tot_retries;
+ u16 first_cpu = 0xffff;
+ unsigned long xc_rcvd = 0;
unsigned long status;
+ int ecpuerror_id = 0;
+ int enocpu_id = 0;
u16 *cpu_list;
+ u16 cpu;
this_cpu = smp_processor_id();
-
cpu_list = __va(tb->cpu_list_pa);
-
- saw_cpu_error = 0;
- retries = 0;
+ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+ if (usec_wait > MONDO_USEC_WAIT_MAX)
+ usec_wait = MONDO_USEC_WAIT_MAX;
+ retries = tot_retries = 0;
+ tot_cpus = cnt;
prev_sent = 0;
+
do {
- int forward_progress, n_sent;
+ int n_sent, mondo_delivered, target_cpu_busy;
status = sun4v_cpu_mondo_send(cnt,
tb->cpu_list_pa,
@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
/* HV_EOK means all cpus received the xcall, we're done. */
if (likely(status == HV_EOK))
- break;
+ goto xcall_done;
+
+ /* If not these non-fatal errors, panic */
+ if (unlikely((status != HV_EWOULDBLOCK) &&
+ (status != HV_ECPUERROR) &&
+ (status != HV_ENOCPU)))
+ goto fatal_errors;
/* First, see if we made any forward progress.
*
+ * Go through the cpu_list, count the target cpus that have
+ * received our mondo (n_sent), and those that did not (rem).
+ * Re-pack cpu_list with the cpus that remain to be retried at the
+ * front - this simplifies tracking the truly stalled cpus.
+ *
* The hypervisor indicates successful sends by setting
* cpu list entries to the value 0xffff.
+ *
+ * EWOULDBLOCK means some target cpus did not receive the
+ * mondo and retry usually helps.
+ *
+ * ECPUERROR means at least one target cpu is in error state,
+ * it's usually safe to skip the faulty cpu and retry.
+ *
+ * ENOCPU means one of the target cpus doesn't belong to the
+ * domain, perhaps because it was offlined, which is unexpected
+ * but not fatal; it's okay to skip the offlined cpu.
*/
+ rem = 0;
n_sent = 0;
for (i = 0; i < cnt; i++) {
- if (likely(cpu_list[i] == 0xffff))
+ cpu = cpu_list[i];
+ if (likely(cpu == 0xffff)) {
n_sent++;
+ } else if ((status == HV_ECPUERROR) &&
+ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+ ecpuerror_id = cpu + 1;
+ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+ enocpu_id = cpu + 1;
+ } else {
+ cpu_list[rem++] = cpu;
+ }
}
- forward_progress = 0;
- if (n_sent > prev_sent)
- forward_progress = 1;
+ /* No cpu remained, we're done. */
+ if (rem == 0)
+ break;
- prev_sent = n_sent;
+ /* Otherwise, update the cpu count for retry. */
+ cnt = rem;
- /* If we get a HV_ECPUERROR, then one or more of the cpus
- * in the list are in error state. Use the cpu_state()
- * hypervisor call to find out which cpus are in error state.
+ /* Record the overall number of mondos received by the
+ * first of the remaining cpus.
*/
- if (unlikely(status == HV_ECPUERROR)) {
- for (i = 0; i < cnt; i++) {
- long err;
- u16 cpu;
+ if (first_cpu != cpu_list[0]) {
+ first_cpu = cpu_list[0];
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+ }
- cpu = cpu_list[i];
- if (cpu == 0xffff)
- continue;
+ /* Was any mondo delivered successfully? */
+ mondo_delivered = (n_sent > prev_sent);
+ prev_sent = n_sent;
- err = sun4v_cpu_state(cpu);
- if (err == HV_CPU_STATE_ERROR) {
- saw_cpu_error = (cpu + 1);
- cpu_list[i] = 0xffff;
- }
- }
- } else if (unlikely(status != HV_EWOULDBLOCK))
- goto fatal_mondo_error;
+ /* or, was any target cpu busy processing other mondos? */
+ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
- /* Don't bother rewriting the CPU list, just leave the
- * 0xffff and non-0xffff entries in there and the
- * hypervisor will do the right thing.
- *
- * Only advance timeout state if we didn't make any
- * forward progress.
+ /* Retry count is for no progress. If we're making progress,
+ * reset the retry count.
*/
- if (unlikely(!forward_progress)) {
- if (unlikely(++retries > 10000))
- goto fatal_mondo_timeout;
-
- /* Delay a little bit to let other cpus catch up
- * on their cpu mondo queue work.
- */
- udelay(2 * cnt);
+ if (likely(mondo_delivered || target_cpu_busy)) {
+ tot_retries += retries;
+ retries = 0;
+ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+ goto fatal_mondo_timeout;
}
- } while (1);
- if (unlikely(saw_cpu_error))
- goto fatal_mondo_cpu_error;
+ /* Delay a little bit to let other cpus catch up on
+ * their cpu mondo queue work.
+ */
+ if (!mondo_delivered)
+ udelay(usec_wait);
- return;
+ retries++;
+ } while (1);
-fatal_mondo_cpu_error:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
- "(including %d) were in error state\n",
- this_cpu, saw_cpu_error - 1);
+xcall_done:
+ if (unlikely(ecpuerror_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+ this_cpu, ecpuerror_id - 1);
+ } else if (unlikely(enocpu_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+ this_cpu, enocpu_id - 1);
+ }
return;
+fatal_errors:
+ /* fatal errors include bad alignment, etc */
+ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+ panic("Unexpected SUN4V mondo error %lu\n", status);
+
fatal_mondo_timeout:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
- " progress after %d retries.\n",
- this_cpu, retries);
- goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
- this_cpu, status);
- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
- "mondo_block_pa(%lx)\n",
- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
- for (i = 0; i < cnt; i++)
- printk("%u ", cpu_list[i]);
- printk("]\n");
+ /* some cpus were non-responsive to the cpu mondo */
+ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+ panic("SUN4V mondo timeout panic\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
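
The tunables at the top of this hunk encode the "1 second for 1 cpu" budget from the comment block. A worked sketch of the wait scaling, as computed at the top of the rewritten hypervisor_xcall_deliver():

    static int mondo_usec_wait(int cnt)
    {
        int usec_wait = cnt * MONDO_USEC_WAIT_MIN;  /* 2us per target */

        if (usec_wait > MONDO_USEC_WAIT_MAX)
            usec_wait = MONDO_USEC_WAIT_MAX;        /* cap at 100us */
        return usec_wait;
    }

    /* cnt == 1:  2us x MONDO_RETRY_LIMIT (500000) retries ~= 1 second.
     * cnt >= 50: capped at 100us per retry; the retry counter is also
     * reset whenever any target makes progress, so large lists earn
     * proportionally more total time. */
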
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
index 559bc5e9c199..34631995859a 100644
--- a/arch/sparc/kernel/sun4v_ivec.S
+++ b/arch/sparc/kernel/sun4v_ivec.S
@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+ /* Get smp_processor_id() into %g3 */
+ sethi %hi(trap_block), %g5
+ or %g5, %lo(trap_block), %g5
+ sub %g4, %g5, %g3
+ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+ /* Increment cpu_mondo_counter[smp_processor_id()] */
+ sethi %hi(cpu_mondo_counter), %g5
+ or %g5, %lo(cpu_mondo_counter), %g5
+ sllx %g3, 3, %g3
+ add %g5, %g3, %g5
+ ldx [%g5], %g3
+ add %g3, 1, %g3
+ stx %g3, [%g5]
+
/* Get CPU mondo queue base phys address into %g7. */
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
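
A C rendering of the added counter increment, for cross-checking the asm (the helper name is hypothetical; the asm recovers the trap_block pointer from ASI_SCRATCHPAD). It relies on each trap_block[] entry being exactly 1 << TRAP_BLOCK_SZ_SHIFT bytes, which is why a shift recovers the index:

    struct trap_per_cpu *tb = this_trap_block();  /* hypothetical helper */
    int cpu = tb - trap_block;  /* the sub + srlx TRAP_BLOCK_SZ_SHIFT pair */

    cpu_mondo_counter[cpu]++;   /* sllx %g3, 3 scales by sizeof(u64) */
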
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 196ee5eb4d48..ad31af1dd726 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
}
}
+u64 cpu_mondo_counter[NR_CPUS] = {0};
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index fcb7604172ce..cd20ca0b4043 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -348,6 +348,7 @@ config X86_DEBUG_FPU
config PUNIT_ATOM_DEBUG
tristate "ATOM Punit debug driver"
+ depends on PCI
select DEBUG_FS
select IOSF_MBI
---help---
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 0d810fb15eac..d88a2fddba8c 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -73,12 +73,13 @@ UBSAN_SANITIZE := n
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
quiet_cmd_image = BUILD $@
+silent_redirect_image = >/dev/null
cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
- $(obj)/zoffset.h $@
+ $(obj)/zoffset.h $@ $($(quiet)redirect_image)
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(call if_changed,image)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+ @$(kecho) 'Kernel: $@ is ready' ' (#'`cat .version`')'
OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
$(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 6cf79e1a6830..0eb9f92f3717 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,5 +1,4 @@
# CONFIG_64BIT is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -125,7 +124,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
CONFIG_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_MANGLE=y
@@ -255,7 +253,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index de45f57b410d..4a4b16e56d35 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -124,7 +123,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
CONFIG_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_MANGLE=y
@@ -251,7 +249,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index ff1ea2fb9705..8e3db8f642a7 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -191,8 +191,8 @@ static void release_pmc_hardware(void) {}
static bool check_hw_exists(void)
{
- u64 val, val_fail, val_new= ~0;
- int i, reg, reg_fail, ret = 0;
+ u64 val, val_fail = -1, val_new= ~0;
+ int i, reg, reg_fail = -1, ret = 0;
int bios_fail = 0;
int reg_safe = -1;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index aa62437d1aa1..98b0f0729527 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1708,6 +1708,120 @@ static __initconst const u64 glm_hw_cache_extra_regs
},
};
+static __initconst const u64 glp_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [C(RESULT_MISS)] = 0x0,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [C(RESULT_MISS)] = 0x0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
+ [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
+ [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
+ [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
+ [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+};
+
+static __initconst const u64 glp_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
+ GLM_LLC_ACCESS,
+ [C(RESULT_MISS)] = GLM_DEMAND_READ|
+ GLM_LLC_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
+ GLM_LLC_ACCESS,
+ [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
+ GLM_LLC_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+};
+
#define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL BIT_ULL(21)
@@ -3016,6 +3130,9 @@ static int hsw_hw_config(struct perf_event *event)
return 0;
}
+static struct event_constraint counter0_constraint =
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
+
static struct event_constraint counter2_constraint =
EVENT_CONSTRAINT(0, 0x4, 0);
@@ -3037,6 +3154,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
return c;
}
+static struct event_constraint *
+glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c;
+
+ /* :ppp means to do reduced skid PEBS which is PMC0 only. */
+ if (event->attr.precise_ip == 3)
+ return &counter0_constraint;
+
+ c = intel_get_event_constraints(cpuc, idx, event);
+
+ return c;
+}
+
/*
* Broadwell:
*
@@ -3265,10 +3397,8 @@ static void intel_pmu_cpu_dying(int cpu)
static void intel_pmu_sched_task(struct perf_event_context *ctx,
bool sched_in)
{
- if (x86_pmu.pebs_active)
- intel_pmu_pebs_sched_task(ctx, sched_in);
- if (x86_pmu.lbr_nr)
- intel_pmu_lbr_sched_task(ctx, sched_in);
+ intel_pmu_pebs_sched_task(ctx, sched_in);
+ intel_pmu_lbr_sched_task(ctx, sched_in);
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
@@ -3838,6 +3968,32 @@ __init int intel_pmu_init(void)
pr_cont("Goldmont events, ");
break;
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+
+ intel_pmu_lbr_init_skl();
+
+ x86_pmu.event_constraints = intel_slm_event_constraints;
+ x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_glm_extra_regs;
+ /*
+ * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
+ * for precise cycles.
+ */
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.lbr_pt_coexist = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.get_event_constraints = glp_get_event_constraints;
+ x86_pmu.cpu_events = glm_events_attrs;
+ /* Goldmont Plus has 4-wide pipeline */
+ event_attr_td_total_slots_scale_glm.event_str = "4";
+ pr_cont("Goldmont plus events, ");
+ break;
+
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_WESTMERE_EP:
case INTEL_FAM6_WESTMERE_EX:
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 238ae3248ba5..4cf100ff2a37 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -40,16 +40,16 @@
* Model specific counters:
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
- * Available model: SLM,AMT
+ * Available model: SLM,AMT,GLM
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
- * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- * SKL,KNL
+ * SKL,KNL,GLM
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -57,16 +57,17 @@
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
- * Available model: SNB,IVB,HSW,BDW,SKL,KNL
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
+ * GLM
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- * SKL,KNL
+ * SKL,KNL,GLM
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -82,7 +83,7 @@
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
- * Available model: HSW ULT only
+ * Available model: HSW ULT, GLM
* Scope: Package (physical package)
*
*/
@@ -504,6 +505,17 @@ static const struct cstate_model knl_cstates __initconst = {
};
+static const struct cstate_model glm_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
+ BIT(PERF_CSTATE_CORE_C3_RES) |
+ BIT(PERF_CSTATE_CORE_C6_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES) |
+ BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
#define X86_CSTATES_MODEL(model, states) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
@@ -546,6 +558,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index c6d23ffe422d..a322fed5f8ed 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -606,12 +606,6 @@ static inline void intel_pmu_drain_pebs_buffer(void)
x86_pmu.drain_pebs(&regs);
}
-void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
-{
- if (!sched_in)
- intel_pmu_drain_pebs_buffer();
-}
-
/*
* PEBS
*/
@@ -651,6 +645,12 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_glp_pebs_event_constraints[] = {
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
@@ -816,6 +816,14 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ if (!sched_in && pebs_needs_sched_cb(cpuc))
+ intel_pmu_drain_pebs_buffer();
+}
+
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
@@ -889,6 +897,8 @@ void intel_pmu_pebs_enable(struct perf_event *event)
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
ds->pebs_event_reset[hwc->idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
+ } else {
+ ds->pebs_event_reset[hwc->idx] = 0;
}
}
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index eb261656a320..955457a30197 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -380,8 +380,12 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx;
+ if (!cpuc->lbr_users)
+ return;
+
/*
* If LBR callstack feature is enabled and the stack was saved when
* the task was scheduled out, restore the stack. Otherwise flush
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 53728eea1bed..476aec3a4cab 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -879,6 +879,8 @@ extern struct event_constraint intel_slm_pebs_event_constraints[];
extern struct event_constraint intel_glm_pebs_event_constraints[];
+extern struct event_constraint intel_glp_pebs_event_constraints[];
+
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 39e702d90cdb..aa6b2023d8f8 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -35,7 +35,7 @@
#define _BUG_FLAGS(ins, flags) \
do { \
asm volatile("1:\t" ins "\n" \
- ".pushsection __bug_table,\"a\"\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
"\t.word %c1" "\t# bug_entry::line\n" \
@@ -52,7 +52,7 @@ do { \
#define _BUG_FLAGS(ins, flags) \
do { \
asm volatile("1:\t" ins "\n" \
- ".pushsection __bug_table,\"a\"\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t.word %c0" "\t# bug_entry::flags\n" \
"\t.org 2b+%c1\n" \
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 7afb0e2f07f4..48febf07e828 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -328,13 +328,13 @@ static inline unsigned type in##bwl##_p(int port) \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port)); \
+ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port)); \
+ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
}
BUILDIO(b, b, char)
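
The new "memory" clobber tells the compiler that the asm reads (outs) or writes (ins) memory through the pointer, which the constraints alone do not convey; "+S"/"+D" only say the register values change. Without it, GCC is free to cache values from the buffer across the asm or reorder stores into it. An illustration of the failure mode (use() and the port number are hypothetical):

    unsigned char buf[16];

    buf[0] = 0;
    insb(0x60, buf, sizeof(buf));   /* fills buf via port I/O */

    /* Without the clobber, nothing told the compiler the asm wrote
     * buf, so it could assume buf[0] is still 0 and fold this load
     * to a constant. The clobber forces a genuine re-read. */
    use(buf[0]);
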
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 34b984c60790..6cf65437b5e5 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -52,10 +52,10 @@ typedef u8 kprobe_opcode_t;
#define flush_insn_slot(p) do { } while (0)
/* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
#define MAX_OPTINSN_SIZE \
(((unsigned long)&optprobe_template_end - \
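
The switch from single kprobe_opcode_t objects to incomplete arrays changes what the compiler may assume about their size. The likely motivation (an assumption; the changelog isn't shown here) is FORTIFY_SOURCE-style bounds checking: the optprobe code copies the whole template span out of these symbols, and copying TMPL_END_IDX bytes out of a one-element object reads as an overflow. Sketch:

    /* before: a fortified memcpy() sees a single 1-byte object */
    extern kprobe_opcode_t optprobe_template_entry;
    memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);   /* flagged */

    /* after: an array of unknown bound, so the object size is
     * unknowable at compile time and the same copy passes */
    extern kprobe_opcode_t optprobe_template_entry[];
    memcpy(buf, optprobe_template_entry, TMPL_END_IDX);
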
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index ecfcb6643c9b..265c907d7d4c 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -293,7 +293,7 @@ static inline unsigned long __get_current_cr3_fast(void)
unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
/* For now, be very restrictive about when this can be called. */
- VM_WARN_ON(in_nmi() || !in_atomic());
+ VM_WARN_ON(in_nmi() || preemptible());
VM_BUG_ON(cr3 != __read_cr3());
return cr3;
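
The in_atomic() -> preemptible() swap widens the set of legal callers: a context that has only disabled local interrupts still has preempt_count() == 0, so in_atomic() is false there and the old assertion fired spuriously. preemptible() additionally tests irqs_disabled() (assuming CONFIG_PREEMPT_COUNT; without it, preemptible() is constant 0 and the check is a no-op). A caller the old check rejected:

    static void cr3_fast_caller(void)
    {
        unsigned long flags;

        local_irq_save(flags);                  /* preempt_count() still 0 */
        VM_WARN_ON(in_nmi() || !in_atomic());   /* old check: fires */
        VM_WARN_ON(in_nmi() || preemptible());  /* new check: quiet */
        local_irq_restore(flags);
    }
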
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index cb976bab6299..9ffc36bfe4cd 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
*/
unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
unsigned long addr, unsigned len);
-};
+} __no_randomize_layout;
struct pv_lazy_ops {
@@ -92,12 +92,12 @@ struct pv_lazy_ops {
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
-};
+} __no_randomize_layout;
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
unsigned long long (*steal_clock)(int cpu);
-};
+} __no_randomize_layout;
struct pv_cpu_ops {
/* hooks for various privileged instructions */
@@ -176,7 +176,7 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
-};
+} __no_randomize_layout;
struct pv_irq_ops {
/*
@@ -199,7 +199,7 @@ struct pv_irq_ops {
#ifdef CONFIG_X86_64
void (*adjust_exception_frame)(void);
#endif
-};
+} __no_randomize_layout;
struct pv_mmu_ops {
unsigned long (*read_cr2)(void);
@@ -305,7 +305,7 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
-};
+} __no_randomize_layout;
struct arch_spinlock;
#ifdef CONFIG_SMP
@@ -322,7 +322,7 @@ struct pv_lock_ops {
void (*kick)(int cpu);
struct paravirt_callee_save vcpu_is_preempted;
-};
+} __no_randomize_layout;
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
@@ -334,7 +334,7 @@ struct paravirt_patch_template {
struct pv_irq_ops pv_irq_ops;
struct pv_mmu_ops pv_mmu_ops;
struct pv_lock_ops pv_lock_ops;
-};
+} __no_randomize_layout;
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6a79547e8ee0..028245e1c42b 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -129,7 +129,7 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
-};
+} __randomize_layout;
struct cpuid_regs {
u32 eax, ebx, ecx, edx;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6bb680671088..7491e73d9253 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -347,6 +347,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
struct mpc_intsrc mp_irq;
/*
+ * Check bus_irq boundary.
+ */
+ if (bus_irq >= NR_IRQS_LEGACY) {
+ pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
+ return;
+ }
+
+ /*
* Convert 'gsi' to 'ioapic.pin'.
*/
ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b4f5f73febdb..237e9c2341c7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2093,7 +2093,7 @@ static inline void __init check_timer(void)
int idx;
idx = find_irq_entry(apic1, pin1, mp_INT);
if (idx != -1 && irq_trigger(idx))
- unmask_ioapic_irq(irq_get_chip_data(0));
+ unmask_ioapic_irq(irq_get_irq_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bb5abe8f5fd4..3b9e220621f8 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -134,6 +134,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
n = K6_BUG_LOOP;
f_vide = vide;
+ OPTIMIZER_HIDE_VAR(f_vide);
d = rdtsc();
while (n--)
f_vide();
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 3fe45f84ced4..cbf1f6ba39a8 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -235,8 +235,7 @@ static void __init dtb_add_ioapic(struct device_node *dn)
ret = of_address_to_resource(dn, 0, &r);
if (ret) {
- printk(KERN_ERR "Can't obtain address from node %s.\n",
- dn->full_name);
+ printk(KERN_ERR "Can't obtain address from device node %pOF.\n", dn);
return;
}
mp_register_ioapic(++ioapic_id, r.start, gsi_top, &cfg);
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 760433b2574a..2688c7dc5323 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,7 +22,7 @@ config KVM
depends on HAVE_KVM
depends on HIGH_RES_TIMERS
# for TASKSTATS/TASK_DELAY_ACCT:
- depends on NET
+ depends on NET && MULTIUSER
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
select ANON_INODES
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 2695a34fa1c5..337b6d2730fa 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -649,9 +649,10 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
}
if ((stimer->config & HV_STIMER_ENABLE) &&
- stimer->count)
- stimer_start(stimer);
- else
+ stimer->count) {
+ if (!stimer->msg_pending)
+ stimer_start(stimer);
+ } else
stimer_cleanup(stimer);
}
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 84e62acf2dd8..29fd8af5c347 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -198,7 +198,8 @@ struct loaded_vmcs {
struct vmcs *vmcs;
struct vmcs *shadow_vmcs;
int cpu;
- int launched;
+ bool launched;
+ bool nmi_known_unmasked;
struct list_head loaded_vmcss_on_cpu_link;
};
@@ -2326,6 +2327,11 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
__vmx_load_host_state(to_vmx(vcpu));
}
+static bool emulation_required(struct kvm_vcpu *vcpu)
+{
+ return emulate_invalid_guest_state && !guest_state_valid(vcpu);
+}
+
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
/*
@@ -2363,6 +2369,8 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
+ unsigned long old_rflags = vmx_get_rflags(vcpu);
+
__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
to_vmx(vcpu)->rflags = rflags;
if (to_vmx(vcpu)->rmode.vm86_active) {
@@ -2370,6 +2378,9 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
}
vmcs_writel(GUEST_RFLAGS, rflags);
+
+ if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
+ to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
}
static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
@@ -3857,11 +3868,6 @@ static __init int alloc_kvm_area(void)
return 0;
}
-static bool emulation_required(struct kvm_vcpu *vcpu)
-{
- return emulate_invalid_guest_state && !guest_state_valid(vcpu);
-}
-
static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
struct kvm_segment *save)
{
@@ -5510,10 +5516,8 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!is_guest_mode(vcpu)) {
- ++vcpu->stat.nmi_injections;
- vmx->nmi_known_unmasked = false;
- }
+ ++vcpu->stat.nmi_injections;
+ vmx->loaded_vmcs->nmi_known_unmasked = false;
if (vmx->rmode.vm86_active) {
if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
@@ -5527,16 +5531,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
- if (to_vmx(vcpu)->nmi_known_unmasked)
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool masked;
+
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
return false;
- return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+ masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+ return masked;
}
static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->nmi_known_unmasked = !masked;
+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
if (masked)
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
@@ -8736,7 +8745,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
- if (vmx->nmi_known_unmasked)
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
return;
/*
* Can't use vmx->exit_intr_info since we're not sure what
@@ -8760,7 +8769,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
else
- vmx->nmi_known_unmasked =
+ vmx->loaded_vmcs->nmi_known_unmasked =
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
& GUEST_INTR_STATE_NMI);
}
@@ -10488,6 +10497,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
u32 exit_qual;
int ret;
@@ -10512,6 +10522,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* for misconfigurations which will anyway be caught by the processor
* when using the merged vmcs02.
*/
+ if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
+ goto out;
+ }
+
if (vmcs12->launch_state == launch) {
nested_vmx_failValid(vcpu,
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
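
The nmi_known_unmasked cache moves from struct vcpu_vmx into struct loaded_vmcs because, with nested virtualization, one vCPU alternates between vmcs01 (L1 state) and vmcs02 (L2 state); NMI blocking lives in each VMCS's guest interruptibility field, so a single per-vCPU cached bit can go stale across the switch. Sketch of the resulting invariant:

    struct loaded_vmcs *cur = vmx->loaded_vmcs;  /* vmcs01 or vmcs02 */
    bool masked;

    if (cur->nmi_known_unmasked)
        masked = false;                 /* cache valid for *this* VMCS */
    else
        masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
                 GUEST_INTR_STATE_NMI;  /* consult the active VMCS */
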
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5b8f07889f6a..82a63c59f77b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -597,8 +597,8 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
(unsigned long *)&vcpu->arch.regs_avail))
return true;
- gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
- offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
+ gfn = (kvm_read_cr3(vcpu) & ~31ul) >> PAGE_SHIFT;
+ offset = (kvm_read_cr3(vcpu) & ~31ul) & (PAGE_SIZE - 1);
r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
PFERR_USER_MASK | PFERR_WRITE_MASK);
if (r < 0)
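
The ~31u -> ~31ul fix in pdptrs_changed() is a classic C promotion bug: kvm_read_cr3() returns a 64-bit value, but ~31u is an unsigned int, so it zero-extends and the AND silently clears the upper 32 bits of CR3. Worked through:

    unsigned long cr3 = 0x100002000UL;  /* PDPT located above 4GB */

    /* ~31u == 0xffffffe0 (unsigned int); widened for the AND it
     * becomes 0x00000000ffffffe0: */
    unsigned long bad  = cr3 & ~31u;    /* 0x0000000000002000 -- wrong */
    unsigned long good = cr3 & ~31ul;   /* 0x0000000100002000 -- right */
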
diff --git a/arch/x86/math-emu/Makefile b/arch/x86/math-emu/Makefile
index 9b0c63b60302..1b2dac174321 100644
--- a/arch/x86/math-emu/Makefile
+++ b/arch/x86/math-emu/Makefile
@@ -5,8 +5,8 @@
#DEBUG = -DDEBUGGING
DEBUG =
PARANOID = -DPARANOID
-EXTRA_CFLAGS := $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION)
-EXTRA_AFLAGS := $(PARANOID)
+ccflags-y += $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION)
+asflags-y += $(PARANOID)
# From 'C' language sources:
C_OBJS =fpu_entry.o errors.o \
diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
index afbc4d805d66..c9c320dccca1 100644
--- a/arch/x86/math-emu/fpu_emu.h
+++ b/arch/x86/math-emu/fpu_emu.h
@@ -157,7 +157,7 @@ extern u_char const data_sizes_16[32];
#define signbyte(a) (((u_char *)(a))[9])
#define getsign(a) (signbyte(a) & 0x80)
-#define setsign(a,b) { if (b) signbyte(a) |= 0x80; else signbyte(a) &= 0x7f; }
+#define setsign(a,b) { if ((b) != 0) signbyte(a) |= 0x80; else signbyte(a) &= 0x7f; }
#define copysign(a,b) { if (getsign(a)) signbyte(b) |= 0x80; \
else signbyte(b) &= 0x7f; }
#define changesign(a) { signbyte(a) ^= 0x80; }
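
The added "(b) != 0" looks like a compiler-warning fix; the most plausible trigger (an assumption, since the changelog isn't shown here) is GCC 7's -Wint-in-bool-context, which fires when an arithmetic expression such as a product is used directly as a truth value. With a hypothetical call shape:

    setsign(dest, sign_a * sign_b);   /* hypothetical call site */

    /* old expansion:  if (sign_a * sign_b) ...
     *     -> GCC 7: "'*' in boolean context, suggest '&&' instead"
     * new expansion:  if ((sign_a * sign_b) != 0) ...
     *     -> explicit comparison, no warning */
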
diff --git a/arch/x86/math-emu/reg_compare.c b/arch/x86/math-emu/reg_compare.c
index b77360fdbf4a..19b33b50adfa 100644
--- a/arch/x86/math-emu/reg_compare.c
+++ b/arch/x86/math-emu/reg_compare.c
@@ -168,7 +168,7 @@ static int compare(FPU_REG const *b, int tagb)
/* This function requires that st(0) is not empty */
int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
{
- int f = 0, c;
+ int f, c;
c = compare(loaded_data, loaded_tag);
@@ -189,12 +189,12 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
case COMP_No_Comp:
f = SW_C3 | SW_C2 | SW_C0;
break;
-#ifdef PARANOID
default:
+#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x121);
+#endif /* PARANOID */
f = SW_C3 | SW_C2 | SW_C0;
break;
-#endif /* PARANOID */
}
setcc(f);
if (c & COMP_Denormal) {
@@ -205,7 +205,7 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
static int compare_st_st(int nr)
{
- int f = 0, c;
+ int f, c;
FPU_REG *st_ptr;
if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
@@ -235,12 +235,12 @@ static int compare_st_st(int nr)
case COMP_No_Comp:
f = SW_C3 | SW_C2 | SW_C0;
break;
-#ifdef PARANOID
default:
+#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x122);
+#endif /* PARANOID */
f = SW_C3 | SW_C2 | SW_C0;
break;
-#endif /* PARANOID */
}
setcc(f);
if (c & COMP_Denormal) {
@@ -283,12 +283,12 @@ static int compare_i_st_st(int nr)
case COMP_No_Comp:
f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF;
break;
-#ifdef PARANOID
default:
+#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x122);
+#endif /* PARANOID */
f = 0;
break;
-#endif /* PARANOID */
}
FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f;
if (c & COMP_Denormal) {
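
The reg_compare.c pattern, repeated identically at all three switch sites: with PARANOID undefined, the old code had no default: arm at all, so an unexpected comparison result silently kept the dummy f = 0 initializer ("flags clear") instead of reporting "unordered". Outline of the reshaped switch:

    int f, c;   /* dropping "f = 0" lets the compiler verify all paths */

    c = compare(loaded_data, loaded_tag);
    switch (c & 7) {
    /* ... the real cases each assign f ... */
    default:                            /* present even without PARANOID */
    #ifdef PARANOID
            EXCEPTION(EX_INTERNAL | 0x121);
    #endif
            f = SW_C3 | SW_C2 | SW_C0;  /* report "unordered" */
            break;
    }
    setcc(f);
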
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
index 6e075afa7877..58337b2bc682 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
@@ -38,8 +38,10 @@ static void __init *max7315_platform_data(void *info)
*/
strcpy(i2c_info->type, "max7315");
if (nr++) {
- sprintf(base_pin_name, "max7315_%d_base", nr);
- sprintf(intr_pin_name, "max7315_%d_int", nr);
+ snprintf(base_pin_name, sizeof(base_pin_name),
+ "max7315_%d_base", nr);
+ snprintf(intr_pin_name, sizeof(intr_pin_name),
+ "max7315_%d_int", nr);
} else {
strcpy(base_pin_name, "max7315_base");
strcpy(intr_pin_name, "max7315_int");
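
sprintf() here could overflow its fixed-size destination once nr needs more digits than the buffer anticipates; snprintf() bounds the write. A sketch (the buffer size is an assumption, not taken from this file):

    char base_pin_name[18];   /* assumed size; the real one is declared earlier */

    /* writes at most sizeof(base_pin_name) - 1 characters plus the NUL,
     * truncating instead of scribbling past the end: */
    snprintf(base_pin_name, sizeof(base_pin_name), "max7315_%d_base", nr);
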
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index d4a61ddf9e62..3e4bdb442fbc 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -40,7 +40,6 @@ static int timeout_base_ns[] = {
static int timeout_us;
static bool nobau = true;
static int nobau_perm;
-static cycles_t congested_cycles;
/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
@@ -829,10 +828,10 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
bcp->period_requests++;
bcp->period_time += elapsed;
- if ((elapsed > congested_cycles) &&
+ if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
(bcp->period_requests > bcp->cong_reps) &&
((bcp->period_time / bcp->period_requests) >
- congested_cycles)) {
+ usec_2_cycles(bcp->cong_response_us))) {
stat->s_congested++;
disable_for_period(bcp, stat);
}
@@ -2222,14 +2221,17 @@ static int __init uv_bau_init(void)
else if (is_uv1_hub())
ops = uv1_bau_ops;
+ nuvhubs = uv_num_possible_blades();
+ if (nuvhubs < 2) {
+ pr_crit("UV: BAU disabled - insufficient hub count\n");
+ goto err_bau_disable;
+ }
+
for_each_possible_cpu(cur_cpu) {
mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
}
- nuvhubs = uv_num_possible_blades();
- congested_cycles = usec_2_cycles(congested_respns_us);
-
uv_base_pnode = 0x7fffffff;
for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
cpus = uv_blade_nr_possible_cpus(uvhub);
@@ -2242,9 +2244,8 @@ static int __init uv_bau_init(void)
enable_timeouts();
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
- set_bau_off();
- nobau_perm = 1;
- return 0;
+ pr_crit("UV: BAU disabled - per CPU init failed\n");
+ goto err_bau_disable;
}
vector = UV_BAU_MESSAGE;
@@ -2270,6 +2271,16 @@ static int __init uv_bau_init(void)
}
return 0;
+
+err_bau_disable:
+
+ for_each_possible_cpu(cur_cpu)
+ free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));
+
+ set_bau_off();
+ nobau_perm = 1;
+
+ return -EINVAL;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);