From a01b3391b542aaaed539f9d9d6d0d4d6502ab9c6 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 5 Jan 2017 15:02:35 +0200 Subject: x86/platform/intel-mid: Get rid of duplication of IPC handler There is no other device handler than ipc_device_handler() and sfi.c already has a handler for IPC devices. Replace a pointer to custom handler by a flag. Due to this change adjust sfi_handle_ipc_dev() to handle it instead of ipc_device_handler(). Signed-off-by: Andy Shevchenko Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170105130235.177792-2-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/intel-mid.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 49da9f497b90..91ead0cefa76 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h @@ -42,10 +42,8 @@ struct devs_id { char name[SFI_NAME_LEN + 1]; u8 type; u8 delay; + u8 msic; void *(*get_platform_data)(void *info); - /* Custom handler for devices */ - void (*device_handler)(struct sfi_device_table_entry *pentry, - struct devs_id *dev); }; #define sfi_device(i) \ -- cgit From 49776faf93f8074bb4990beac04781a9507d3650 Mon Sep 17 00:00:00 2001 From: Radim Krčmář Date: Fri, 16 Dec 2016 16:10:02 +0100 Subject: KVM: x86: decouple irqchip_in_kernel() and pic_irqchip() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit irqchip_in_kernel() tried to save a bit by reusing pic_irqchip(), but it just complicated the code. Add a separate state for the irqchip mode. Reviewed-by: David Hildenbrand [Used Paolo's version of condition in irqchip_in_kernel().] Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a7066dc1a7e9..fc03ab1f6110 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -716,6 +716,12 @@ struct kvm_hv { HV_REFERENCE_TSC_PAGE tsc_ref; }; +enum kvm_irqchip_mode { + KVM_IRQCHIP_NONE, + KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */ + KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */ +}; + struct kvm_arch { unsigned int n_used_mmu_pages; unsigned int n_requested_mmu_pages; @@ -788,7 +794,7 @@ struct kvm_arch { u64 disabled_quirks; - bool irqchip_split; + enum kvm_irqchip_mode irqchip_mode; u8 nr_reserved_ioapic_pins; bool disabled_lapic_found; -- cgit From f3414bc77419463c0d81eaa2cea7ee4ccb447c7d Mon Sep 17 00:00:00 2001 From: David Matlack Date: Tue, 20 Dec 2016 15:25:57 -0800 Subject: kvm: x86: export maximum number of mmu_page_hash collisions Report the maximum number of mmu_page_hash collisions as a per-VM stat. This will make it easy to identify problems with the mmu_page_hash in the future. 
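To illustrate the statistic, a max-collision counter can be maintained at hash-insert time roughly as follows (a sketch only: the bucket walk and the names hash_insert_counted/max_collisions are illustrative, not the actual KVM code):

#include <linux/list.h>

/* Count the entries already chained in the target bucket, record the
 * deepest chain seen so far, then link in the new node. */
static void hash_insert_counted(struct hlist_head *bucket,
				struct hlist_node *node,
				unsigned long *max_collisions)
{
	struct hlist_node *pos;
	unsigned long depth = 0;

	hlist_for_each(pos, bucket)
		depth++;

	if (depth > *max_collisions)
		*max_collisions = depth;

	hlist_add_head(node, bucket);
}
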
Signed-off-by: David Matlack Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fc03ab1f6110..1bb1ffc0024c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -821,6 +821,7 @@ struct kvm_vm_stat { ulong mmu_unsync; ulong remote_tlb_flush; ulong lpages; + ulong max_mmu_page_hash_collisions; }; struct kvm_vcpu_stat { -- cgit From 114df303a7eeae8b50ebf68229b7e647714a9bea Mon Sep 17 00:00:00 2001 From: David Matlack Date: Mon, 19 Dec 2016 13:58:25 -0800 Subject: kvm: x86: reduce collisions in mmu_page_hash When using two-dimensional paging, the mmu_page_hash (which provides lookups for existing kvm_mmu_page structs), becomes imbalanced; with too many collisions in buckets 0 and 512. This has been seen to cause mmu_lock to be held for multiple milliseconds in kvm_mmu_get_page on VMs with a large amount of RAM mapped with 4K pages. The current hash function uses the lower 10 bits of gfn to index into mmu_page_hash. When doing shadow paging, gfn is the address of the guest page table being shadow. These tables are 4K-aligned, which makes the low bits of gfn a good hash. However, with two-dimensional paging, no guest page tables are being shadowed, so gfn is the base address that is mapped by the table. Thus page tables (level=1) have a 2MB aligned gfn, page directories (level=2) have a 1GB aligned gfn, etc. This means hashes will only differ in their 10th bit. hash_64() provides a better hash. For example, on a VM with ~200G (99458 direct=1 kvm_mmu_page structs): hash max_mmu_page_hash_collisions -------------------------------------------- low 10 bits 49847 hash_64 105 perfect 97 While we're changing the hash, increase the table size by 4x to better support large VMs (further reduces number of collisions in 200G VM to 29). Note that hash_64() does not provide a good distribution prior to commit ef703f49a6c5 ("Eliminate bad hash multipliers from hash_32() and hash_64()"). Signed-off-by: David Matlack Change-Id: I5aa6b13c834722813c6cca46b8b1ed6f53368ade Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1bb1ffc0024c..7e594a325158 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -115,7 +115,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) #define KVM_PERMILLE_MMU_PAGES 20 #define KVM_MIN_ALLOC_MMU_PAGES 64 -#define KVM_MMU_HASH_SHIFT 10 +#define KVM_MMU_HASH_SHIFT 12 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) #define KVM_MIN_FREE_MMU_PAGES 5 #define KVM_REFILL_PAGES 25 -- cgit From 27959a4415a5a00881a7b9353ab9b1274da2ca47 Mon Sep 17 00:00:00 2001 From: Junaid Shahid Date: Tue, 6 Dec 2016 16:46:10 -0800 Subject: kvm: x86: mmu: Use symbolic constants for EPT Violation Exit Qualifications This change adds some symbolic constants for VM Exit Qualifications related to EPT Violations and updates handle_ept_violation() to use these constants instead of hard-coded numbers. 
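As a sketch of the intended usage (simplified; decode_ept_violation() and the debug print are illustrative, but the EPT_VIOLATION_* constants are the ones added in the hunk below):

#include <asm/vmx.h>
#include <linux/printk.h>

/* Decode an EPT Violation exit qualification using the symbolic
 * constants instead of hard-coded bit numbers. */
static void decode_ept_violation(unsigned long exit_qual)
{
	bool read  = exit_qual & EPT_VIOLATION_READ;	/* data read */
	bool write = exit_qual & EPT_VIOLATION_WRITE;	/* data write */
	bool fetch = exit_qual & EPT_VIOLATION_INSTR;	/* insn fetch */
	/* permissions of the faulting page's final EPT entry */
	bool r = exit_qual & EPT_VIOLATION_READABLE;
	bool w = exit_qual & EPT_VIOLATION_WRITABLE;
	bool x = exit_qual & EPT_VIOLATION_EXECUTABLE;

	pr_debug("EPT violation: %c%c%c access to %c%c%c page\n",
		 read ? 'r' : '-', write ? 'w' : '-', fetch ? 'f' : '-',
		 r ? 'r' : '-', w ? 'w' : '-', x ? 'x' : '-');
}
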
Signed-off-by: Junaid Shahid Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/vmx.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 2b5b2d4b924e..25a482fb5241 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -499,6 +499,22 @@ struct vmx_msr_entry { #define ENTRY_FAIL_NMI 3 #define ENTRY_FAIL_VMCS_LINK_PTR 4 +/* + * Exit Qualifications for EPT Violations + */ +#define EPT_VIOLATION_READ_BIT 0 +#define EPT_VIOLATION_WRITE_BIT 1 +#define EPT_VIOLATION_INSTR_BIT 2 +#define EPT_VIOLATION_READABLE_BIT 3 +#define EPT_VIOLATION_WRITABLE_BIT 4 +#define EPT_VIOLATION_EXECUTABLE_BIT 5 +#define EPT_VIOLATION_READ (1 << EPT_VIOLATION_READ_BIT) +#define EPT_VIOLATION_WRITE (1 << EPT_VIOLATION_WRITE_BIT) +#define EPT_VIOLATION_INSTR (1 << EPT_VIOLATION_INSTR_BIT) +#define EPT_VIOLATION_READABLE (1 << EPT_VIOLATION_READABLE_BIT) +#define EPT_VIOLATION_WRITABLE (1 << EPT_VIOLATION_WRITABLE_BIT) +#define EPT_VIOLATION_EXECUTABLE (1 << EPT_VIOLATION_EXECUTABLE_BIT) + /* * VM-instruction error numbers */ -- cgit From 37f0e8fe6b10ee2ab52576caa721ee1282de74a6 Mon Sep 17 00:00:00 2001 From: Junaid Shahid Date: Tue, 6 Dec 2016 16:46:15 -0800 Subject: kvm: x86: mmu: Do not use bit 63 for tracking special SPTEs MMIO SPTEs currently set both bits 62 and 63 to distinguish them as special PTEs. However, bit 63 is used as the SVE bit in Intel EPT PTEs. The SVE bit is ignored for misconfigured PTEs but not necessarily for not-Present PTEs. Since MMIO SPTEs use an EPT misconfiguration, so using bit 63 for them is acceptable. However, the upcoming fast access tracking feature adds another type of special tracking PTE, which uses not-Present PTEs and hence should not set bit 63. In order to use common bits to distinguish both type of special PTEs, we now use only bit 62 as the special bit. Signed-off-by: Junaid Shahid Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 7 +++++++ arch/x86/include/asm/vmx.h | 9 +++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7e594a325158..3272a5e4aaad 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -208,6 +208,13 @@ enum { PFERR_WRITE_MASK | \ PFERR_PRESENT_MASK) +/* + * The mask used to denote special SPTEs, which can be either MMIO SPTEs or + * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting + * with the SVE bit in EPT PTEs. 
+ */ +#define SPTE_SPECIAL_MASK (1ULL << 62) + /* apic attention bits */ #define KVM_APIC_CHECK_VAPIC 0 /* diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 25a482fb5241..fc061cbb46e0 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -467,8 +467,13 @@ enum vmcs_field { #define VMX_EPT_WRITABLE_MASK 0x2ull #define VMX_EPT_EXECUTABLE_MASK 0x4ull #define VMX_EPT_IPAT_BIT (1ull << 6) -#define VMX_EPT_ACCESS_BIT (1ull << 8) -#define VMX_EPT_DIRTY_BIT (1ull << 9) +#define VMX_EPT_ACCESS_BIT (1ull << 8) +#define VMX_EPT_DIRTY_BIT (1ull << 9) + +/* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */ +#define VMX_EPT_MISCONFIG_WX_VALUE (VMX_EPT_WRITABLE_MASK | \ + VMX_EPT_EXECUTABLE_MASK) + #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul -- cgit From f160c7b7bb322bf079a5bb4dd34c58f17553f193 Mon Sep 17 00:00:00 2001 From: Junaid Shahid Date: Tue, 6 Dec 2016 16:46:16 -0800 Subject: kvm: x86: mmu: Lockless access tracking for Intel CPUs without EPT A bits. This change implements lockless access tracking for Intel CPUs without EPT A bits. This is achieved by marking the PTEs as not-present (but not completely clearing them) when clear_flush_young() is called after marking the pages as accessed. When an EPT Violation is generated as a result of the VM accessing those pages, the PTEs are restored to their original values. Signed-off-by: Junaid Shahid Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/include/asm/vmx.h | 9 ++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3272a5e4aaad..99a71d90b6ae 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1064,7 +1064,8 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu); void kvm_mmu_init_vm(struct kvm *kvm); void kvm_mmu_uninit_vm(struct kvm *kvm); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, - u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask); + u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask, + u64 acc_track_mask); void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); void kvm_mmu_slot_remove_write_access(struct kvm *kvm, diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index fc061cbb46e0..a22a4790f1ac 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -469,11 +469,14 @@ enum vmcs_field { #define VMX_EPT_IPAT_BIT (1ull << 6) #define VMX_EPT_ACCESS_BIT (1ull << 8) #define VMX_EPT_DIRTY_BIT (1ull << 9) +#define VMX_EPT_RWX_MASK (VMX_EPT_READABLE_MASK | \ + VMX_EPT_WRITABLE_MASK | \ + VMX_EPT_EXECUTABLE_MASK) +#define VMX_EPT_MT_MASK (7ull << VMX_EPT_MT_EPTE_SHIFT) /* The mask to use to trigger an EPT Misconfiguration in order to track MMIO */ -#define VMX_EPT_MISCONFIG_WX_VALUE (VMX_EPT_WRITABLE_MASK | \ - VMX_EPT_EXECUTABLE_MASK) - +#define VMX_EPT_MISCONFIG_WX_VALUE (VMX_EPT_WRITABLE_MASK | \ + VMX_EPT_EXECUTABLE_MASK) #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul -- cgit From 0f89b207b04a1a399e19d35293658e3a571da3d7 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 14 Dec 2016 14:59:23 -0500 Subject: kvm: svm: Use the hardware provided GPA instead of page walk When a guest causes a NPF which requires emulation, KVM sometimes walks the guest page tables to translate the GVA to a GPA. This is unnecessary most of the time on AMD hardware since the hardware provides the GPA in EXITINFO2. 
The only exception cases involve string operations involving rep or operations that use two memory locations. With rep, the GPA will only be the value of the initial NPF and with dual memory locations we won't know which memory address was translated into EXITINFO2. Signed-off-by: Tom Lendacky Reviewed-by: Borislav Petkov Signed-off-by: Brijesh Singh Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_emulate.h | 1 + arch/x86/include/asm/kvm_host.h | 3 +++ 2 files changed, 4 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index e9cd7befcb76..3e8c287090e4 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -441,5 +441,6 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq); void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt); void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt); +bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt); #endif /* _ASM_X86_KVM_X86_EMULATE_H */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 99a71d90b6ae..0419e114f27b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -675,6 +675,9 @@ struct kvm_vcpu_arch { int pending_ioapic_eoi; int pending_external_vector; + + /* GPA available (AMD only) */ + bool gpa_available; }; struct kvm_lpage_info { -- cgit From 0f1e261ead16ce09169bf2d223d4c8803576f85e Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Sat, 17 Dec 2016 16:05:19 +0100 Subject: KVM: x86: add VCPU stat for KVM_REQ_EVENT processing This statistic can be useful to estimate the cost of an IRQ injection scenario, by comparing it with irq_injections. For example the stat shows that sti;hlt triggers more KVM_REQ_EVENT than sti;nop. Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0419e114f27b..417502cf42b6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -861,6 +861,7 @@ struct kvm_vcpu_stat { u64 hypercalls; u64 irq_injections; u64 nmi_injections; + u64 req_event; }; struct x86_instruction_info; -- cgit From f1be6cdaf57ce918828b6cff6ff2b4ea87be7f62 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 7 Jan 2017 14:34:57 +0200 Subject: x86/platform/intel-mid: Make intel_scu_device_register() static There is no need anymore to have intel_scu_device_register() exported. Annotate it with static keyword. While here, rename to intel_scu_ipc_device_register() to use same pattern for all SFI enumerated device register helpers. 
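After the change, the helper is private to sfi.c along these lines (a sketch: the body is elided here, and only the static linkage and the new name come from the patch):

static void intel_scu_ipc_device_register(struct platform_device *pdev)
{
	/* ... queue @pdev so it is registered once the SCU IPC is up ... */
}
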
Signed-off-by: Andy Shevchenko Link: http://lkml.kernel.org/r/20170107123457.53033-1-andriy.shevchenko@linux.intel.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/intel-mid.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 91ead0cefa76..fe04491130ae 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h @@ -27,7 +27,6 @@ extern void intel_mid_pwr_power_off(void); extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev); extern int get_gpio_by_name(const char *name); -extern void intel_scu_device_register(struct platform_device *pdev); extern int __init sfi_parse_mrtc(struct sfi_table_header *table); extern int __init sfi_parse_mtmr(struct sfi_table_header *table); extern int sfi_mrtc_num; -- cgit From aef591cd3d1ddccb268f64c836d38382007373c1 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 12 Jan 2017 15:27:58 -0500 Subject: locking/spinlocks/x86, paravirt: Remove paravirt_ticketlocks_enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a follow-up of commit: cfd8983f03c7b2 ("x86, locking/spinlocks: Remove ticket (spin)lock implementation") The static_key structure 'paravirt_ticketlocks_enabled' is now removed as it is no longer used. As a result, the init functions kvm_spinlock_init_jump() and xen_init_spinlocks_jump() are also removed. A simple build and boot test was done to verify it. Signed-off-by: Waiman Long Reviewed-by: Boris Ostrovsky Cc: Alok Kataria Cc: Chris Wright Cc: Jeremy Fitzhardinge Cc: Juergen Gross Cc: Linus Torvalds Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Radim Krčmář Cc: Rusty Russell Cc: Thomas Gleixner Cc: kvm@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/1484252878-1962-1-git-send-email-longman@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/spinlock.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 921bea7a2708..6d391909e864 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -23,9 +23,6 @@ /* How long a lock should spin before we consider blocking */ #define SPIN_THRESHOLD (1 << 15) -extern struct static_key paravirt_ticketlocks_enabled; -static __always_inline bool static_key_false(struct static_key *key); - #include /* -- cgit From 9e3d6223d2093a8903c8f570a06284453ee59944 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 9 Dec 2016 09:30:11 +0100 Subject: math64, timers: Fix 32bit mul_u64_u32_shr() and friends It turns out that while GCC-4.4 manages to generate 32x32->64 mult instructions for the 32bit mul_u64_u32_shr() code, any GCC after that fails horribly. Fix this by providing an explicit mul_u32_u32() function which can be architcture provided. Reported-by: Chris Metcalf Signed-off-by: Peter Zijlstra (Intel) Acked-by: Chris Metcalf [for tile] Cc: Christopher S. 
Hall Cc: David Gibson Cc: John Stultz Cc: Laurent Vivier Cc: Liav Rehana Cc: Linus Torvalds Cc: Parit Bhargava Cc: Peter Zijlstra Cc: Richard Cochran Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20161209083011.GD15765@worktop.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- arch/x86/include/asm/div64.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h index ced283ac79df..af95c47d5c9e 100644 --- a/arch/x86/include/asm/div64.h +++ b/arch/x86/include/asm/div64.h @@ -59,6 +59,17 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) } #define div_u64_rem div_u64_rem +static inline u64 mul_u32_u32(u32 a, u32 b) +{ + u32 high, low; + + asm ("mull %[b]" : "=a" (low), "=d" (high) + : [a] "a" (a), [b] "rm" (b) ); + + return low | ((u64)high) << 32; +} +#define mul_u32_u32 mul_u32_u32 + #else # include #endif /* CONFIG_X86_32 */ -- cgit From 06b35d93af0a5904aa832f58733be84ddbfe2e04 Mon Sep 17 00:00:00 2001 From: Piotr Luc Date: Tue, 10 Jan 2017 18:34:02 +0100 Subject: x86/cpufeature: Add AVX512_VPOPCNTDQ feature MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Vector population count instructions for dwords and qwords are going to be available in future Intel Xeon & Xeon Phi processors. Bit 14 of CPUID[level:0x07, ECX] indicates that the instructions are supported by a processor. The specification can be found in the Intel Software Developer Manual (SDM) and in the Instruction Set Extensions Programming Reference (ISE). Populate the feature bit and clear it when xsave is disabled. Signed-off-by: Piotr Luc Reviewed-by: Borislav Petkov Cc: Paolo Bonzini Cc: kvm@vger.kernel.org Cc: Radim Krčmář Link: http://lkml.kernel.org/r/20170110173403.6010-2-piotr.luc@intel.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/cpufeatures.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index eafee3161d1c..d9d7136edf05 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -288,6 +288,7 @@ #define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ +#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ #define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ @@ -320,5 +321,4 @@ #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ - #endif /* _ASM_X86_CPUFEATURES_H */ -- cgit From 3f646ed70ccd1c4e5c1263d2922247d28c8e08f0 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Wed, 18 Jan 2017 16:45:00 -0700 Subject: Drivers: hv: vmbus: Move the definition of hv_x64_msr_hypercall_contents As part of the effort to separate out architecture specific code, move the definition of hv_x64_msr_hypercall_contents to x86 specific header file. Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index aaf59b7da98a..188ddfdde2b9 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -13,6 +13,18 @@ struct ms_hyperv_info { extern struct ms_hyperv_info ms_hyperv; +/* + * Declare the MSR used to setup pages used to communicate with the hypervisor. + */ +union hv_x64_msr_hypercall_contents { + u64 as_uint64; + struct { + u64 enable:1; + u64 reserved:11; + u64 guest_physical_address:52; + }; +}; + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From 352c9624242d5836ad8a960826183011367871a4 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Wed, 18 Jan 2017 16:45:01 -0700 Subject: Drivers: hv: vmbus: Move the definition of generate_guest_id() As part of the effort to separate out architecture specific code, move the definition of generate_guest_id() to x86 specific header file. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 43 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 188ddfdde2b9..15a0c275c82e 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -25,6 +25,49 @@ union hv_x64_msr_hypercall_contents { }; }; +/* + * The guest OS needs to register the guest ID with the hypervisor. + * The guest ID is a 64 bit entity and the structure of this ID is + * specified in the Hyper-V specification: + * + * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx + * + * While the current guideline does not specify how Linux guest ID(s) + * need to be generated, our plan is to publish the guidelines for + * Linux and other guest operating systems that currently are hosted + * on Hyper-V. The implementation here conforms to this yet + * unpublished guidelines. + * + * + * Bit(s) + * 63 - Indicates if the OS is Open Source or not; 1 is Open Source + * 62:56 - Os Type; Linux is 0x100 + * 55:48 - Distro specific identification + * 47:16 - Linux kernel version number + * 15:0 - Distro specific identification + * + * + */ + +#define HV_LINUX_VENDOR_ID 0x8800 + +/* + * Generate the guest ID based on the guideline described above. + */ + +static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version, + __u64 d_info2) +{ + __u64 guest_id = 0; + + guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 56); + guest_id |= (d_info1 << 48); + guest_id |= (kernel_version << 16); + guest_id |= d_info2; + + return guest_id; +} + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From 8730046c1498e8fb8c9a124789893944e8ce8220 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Wed, 18 Jan 2017 16:45:02 -0700 Subject: Drivers: hv vmbus: Move Hypercall page setup out of common code As part of the effort to separate out architecture specific code, move the hypercall page setup to an architecture specific file. Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 15a0c275c82e..e5f57e15a507 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -80,4 +80,9 @@ void hv_setup_kexec_handler(void (*handler)(void)); void hv_remove_kexec_handler(void); void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); void hv_remove_crash_handler(void); + +#if IS_ENABLED(CONFIG_HYPERV) +void hyperv_init(void); +extern void *hv_hypercall_pg; +#endif #endif -- cgit From 6ab42a66d2cc10afefea9f9e5d9a5ad5a836d254 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Wed, 18 Jan 2017 16:45:03 -0700 Subject: Drivers: hv: vmbus: Move Hypercall invocation code out of common code As part of the effort to separate out architecture specific code, move the hypercall invocation code to an architecture specific file. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index e5f57e15a507..ed8e07399071 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -83,6 +83,5 @@ void hv_remove_crash_handler(void); #if IS_ENABLED(CONFIG_HYPERV) void hyperv_init(void); -extern void *hv_hypercall_pg; #endif #endif -- cgit From 63ed4e0c67df332681ebfef6eca6852da28d6300 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:46 -0700 Subject: Drivers: hv: vmbus: Consolidate all Hyper-V specific clocksource code As part of the effort to separate out architecture specific code, consolidate all Hyper-V specific clocksource code to an architecture specific code. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index ed8e07399071..adfe8cc9f7e3 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -25,6 +25,18 @@ union hv_x64_msr_hypercall_contents { }; }; +/* + * TSC page layout. + */ + +struct ms_hyperv_tsc_page { + volatile u32 tsc_sequence; + u32 reserved1; + volatile u64 tsc_scale; + volatile s64 tsc_offset; + u64 reserved2[509]; +}; + /* * The guest OS needs to register the guest ID with the hypervisor. * The guest ID is a 64 bit entity and the structure of this ID is -- cgit From 8de8af7e0873c4fdac2205327dff922819e16657 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:47 -0700 Subject: Drivers: hv: vmbus: Move the extracting of Hypervisor version information As part of the effort to separate out architecture specific code, extract hypervisor version information in an architecture specific file. Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index adfe8cc9f7e3..54729e3cba47 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -5,6 +5,25 @@ #include #include +/* + * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent + * is set by CPUID(HVCPUID_VERSION_FEATURES). + */ +enum hv_cpuid_function { + HVCPUID_VERSION_FEATURES = 0x00000001, + HVCPUID_VENDOR_MAXFUNCTION = 0x40000000, + HVCPUID_INTERFACE = 0x40000001, + + /* + * The remaining functions depend on the value of + * HVCPUID_INTERFACE + */ + HVCPUID_VERSION = 0x40000002, + HVCPUID_FEATURES = 0x40000003, + HVCPUID_ENLIGHTENMENT_INFO = 0x40000004, + HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005, +}; + struct ms_hyperv_info { u32 features; u32 misc_features; -- cgit From d058fa7e98ff01a4b4750a2210fc19906db3cbe1 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:48 -0700 Subject: Drivers: hv: vmbus: Move the crash notification function As part of the effort to separate out architecture specific code, move the crash notification function. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 1 + arch/x86/include/uapi/asm/hyperv.h | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 54729e3cba47..64e682d88684 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -114,5 +114,6 @@ void hv_remove_crash_handler(void); #if IS_ENABLED(CONFIG_HYPERV) void hyperv_init(void); +void hyperv_report_panic(struct pt_regs *regs); #endif #endif diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index 9b1a91834ac8..3a20ccf787b8 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -73,6 +73,9 @@ */ #define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8) +/* Crash MSR available */ +#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10) + /* * Feature identification: EBX indicates which flags were specified at * partition creation. The format is the same as the partition creation @@ -144,6 +147,11 @@ */ #define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5) +/* + * Crash notification flag. + */ +#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63) + /* MSR used to identify the guest OS. */ #define HV_X64_MSR_GUEST_OS_ID 0x40000000 -- cgit From 73638cddaad861a5ebb2b119d8b318d4bded8f8d Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:49 -0700 Subject: Drivers: hv: vmbus: Move the check for hypercall page setup As part of the effort to separate out architecture specific code, move the check for detecting if the hypercall page is setup. Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 64e682d88684..c843ef64defe 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -115,5 +115,6 @@ void hv_remove_crash_handler(void); #if IS_ENABLED(CONFIG_HYPERV) void hyperv_init(void); void hyperv_report_panic(struct pt_regs *regs); +bool hv_is_hypercall_page_setup(void); #endif #endif -- cgit From e810e48c0c9a1a1ebb90cfe966bce6dc80ce08e7 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:50 -0700 Subject: Drivers: hv: vmbus: Move the code to signal end of message As part of the effort to separate out architecture specific code, move the code for signaling end of message. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index c843ef64defe..b57b470ac2a7 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -99,6 +99,43 @@ static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version, return guest_id; } + +/* Free the message slot and signal end-of-message if required */ +static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) +{ + /* + * On crash we're reading some other CPU's message page and we need + * to be careful: this other CPU may already had cleared the header + * and the host may already had delivered some other message there. + * In case we blindly write msg->header.message_type we're going + * to lose it. We can still lose a message of the same type but + * we count on the fact that there can only be one + * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages + * on crash. + */ + if (cmpxchg(&msg->header.message_type, old_msg_type, + HVMSG_NONE) != old_msg_type) + return; + + /* + * Make sure the write to MessageType (ie set to + * HVMSG_NONE) happens before we read the + * MessagePending and EOMing. Otherwise, the EOMing + * will not deliver any more messages since there is + * no empty slot + */ + mb(); + + if (msg->header.message_flags.msg_pending) { + /* + * This will cause message queue rescan to + * possibly deliver another msg from the + * hypervisor + */ + wrmsrl(HV_X64_MSR_EOM, 0); + } +} + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From d5116b4091ecca271c249ede43a49c1245920558 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:51 -0700 Subject: Drivers: hv: vmbus: Restructure the clockevents code Move the relevant code that programs the hypervisor to an architecture specific file. Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index b57b470ac2a7..a58c201f3412 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -136,6 +136,10 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) } } +#define hv_get_current_tick(tick) rdmsrl(HV_X64_MSR_TIME_REF_COUNT, tick) +#define hv_init_timer(timer, tick) wrmsrl(timer, tick) +#define hv_init_timer_config(config, val) wrmsrl(config, val) + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From 155e4a2f28a59e5344dfa7c5d003161fe59a5bf2 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:54 -0700 Subject: Drivers: hv: vmbus: Define APIs to manipulate the message page As part of cleaning up architecture specific code, define APIs to manipulate the message page. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index a58c201f3412..1e75141bc123 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -140,6 +140,9 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) #define hv_init_timer(timer, tick) wrmsrl(timer, tick) #define hv_init_timer_config(config, val) wrmsrl(config, val) +#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val) +#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val) + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From 8e307bf82d76ab02e95a00d132d926f04db6ccab Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:55 -0700 Subject: Drivers: hv: vmbus: Define APIs to manipulate the event page As part of cleaning up architecture specific code, define APIs to manipulate the event page. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 1e75141bc123..2ea7e16fc678 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -143,6 +143,9 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) #define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val) #define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val) +#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val) +#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val) + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From 06d1d98a839f196e94cb726008fb2118e430f356 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:56 -0700 Subject: Drivers: hv: vmbus: Define APIs to manipulate the synthetic interrupt controller As part of cleaning up architecture specific code, define APIs to manipulate the interrupt controller state. Signed-off-by: K. Y. 
Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 2ea7e16fc678..1ea19a57cf4c 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -146,6 +146,9 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) #define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val) #define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val) +#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val) +#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val) + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From 7297ff0ca9db7e2d830841035b95d8b94b529142 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:57 -0700 Subject: Drivers: hv: vmbus: Define an API to retrieve virtual processor index As part of cleaning up architecture specific code, define an API to retrieve the virtual procesor index. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 1ea19a57cf4c..2d40bfc57e7a 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -149,6 +149,8 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) #define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val) #define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val) +#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index) + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From 37e11d5c7052a5ca55ef807731c75218ea341b4c Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 19 Jan 2017 11:51:58 -0700 Subject: Drivers: hv: vmbus: Define an APIs to manage interrupt state As part of cleaning up architecture specific code, define APIs to manage interrupt state. Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 2d40bfc57e7a..42505d1158d6 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -151,6 +151,9 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) #define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index) +#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val) +#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val) + void hyperv_callback_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector -- cgit From a585df8edabdb47ae25214ebb3a627ca7ce800d3 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 20 Jan 2017 21:29:41 +0100 Subject: x86/MSR: Carve out bare minimum accessors Add __rdmsr() and __wrmsr() which *only* read and write an MSR with exception handling. Those are going to be used in early code, like the microcode loader, which cannot stomach tracing code piggybacking on the MSR operation. While at it, get rid of __native_write_msr_notrace(). 
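Typical use from early code then looks like this (a minimal sketch; the choice of MSR_IA32_TSC_ADJUST is illustrative):

#include <asm/msr.h>

/* Bare accessors: no tracepoints, but the reads/writes are still
 * covered by the rdmsr/wrmsr exception tables. */
static void early_msr_example(void)
{
	u64 val = __rdmsr(MSR_IA32_TSC_ADJUST);

	__wrmsr(MSR_IA32_TSC_ADJUST, (u32)val, (u32)(val >> 32));
}
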
Signed-off-by: Borislav Petkov Reviewed-by: Thomas Gleixner Link: http://lkml.kernel.org/r/20170120202955.4091-3-bp@alien8.de Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/apic.h | 2 +- arch/x86/include/asm/msr.h | 51 +++++++++++++++++++++++++++------------------ 2 files changed, 32 insertions(+), 21 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 0c5fbc68e82d..eff8e36aaf72 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -195,7 +195,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v) static inline void native_apic_msr_eoi_write(u32 reg, u32 v) { - wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0); + __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0); } static inline u32 native_apic_msr_read(u32 reg) diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index db0b90c3b03e..898dba2e2e2c 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -80,7 +80,14 @@ static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {} static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} #endif -static inline unsigned long long native_read_msr(unsigned int msr) +/* + * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR + * accessors and should not have any tracing or other functionality piggybacking + * on them - those are *purely* for accessing MSRs and nothing more. So don't even + * think of extending them - you will be slapped with a stinking trout or a frozen + * shark will reach you, wherever you are! You've been warned. + */ +static inline unsigned long long notrace __rdmsr(unsigned int msr) { DECLARE_ARGS(val, low, high); @@ -88,11 +95,30 @@ static inline unsigned long long native_read_msr(unsigned int msr) "2:\n" _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe) : EAX_EDX_RET(val, low, high) : "c" (msr)); - if (msr_tracepoint_active(__tracepoint_read_msr)) - do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0); + return EAX_EDX_VAL(val, low, high); } +static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high) +{ + asm volatile("1: wrmsr\n" + "2:\n" + _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe) + : : "c" (msr), "a"(low), "d" (high) : "memory"); +} + +static inline unsigned long long native_read_msr(unsigned int msr) +{ + unsigned long long val; + + val = __rdmsr(msr); + + if (msr_tracepoint_active(__tracepoint_read_msr)) + do_trace_read_msr(msr, val, 0); + + return val; +} + static inline unsigned long long native_read_msr_safe(unsigned int msr, int *err) { @@ -114,31 +140,16 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr, return EAX_EDX_VAL(val, low, high); } -/* Can be uninlined because referenced by paravirt */ -static inline void notrace -__native_write_msr_notrace(unsigned int msr, u32 low, u32 high) -{ - asm volatile("1: wrmsr\n" - "2:\n" - _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe) - : : "c" (msr), "a"(low), "d" (high) : "memory"); -} - /* Can be uninlined because referenced by paravirt */ static inline void notrace native_write_msr(unsigned int msr, u32 low, u32 high) { - __native_write_msr_notrace(msr, low, high); + __wrmsr(msr, low, high); + if (msr_tracepoint_active(__tracepoint_write_msr)) do_trace_write_msr(msr, ((u64)high << 32 | low), 0); } -static inline void -wrmsr_notrace(unsigned int msr, u32 low, u32 high) -{ - __native_write_msr_notrace(msr, low, high); -} - /* Can be 
uninlined because referenced by paravirt */ static inline int notrace native_write_msr_safe(unsigned int msr, u32 low, u32 high) -- cgit From 0c12d18ab96e4da9f3e963bc242689bdeaaf2330 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 20 Jan 2017 21:29:42 +0100 Subject: x86/microcode: Convert to bare minimum MSR accessors Having tracepoints to the MSR accessors makes them unsuitable for early microcode loading: think 32-bit before paging is enabled and us chasing pointers to test whether a tracepoint is enabled or not. Results in a reliable triple fault. Convert to the bare ones. Signed-off-by: Borislav Petkov Link: http://lkml.kernel.org/r/20170120202955.4091-4-bp@alien8.de Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/microcode.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 38711df3bcb5..90b22bbdfce9 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -7,18 +7,17 @@ #define native_rdmsr(msr, val1, val2) \ do { \ - u64 __val = native_read_msr((msr)); \ + u64 __val = __rdmsr((msr)); \ (void)((val1) = (u32)__val); \ (void)((val2) = (u32)(__val >> 32)); \ } while (0) #define native_wrmsr(msr, low, high) \ - native_write_msr(msr, low, high) + __wrmsr(msr, low, high) #define native_wrmsrl(msr, val) \ - native_write_msr((msr), \ - (u32)((u64)(val)), \ - (u32)((u64)(val) >> 32)) + __wrmsr((msr), (u32)((u64)(val)), \ + (u32)((u64)(val) >> 32)) struct ucode_patch { struct list_head plist; -- cgit From f3ad136d6ef966c8ba9090770c2bfe7e85f18471 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 20 Jan 2017 21:29:51 +0100 Subject: x86/microcode/AMD: Check patch level only on the BSP Check final patch levels for AMD only on the BSP. This way, we decide early and only once whether to continue loading or to leave the loader disabled on such systems. Simplify a lot. Signed-off-by: Borislav Petkov Link: http://lkml.kernel.org/r/20170120202955.4091-13-bp@alien8.de Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/microcode_amd.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 3e3e20be829a..3d57009e168b 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -54,6 +54,4 @@ static inline int __init save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } void reload_ucode_amd(void) {} #endif - -extern bool check_current_patch_level(u32 *rev, bool early); #endif /* _ASM_X86_MICROCODE_AMD_H */ -- cgit From 9b052ea4ced0fa1ad30a2eafe86984a16297e6f1 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 23 Jan 2017 19:35:07 +0100 Subject: x86/ras/therm_throt: Do not log a fake MCE for thermal events We log a fake bank 128 MCE to note that we're handling a CPU thermal event. However, this confuses people into thinking that their hardware generates MCEs. Hijacking MCA for logging thermal events is a gross misuse anyway and it shouldn't have been done in the first place. And besides we have other means for dealing with thermal events which are much more suitable. So let's kill the MCE logging part. 
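The remaining mechanism for core thermal thresholds is the platform_thermal_notify hook, which stays in mce.h (see the hunk below); wiring it up might look like this sketch (handler name and body are illustrative):

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/mce.h>

/* Called from the thermal interrupt path with the IA32_THERM_STATUS
 * value; no fake MCE record is involved. */
static int example_thermal_notify(__u64 msr_val)
{
	pr_info("core thermal threshold crossed, status=0x%llx\n",
		(unsigned long long)msr_val);
	return 0;
}

static int __init example_thermal_init(void)
{
	platform_thermal_notify = example_thermal_notify;
	return 0;
}
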
Signed-off-by: Borislav Petkov Acked-by: Ashok Raj Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: Yazen Ghannam Cc: linux-edac Link: http://lkml.kernel.org/r/20170105213846.GA12024@gmail.com Link: http://lkml.kernel.org/r/20170123183514.13356-3-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mce.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 5132f2a6c0a2..a09ed05725c2 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -97,10 +97,6 @@ #define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ -/* Software defined banks */ -#define MCE_EXTENDED_BANK 128 -#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0) - #define MCE_LOG_LEN 32 #define MCE_LOG_SIGNATURE "MACHINECHECK" @@ -306,8 +302,6 @@ extern void (*deferred_error_int_vector)(void); void intel_init_thermal(struct cpuinfo_x86 *c); -void mce_log_therm_throt_event(__u64 status); - /* Interrupt Handler for core thermal thresholds */ extern int (*platform_thermal_notify)(__u64 msr_val); -- cgit From 0b737a9c2af85cc8295f9308d9250f9111bbf94d Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Mon, 23 Jan 2017 19:35:08 +0100 Subject: x86/ras/amd: Make sysfs names of banks more user-friendly Currently, we append the MCA_IPID[InstanceId] to the bank name to create the sysfs filename. The InstanceId field uniquely identifies a bank instance but it doesn't look very nice for most banks. Replace the InstanceId with a simpler, ascending (0, 1, ..) value. Only use this in the sysfs name when there is more than 1 instance. Otherwise, just use the bank's name as the sysfs name. Signed-off-by: Yazen Ghannam Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/1484322741-41884-3-git-send-email-Yazen.Ghannam@amd.com Link: http://lkml.kernel.org/r/20170123183514.13356-4-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mce.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index a09ed05725c2..528f6ec897cb 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -356,12 +356,13 @@ struct smca_hwid { unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */ u32 hwid_mcatype; /* (hwid,mcatype) tuple */ u32 xec_bitmap; /* Bitmap of valid ExtErrorCodes; current max is 21. */ + u8 count; /* Number of instances. */ }; struct smca_bank { struct smca_hwid *hwid; - /* Instance ID */ - u32 id; + u32 id; /* Value of MCA_IPID[InstanceId]. */ + u8 sysfs_id; /* Value used for sysfs name. */ }; extern struct smca_bank smca_banks[MAX_NR_BANKS]; -- cgit From 9026cc82b632ed1a859935c82ed8ad65f27f2781 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 23 Jan 2017 19:35:14 +0100 Subject: x86/ras, EDAC, acpi: Assign MCE notifier handlers a priority Assign all notifiers on the MCE decode chain a priority so that they get called in the correct order. 
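A decoder then registers with an explicit priority along these lines (a sketch: the callback name and body are illustrative, while the enum and mce_register_decode_chain() are in the hunk below):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/mce.h>

static int example_mce_decode(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;
	/* decode m->status, m->addr, ... */
	return NOTIFY_OK;
}

static struct notifier_block example_mce_dec_nb = {
	.notifier_call	= example_mce_decode,
	.priority	= MCE_PRIO_EDAC,	/* from enum mce_notifier_prios */
};

static int __init example_mce_dec_init(void)
{
	mce_register_decode_chain(&example_mce_dec_nb);
	return 0;
}
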
Suggested-by: Thomas Gleixner Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Tony Luck Cc: Yazen Ghannam Cc: linux-edac Link: http://lkml.kernel.org/r/20170123183514.13356-10-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mce.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 528f6ec897cb..e63873683d4a 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -189,6 +189,15 @@ extern struct mce_vendor_flags mce_flags; extern struct mca_config mca_cfg; extern struct mca_msr_regs msr_ops; + +enum mce_notifier_prios { + MCE_PRIO_SRAO = INT_MAX, + MCE_PRIO_EXTLOG = INT_MAX - 1, + MCE_PRIO_NFIT = INT_MAX - 2, + MCE_PRIO_EDAC = INT_MAX - 3, + MCE_PRIO_LOWEST = 0, +}; + extern void mce_register_decode_chain(struct notifier_block *nb); extern void mce_unregister_decode_chain(struct notifier_block *nb); -- cgit From 5299709d0a87342dadc1fc9850484fadeb488bf8 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 20 Jan 2017 13:04:01 -0800 Subject: treewide: Constify most dma_map_ops structures Most dma_map_ops structures are never modified. Constify these structures such that these can be write-protected. This patch has been generated as follows: git grep -l 'struct dma_map_ops' | xargs -d\\n sed -i \ -e 's/struct dma_map_ops/const struct dma_map_ops/g' \ -e 's/const struct dma_map_ops {/struct dma_map_ops {/g' \ -e 's/^const struct dma_map_ops;$/struct dma_map_ops;/' \ -e 's/const const struct dma_map_ops /const struct dma_map_ops /g'; sed -i -e 's/const \(struct dma_map_ops intel_dma_ops\)/\1/' \ $(git grep -l 'struct dma_map_ops intel_dma_ops'); sed -i -e 's/const \(struct dma_map_ops dma_iommu_ops\)/\1/' \ $(git grep -l 'struct dma_map_ops' | grep ^arch/powerpc); sed -i -e '/^struct vmd_dev {$/,/^};$/ s/const \(struct dma_map_ops[[:blank:]]dma_ops;\)/\1/' \ -e '/^static void vmd_setup_dma_ops/,/^}$/ s/const \(struct dma_map_ops \*dest\)/\1/' \ -e 's/const \(struct dma_map_ops \*dest = \&vmd->dma_ops\)/\1/' \ drivers/pci/host/*.c sed -i -e '/^void __init pci_iommu_alloc(void)$/,/^}$/ s/dma_ops->/intel_dma_ops./' arch/ia64/kernel/pci-dma.c sed -i -e 's/static const struct dma_map_ops sn_dma_ops/static struct dma_map_ops sn_dma_ops/' arch/ia64/sn/pci/pci_dma.c sed -i -e 's/(const struct dma_map_ops \*)//' drivers/misc/mic/bus/vop_bus.c Signed-off-by: Bart Van Assche Reviewed-by: Christoph Hellwig Cc: Benjamin Herrenschmidt Cc: Boris Ostrovsky Cc: David Woodhouse Cc: Juergen Gross Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: Russell King Cc: x86@kernel.org Signed-off-by: Doug Ledford --- arch/x86/include/asm/device.h | 4 ++-- arch/x86/include/asm/dma-mapping.h | 4 ++-- arch/x86/include/asm/iommu.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 684ed6c3aa67..b2d0b4ced7e3 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h @@ -3,7 +3,7 @@ struct dev_archdata { #ifdef CONFIG_X86_DEV_DMA_OPS - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; #endif #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) void *iommu; /* hook for IOMMU specific extension */ @@ -13,7 +13,7 @@ struct dev_archdata { #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS) struct dma_domain { struct list_head node; - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; int domain_nr; }; void add_dma_domain(struct dma_domain *domain); diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 44461626830e..5e4772886a1e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -25,9 +25,9 @@ extern int iommu_merge; extern struct device x86_dma_fallback_dev; extern int panic_on_overflow; -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { #ifndef CONFIG_X86_DEV_DMA_OPS return dma_ops; diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 345c99cef152..793869879464 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -1,7 +1,7 @@ #ifndef _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H -extern struct dma_map_ops nommu_dma_ops; +extern const struct dma_map_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; extern int iommu_pass_through; -- cgit From 5657933dbb6e25feaf5d8df8c88f96cdade693a3 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 20 Jan 2017 13:04:02 -0800 Subject: treewide: Move dma_ops from struct dev_archdata into struct device Some but not all architectures provide set_dma_ops(). Move dma_ops from struct dev_archdata into struct device such that it becomes possible on all architectures to configure dma_ops per device. Signed-off-by: Bart Van Assche Acked-by: Greg Kroah-Hartman Cc: Benjamin Herrenschmidt Cc: Boris Ostrovsky Cc: David Woodhouse Cc: Juergen Gross Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: Russell King Cc: x86@kernel.org Signed-off-by: Doug Ledford --- arch/x86/include/asm/device.h | 3 --- arch/x86/include/asm/dma-mapping.h | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index b2d0b4ced7e3..1b3ef26e77df 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h @@ -2,9 +2,6 @@ #define _ASM_X86_DEVICE_H struct dev_archdata { -#ifdef CONFIG_X86_DEV_DMA_OPS - const struct dma_map_ops *dma_ops; -#endif #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) void *iommu; /* hook for IOMMU specific extension */ #endif diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 5e4772886a1e..94b5b96966cb 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -32,10 +32,10 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev) #ifndef CONFIG_X86_DEV_DMA_OPS return dma_ops; #else - if (unlikely(!dev) || !dev->archdata.dma_ops) + if (unlikely(!dev) || !dev->dma_ops) return dma_ops; else - return dev->archdata.dma_ops; + return dev->dma_ops; #endif } -- cgit From 815dd18788fe0d41899f51b91d0560279cf16b0d Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 20 Jan 2017 13:04:04 -0800 Subject: treewide: Consolidate get_dma_ops() implementations Introduce a new architecture-specific get_arch_dma_ops() function that takes a struct bus_type * argument. Add get_dma_ops() in . Signed-off-by: Bart Van Assche Cc: Benjamin Herrenschmidt Cc: Boris Ostrovsky Cc: David Woodhouse Cc: Juergen Gross Cc: H. Peter Anvin Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: Russell King Cc: x86@kernel.org Signed-off-by: Doug Ledford --- arch/x86/include/asm/dma-mapping.h | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 94b5b96966cb..08a0838b83fb 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -27,16 +27,9 @@ extern int panic_on_overflow; extern const struct dma_map_ops *dma_ops; -static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { -#ifndef CONFIG_X86_DEV_DMA_OPS return dma_ops; -#else - if (unlikely(!dev) || !dev->dma_ops) - return dma_ops; - else - return dev->dma_ops; -#endif } bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); -- cgit From a5828ed3d03d38399159dc17a98cefde3109a66b Mon Sep 17 00:00:00 2001 From: Yu-cheng Yu Date: Tue, 24 Jan 2017 10:25:46 -0800 Subject: x86/fpu/xstate: Move XSAVES state init to a function Make XSTATE init similar to existing code; move it to a separate function. There is no functionality change. Signed-off-by: Yu-cheng Yu Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: Fenghua Yu Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Ravi V. Shankar Cc: Rik van Riel Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1485282346-15437-1-git-send-email-yu-cheng.yu@intel.com [ Minor cleanliness edits. 
] Signed-off-by: Ingo Molnar --- arch/x86/include/asm/fpu/internal.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index d4a684997497..255645f60ca2 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -87,6 +87,16 @@ extern void fpstate_init_soft(struct swregs_state *soft); #else static inline void fpstate_init_soft(struct swregs_state *soft) {} #endif + +static inline void fpstate_init_xstate(struct xregs_state *xsave) +{ + /* + * XRSTORS requires these bits set in xcomp_bv, or it will + * trigger #GP: + */ + xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask; +} + static inline void fpstate_init_fxstate(struct fxregs_state *fx) { fx->cwd = 0x37f; -- cgit From 78d1b296843a719935277b0d24d1358416ed0204 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 18 Jan 2017 11:15:37 -0800 Subject: x86/cpu: Add X86_FEATURE_CPUID Add a synthetic CPUID flag denoting whether the CPU sports the CPUID instruction or not. This will come in useful later when accommodating CPUID-less CPUs. Signed-off-by: Borislav Petkov [ Slightly prettified. ] Signed-off-by: Andy Lutomirski Reviewed-by: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Fenghua Yu Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Matthew Whitehead Cc: Oleg Nesterov Cc: One Thousand Gnomes Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Cc: Yu-cheng Yu Link: http://lkml.kernel.org/r/dcb355adae3ab812c79397056a61c212f1a0c7cc.1484705016.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeatures.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index eafee3161d1c..a0f4ff25669e 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -100,7 +100,7 @@ #define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ #define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ -/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ +#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ -- cgit From 80a7581f38c0b2e83dc883a2125340b90b5635ec Mon Sep 17 00:00:00 2001 From: Irina Tirdea Date: Mon, 23 Jan 2017 12:07:43 -0600 Subject: arch/x86/platform/atom: Move pmc_atom to drivers/platform/x86 The pmc_atom driver does not contain any architecture-specific code. It only enables the SoC Power Management Controller driver for BayTrail and CherryTrail platforms. Move the pmc_atom driver from arch/x86/platform/atom to drivers/platform/x86.
Also clean-up and reorder include files by alphabetical order in pmc_atom.h Signed-off-by: Irina Tirdea Signed-off-by: Pierre-Louis Bossart Acked-by: Thomas Gleixner Acked-by: Andy Shevchenko Signed-off-by: Stephen Boyd --- arch/x86/include/asm/pmc_atom.h | 158 ---------------------------------------- 1 file changed, 158 deletions(-) delete mode 100644 arch/x86/include/asm/pmc_atom.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/pmc_atom.h b/arch/x86/include/asm/pmc_atom.h deleted file mode 100644 index aa8744c77c6d..000000000000 --- a/arch/x86/include/asm/pmc_atom.h +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Intel Atom SOC Power Management Controller Header File - * Copyright (c) 2014, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - */ - -#ifndef PMC_ATOM_H -#define PMC_ATOM_H - -/* ValleyView Power Control Unit PCI Device ID */ -#define PCI_DEVICE_ID_VLV_PMC 0x0F1C -/* CherryTrail Power Control Unit PCI Device ID */ -#define PCI_DEVICE_ID_CHT_PMC 0x229C - -/* PMC Memory mapped IO registers */ -#define PMC_BASE_ADDR_OFFSET 0x44 -#define PMC_BASE_ADDR_MASK 0xFFFFFE00 -#define PMC_MMIO_REG_LEN 0x100 -#define PMC_REG_BIT_WIDTH 32 - -/* BIOS uses FUNC_DIS to disable specific function */ -#define PMC_FUNC_DIS 0x34 -#define PMC_FUNC_DIS_2 0x38 - -/* CHT specific bits in FUNC_DIS2 register */ -#define BIT_FD_GMM BIT(3) -#define BIT_FD_ISH BIT(4) - -/* S0ix wake event control */ -#define PMC_S0IX_WAKE_EN 0x3C - -#define BIT_LPC_CLOCK_RUN BIT(4) -#define BIT_SHARED_IRQ_GPSC BIT(5) -#define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18) -#define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19) -#define BIT_SHARED_IRQ_GPSS BIT(20) - -#define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \ - BIT_SHARED_IRQ_GPSC | \ - BIT_ORED_DEDICATED_IRQ_GPSS | \ - BIT_ORED_DEDICATED_IRQ_GPSC | \ - BIT_SHARED_IRQ_GPSS) - -/* The timers acumulate time spent in sleep state */ -#define PMC_S0IR_TMR 0x80 -#define PMC_S0I1_TMR 0x84 -#define PMC_S0I2_TMR 0x88 -#define PMC_S0I3_TMR 0x8C -#define PMC_S0_TMR 0x90 -/* Sleep state counter is in units of of 32us */ -#define PMC_TMR_SHIFT 5 - -/* Power status of power islands */ -#define PMC_PSS 0x98 - -#define PMC_PSS_BIT_GBE BIT(0) -#define PMC_PSS_BIT_SATA BIT(1) -#define PMC_PSS_BIT_HDA BIT(2) -#define PMC_PSS_BIT_SEC BIT(3) -#define PMC_PSS_BIT_PCIE BIT(4) -#define PMC_PSS_BIT_LPSS BIT(5) -#define PMC_PSS_BIT_LPE BIT(6) -#define PMC_PSS_BIT_DFX BIT(7) -#define PMC_PSS_BIT_USH_CTRL BIT(8) -#define PMC_PSS_BIT_USH_SUS BIT(9) -#define PMC_PSS_BIT_USH_VCCS BIT(10) -#define PMC_PSS_BIT_USH_VCCA BIT(11) -#define PMC_PSS_BIT_OTG_CTRL BIT(12) -#define PMC_PSS_BIT_OTG_VCCS BIT(13) -#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14) -#define PMC_PSS_BIT_OTG_VCCA BIT(15) -#define PMC_PSS_BIT_USB BIT(16) -#define PMC_PSS_BIT_USB_SUS BIT(17) - -/* CHT specific bits in PSS register */ -#define PMC_PSS_BIT_CHT_UFS BIT(7) -#define PMC_PSS_BIT_CHT_UXD BIT(11) -#define PMC_PSS_BIT_CHT_UXD_FD BIT(12) -#define PMC_PSS_BIT_CHT_UX_ENG BIT(15) -#define PMC_PSS_BIT_CHT_USB_SUS BIT(16) -#define PMC_PSS_BIT_CHT_GMM BIT(17) -#define PMC_PSS_BIT_CHT_ISH BIT(18) -#define 
PMC_PSS_BIT_CHT_DFX_MASTER BIT(26) -#define PMC_PSS_BIT_CHT_DFX_CLUSTER1 BIT(27) -#define PMC_PSS_BIT_CHT_DFX_CLUSTER2 BIT(28) -#define PMC_PSS_BIT_CHT_DFX_CLUSTER3 BIT(29) -#define PMC_PSS_BIT_CHT_DFX_CLUSTER4 BIT(30) -#define PMC_PSS_BIT_CHT_DFX_CLUSTER5 BIT(31) - -/* These registers reflect D3 status of functions */ -#define PMC_D3_STS_0 0xA0 - -#define BIT_LPSS1_F0_DMA BIT(0) -#define BIT_LPSS1_F1_PWM1 BIT(1) -#define BIT_LPSS1_F2_PWM2 BIT(2) -#define BIT_LPSS1_F3_HSUART1 BIT(3) -#define BIT_LPSS1_F4_HSUART2 BIT(4) -#define BIT_LPSS1_F5_SPI BIT(5) -#define BIT_LPSS1_F6_XXX BIT(6) -#define BIT_LPSS1_F7_XXX BIT(7) -#define BIT_SCC_EMMC BIT(8) -#define BIT_SCC_SDIO BIT(9) -#define BIT_SCC_SDCARD BIT(10) -#define BIT_SCC_MIPI BIT(11) -#define BIT_HDA BIT(12) -#define BIT_LPE BIT(13) -#define BIT_OTG BIT(14) -#define BIT_USH BIT(15) -#define BIT_GBE BIT(16) -#define BIT_SATA BIT(17) -#define BIT_USB_EHCI BIT(18) -#define BIT_SEC BIT(19) -#define BIT_PCIE_PORT0 BIT(20) -#define BIT_PCIE_PORT1 BIT(21) -#define BIT_PCIE_PORT2 BIT(22) -#define BIT_PCIE_PORT3 BIT(23) -#define BIT_LPSS2_F0_DMA BIT(24) -#define BIT_LPSS2_F1_I2C1 BIT(25) -#define BIT_LPSS2_F2_I2C2 BIT(26) -#define BIT_LPSS2_F3_I2C3 BIT(27) -#define BIT_LPSS2_F4_I2C4 BIT(28) -#define BIT_LPSS2_F5_I2C5 BIT(29) -#define BIT_LPSS2_F6_I2C6 BIT(30) -#define BIT_LPSS2_F7_I2C7 BIT(31) - -#define PMC_D3_STS_1 0xA4 -#define BIT_SMB BIT(0) -#define BIT_OTG_SS_PHY BIT(1) -#define BIT_USH_SS_PHY BIT(2) -#define BIT_DFX BIT(3) - -/* CHT specific bits in PMC_D3_STS_1 register */ -#define BIT_STS_GMM BIT(1) -#define BIT_STS_ISH BIT(2) - -/* PMC I/O Registers */ -#define ACPI_BASE_ADDR_OFFSET 0x40 -#define ACPI_BASE_ADDR_MASK 0xFFFFFE00 -#define ACPI_MMIO_REG_LEN 0x100 - -#define PM1_CNT 0x4 -#define SLEEP_TYPE_MASK 0xFFFFECFF -#define SLEEP_TYPE_S5 0x1C00 -#define SLEEP_ENABLE 0x2000 - -extern int pmc_atom_read(int offset, u32 *value); -extern int pmc_atom_write(int offset, u32 value); - -#endif /* PMC_ATOM_H */ -- cgit From ab22a4733fe919d22bc2957680506ed17e40941e Mon Sep 17 00:00:00 2001 From: Junaid Shahid Date: Wed, 21 Dec 2016 20:29:28 -0800 Subject: kvm: x86: mmu: Rename EPT_VIOLATION_READ/WRITE/INSTR constants Rename the EPT_VIOLATION_READ/WRITE/INSTR constants to EPT_VIOLATION_ACC_READ/WRITE/INSTR to more clearly indicate that these signify the type of the memory access as opposed to the permissions granted by the PTE. 
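To make the distinction concrete, a handler can test an access bit against a permission bit; a minimal sketch (the helper name is made up here, this is not the actual handle_ept_violation() code):

static bool ept_write_to_readonly(unsigned long exit_qual)
{
	/* ACC_* bits: what the faulting guest access was trying to do */
	bool acc_write = exit_qual & EPT_VIOLATION_ACC_WRITE;
	/* *ABLE bits: what the EPT entry actually permitted */
	bool writable  = exit_qual & EPT_VIOLATION_WRITABLE;

	return acc_write && !writable;
}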
Signed-off-by: Junaid Shahid Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/vmx.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index a22a4790f1ac..cc54b7026567 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -510,15 +510,15 @@ struct vmx_msr_entry { /* * Exit Qualifications for EPT Violations */ -#define EPT_VIOLATION_READ_BIT 0 -#define EPT_VIOLATION_WRITE_BIT 1 -#define EPT_VIOLATION_INSTR_BIT 2 +#define EPT_VIOLATION_ACC_READ_BIT 0 +#define EPT_VIOLATION_ACC_WRITE_BIT 1 +#define EPT_VIOLATION_ACC_INSTR_BIT 2 #define EPT_VIOLATION_READABLE_BIT 3 #define EPT_VIOLATION_WRITABLE_BIT 4 #define EPT_VIOLATION_EXECUTABLE_BIT 5 -#define EPT_VIOLATION_READ (1 << EPT_VIOLATION_READ_BIT) -#define EPT_VIOLATION_WRITE (1 << EPT_VIOLATION_WRITE_BIT) -#define EPT_VIOLATION_INSTR (1 << EPT_VIOLATION_INSTR_BIT) +#define EPT_VIOLATION_ACC_READ (1 << EPT_VIOLATION_ACC_READ_BIT) +#define EPT_VIOLATION_ACC_WRITE (1 << EPT_VIOLATION_ACC_WRITE_BIT) +#define EPT_VIOLATION_ACC_INSTR (1 << EPT_VIOLATION_ACC_INSTR_BIT) #define EPT_VIOLATION_READABLE (1 << EPT_VIOLATION_READABLE_BIT) #define EPT_VIOLATION_WRITABLE (1 << EPT_VIOLATION_WRITABLE_BIT) #define EPT_VIOLATION_EXECUTABLE (1 << EPT_VIOLATION_EXECUTABLE_BIT) -- cgit From f58576666ccdcfb9cf7cae8669dffe1eed844f88 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Fri, 27 Jan 2017 16:17:52 -0700 Subject: x86/mm: Improve documentation for low-level device I/O functions Add kerneldoc comments for memcpy_{to,from}io() and memset_io(). The existing documentation for ioremap() was distant from the definition, causing kernel-doc to miss it; move it appropriately. Signed-off-by: Jonathan Corbet Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170127161752.0b95e95b@lwn.net Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io.h | 46 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 11 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index d34bd370074b..7afb0e2f07f4 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -164,6 +164,17 @@ static inline unsigned int isa_virt_to_bus(volatile void *address) #define virt_to_bus virt_to_phys #define bus_to_virt phys_to_virt +/* + * The default ioremap() behavior is non-cached; if you need something + * else, you probably want one of the following. + */ +extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); +#define ioremap_uc ioremap_uc + +extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val); + /** * ioremap - map bus memory into CPU space * @offset: bus address of the memory @@ -178,17 +189,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address) * If the area you are trying to map is a PCI BAR you should have a * look at pci_iomap(). 
*/ -extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); -extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); -#define ioremap_uc ioremap_uc - -extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); -extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, - unsigned long prot_val); - -/* - * The default ioremap() behavior is non-cached: - */ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) { return ioremap_nocache(offset, size); @@ -207,18 +207,42 @@ extern void set_iounmap_nonlazy(void); */ #define xlate_dev_kmem_ptr(p) p +/** + * memset_io Set a range of I/O memory to a constant value + * @addr: The beginning of the I/O-memory range to set + * @val: The value to set the memory to + * @count: The number of bytes to set + * + * Set a range of I/O memory to a given value. + */ static inline void memset_io(volatile void __iomem *addr, unsigned char val, size_t count) { memset((void __force *)addr, val, count); } +/** + * memcpy_fromio Copy a block of data from I/O memory + * @dst: The (RAM) destination for the copy + * @src: The (I/O memory) source for the data + * @count: The number of bytes to copy + * + * Copy a block of data from I/O memory. + */ static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) { memcpy(dst, (const void __force *)src, count); } +/** + * memcpy_toio Copy a block of data into I/O memory + * @dst: The (I/O memory) destination for the copy + * @src: The (RAM) source for the data + * @count: The number of bytes to copy + * + * Copy a block of data to I/O memory. + */ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) { -- cgit From 24c2503255d35c269b67162c397a1a1c1e02f6ce Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 25 Jan 2017 21:00:48 +0100 Subject: x86/microcode: Do not access the initrd after it has been freed When we look for microcode blobs, we first try builtin and if that doesn't succeed, we fall back to the initrd supplied to the kernel. However, at some point during boot, that initrd gets jettisoned and we shouldn't access it anymore. But we do, as the below KASAN report shows. That's because find_microcode_in_initrd() doesn't check whether the initrd is still valid or not. So do that. ================================================================== BUG: KASAN: use-after-free in find_cpio_data Read of size 1 by task swapper/1/0 page:ffffea0000db9d40 count:0 mapcount:0 mapping: (null) index:0x1 flags: 0x100000000000000() raw: 0100000000000000 0000000000000000 0000000000000001 00000000ffffffff raw: dead000000000100 dead000000000200 0000000000000000 0000000000000000 page dumped because: kasan: bad access detected CPU: 1 PID: 0 Comm: swapper/1 Tainted: G W 4.10.0-rc5-debug-00075-g2dbde22 #3 Hardware name: Dell Inc. XPS 13 9360/0839Y6, BIOS 1.2.3 12/01/2016 Call Trace: dump_stack ? _atomic_dec_and_lock ? __dump_page kasan_report_error ? pointer ? find_cpio_data __asan_report_load1_noabort ? find_cpio_data find_cpio_data ? vsprintf ? dump_stack ? get_ucode_user ? print_usage_bug find_microcode_in_initrd __load_ucode_intel ? collect_cpu_info_early ? debug_check_no_locks_freed load_ucode_intel_ap ? collect_cpu_info ? trace_hardirqs_on ? flat_send_IPI_mask_allbutself load_ucode_ap ? get_builtin_firmware ? flush_tlb_func ? do_raw_spin_trylock ? cpumask_weight cpu_init ? trace_hardirqs_off ? play_dead_common ? native_play_dead ? hlt_play_dead ?
syscall_init ? arch_cpu_idle_dead ? do_idle start_secondary start_cpu Memory state around the buggy address: ffff880036e74f00: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ffff880036e74f80: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff >ffff880036e75000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ^ ffff880036e75080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ffff880036e75100: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ================================================================== Reported-by: Andrey Ryabinin Tested-by: Andrey Ryabinin Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170126165833.evjemhbqzaepirxo@pd.tnic Signed-off-by: Ingo Molnar --- arch/x86/include/asm/microcode.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 38711df3bcb5..2266f864b747 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void); extern void load_ucode_ap(void); void reload_early_microcode(void); extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); +extern bool initrd_gone; #else static inline int __init microcode_init(void) { return 0; }; static inline void __init load_ucode_bsp(void) { } -- cgit From d6f3609d2b4c6d0eec01f398cb685e50da3e6013 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sat, 28 Jan 2017 12:37:14 -0700 Subject: Drivers: hv: restore hypercall page cleanup before kexec We need to clean up the hypercall page before doing kexec/kdump or the new kernel may crash if it tries to use it. Reuse the now-empty hv_cleanup function, renaming it to hyperv_cleanup and moving it to the arch-specific code. Fixes: 8730046c1498 ("Drivers: hv vmbus: Move Hypercall page setup out of common code") Signed-off-by: Vitaly Kuznetsov Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 42505d1158d6..f8dc3700de67 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -171,5 +171,6 @@ void hv_remove_crash_handler(void); void hyperv_init(void); void hyperv_report_panic(struct pt_regs *regs); bool hv_is_hypercall_page_setup(void); +void hyperv_cleanup(void); #endif #endif -- cgit From b672592f022152155fde7db99aafbcf04a2c3ba5 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 31 Jan 2017 04:09:51 +0100 Subject: sched/cputime: Remove generic asm headers cputime_t is now only used by two architectures: * powerpc (when CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y) * s390 And since the core doesn't use it anymore, we don't need any arch support from the others. So we can remove their stub implementations. A final cleanup would be to provide an efficient pure arch implementation of cputime_to_nsec() for s390 and powerpc and finally remove include/linux/cputime.h.
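Such an arch implementation would boil down to a single scaling step; a minimal sketch, assuming a hypothetical per-architecture CPUTIME_UNITS_PER_SEC constant for the accounting granularity:

static inline u64 arch_cputime_to_nsecs(u64 cputime)
{
	/* Convert arch accounting units to ns; overflow handling omitted. */
	return div_u64(cputime * NSEC_PER_SEC, CPUTIME_UNITS_PER_SEC);
}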
Signed-off-by: Frederic Weisbecker Cc: Benjamin Herrenschmidt Cc: Fenghua Yu Cc: Heiko Carstens Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Rik van Riel Cc: Stanislaw Gruszka Cc: Thomas Gleixner Cc: Tony Luck Cc: Wanpeng Li Link: http://lkml.kernel.org/r/1485832191-26889-36-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/Kbuild | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 2b892e2313a9..5d6a53fd7521 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -7,7 +7,6 @@ generated-y += unistd_64_x32.h generated-y += xen-hypercalls.h generic-y += clkdev.h -generic-y += cputime.h generic-y += dma-contiguous.h generic-y += early_ioremap.h generic-y += mcs_spinlock.h -- cgit From 74862b03b46a852662c1a30c859b985261ff5d5c Mon Sep 17 00:00:00 2001 From: "travis@sgi.com" Date: Wed, 25 Jan 2017 10:35:18 -0600 Subject: x86/platform/UV: Add Support for UV4 Hubless systems Add recognition and support for UV4 hubless systems. Signed-off-by: Mike Travis Reviewed-by: Russ Anderson Acked-by: Thomas Gleixner Acked-by: Dimitri Sivanich Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20170125163517.398537358@asylum.americas.sgi.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uv/uv.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h index 062921ef34e9..6686820feae9 100644 --- a/arch/x86/include/asm/uv/uv.h +++ b/arch/x86/include/asm/uv/uv.h @@ -10,6 +10,7 @@ struct mm_struct; extern enum uv_system_type get_uv_system_type(void); extern int is_uv_system(void); +extern int is_uv_hubless(void); extern void uv_cpu_init(void); extern void uv_nmi_init(void); extern void uv_system_init(void); @@ -23,6 +24,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; } static inline int is_uv_system(void) { return 0; } +static inline int is_uv_hubless(void) { return 0; } static inline void uv_cpu_init(void) { } static inline void uv_system_init(void) { } static inline const struct cpumask * -- cgit From abdf1df6bc0416ec19b841e92b497ca55b23454c Mon Sep 17 00:00:00 2001 From: "travis@sgi.com" Date: Wed, 25 Jan 2017 10:35:19 -0600 Subject: x86/platform/UV: Add Support for UV4 Hubless NMIs Merge new UV Hubless NMI support into existing UV NMI handler. 
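Conceptually, the merged handler can then pick the NMI setup path by system flavor; a sketch of the dispatch, not the literal merged code:

	/* Hub-full UV systems use hub MMRs; hubless UV4 goes through the PCH. */
	if (is_uv_system())
		uv_nmi_setup();
	else if (is_uv_hubless())
		uv_nmi_setup_hubless();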
Signed-off-by: Mike Travis Reviewed-by: Russ Anderson Acked-by: Thomas Gleixner Acked-by: Dimitri Sivanich Cc: Linus Torvalds Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20170125163517.585269837@asylum.americas.sgi.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uv/uv_hub.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 097b80c989c4..72e8300b1e8a 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -772,6 +772,7 @@ static inline int uv_num_possible_blades(void) /* Per Hub NMI support */ extern void uv_nmi_setup(void); +extern void uv_nmi_setup_hubless(void); /* BMC sets a bit this MMR non-zero before sending an NMI */ #define UVH_NMI_MMR UVH_SCRATCH5 @@ -799,6 +800,8 @@ struct uv_hub_nmi_s { atomic_t read_mmr_count; /* count of MMR reads */ atomic_t nmi_count; /* count of true UV NMIs */ unsigned long nmi_value; /* last value read from NMI MMR */ + bool hub_present; /* false means UV hubless system */ + bool pch_owner; /* indicates this hub owns PCH */ }; struct uv_cpu_nmi_s { -- cgit From ae47eda905e61ef6ba0b6f79b967c9de15ca4f8e Mon Sep 17 00:00:00 2001 From: Grzegorz Andrejczuk Date: Fri, 20 Jan 2017 14:22:33 +0100 Subject: x86/msr: Add MSR_MISC_FEATURE_ENABLES and RING3MWAIT bit Define new MSR MISC_FEATURE_ENABLES (0x140). On supported CPUs if bit 1 of this MSR is set, then calling MONITOR and MWAIT instructions outside of ring 0 will not cause invalid-opcode exception. The MSR MISC_FEATURE_ENABLES is not yet documented in the SDM. Here is the relevant documentation: Hex Dec Name Scope 140H 320 MISC_FEATURE_ENABLES Thread 0 Reserved 1 If set to 1, the MONITOR and MWAIT instructions do not cause invalid-opcode exceptions when executed with CPL > 0 or in virtual-8086 mode. If MWAIT is executed when CPL > 0 or in virtual-8086 mode, and if EAX indicates a C-state other than C0 or C1, the instruction operates as if EAX indicated the C-state C1. 63:2 Reserved Signed-off-by: Grzegorz Andrejczuk Cc: Piotr.Luc@intel.com Cc: dave.hansen@linux.intel.com Link: http://lkml.kernel.org/r/1484918557-15481-2-git-send-email-grzegorz.andrejczuk@intel.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/msr-index.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 710273c617b8..00293a94ffaf 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -543,6 +543,11 @@ #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT) +/* MISC_FEATURE_ENABLES non-architectural features */ +#define MSR_MISC_FEATURE_ENABLES 0x00000140 + +#define MSR_MISC_FEATURE_ENABLES_RING3MWAIT_BIT 1 + #define MSR_IA32_TSC_DEADLINE 0x000006E0 /* P4/Xeon+ specific */ -- cgit From 0274f9551eff55dbd63b5f5f3efe30fe5d4c801c Mon Sep 17 00:00:00 2001 From: Grzegorz Andrejczuk Date: Fri, 20 Jan 2017 14:22:34 +0100 Subject: x86/elf: Add HWCAP2 to expose ring 3 MONITOR/MWAIT Introduce ELF_HWCAP2 variable for x86 and reserve its bit 0 to expose the ring 3 MONITOR/MWAIT. HWCAP variables contain bitmasks which can be used by userspace applications to detect which instruction sets are supported by CPU. 
On x86 architecture information about CPU capabilities can be checked via CPUID instructions, unfortunately presence of ring 3 MONITOR/MWAIT feature cannot be checked this way. ELF_HWCAP cannot be used as well, because on x86 it is set to CPUID[1].EDX which means that all bits are reserved there. HWCAP2 approach was chosen because it reuses existing solution present in other architectures, so only minor modifications are required to the kernel and userspace applications. When ELF_HWCAP2 is defined kernel maps it to AT_HWCAP2 during the start of the application. This way the ring 3 MONITOR/MWAIT feature can be detected using getauxval() API in a simple and fast manner. ELF_HWCAP2 type is u32 to be consistent with x86 ELF_HWCAP type. Signed-off-by: Grzegorz Andrejczuk Cc: Piotr.Luc@intel.com Cc: dave.hansen@linux.intel.com Link: http://lkml.kernel.org/r/1484918557-15481-3-git-send-email-grzegorz.andrejczuk@intel.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/elf.h | 9 +++++++++ arch/x86/include/uapi/asm/hwcap2.h | 7 +++++++ 2 files changed, 16 insertions(+) create mode 100644 arch/x86/include/uapi/asm/hwcap2.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index e7f155c3045e..9d49c18b5ea9 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -258,6 +258,15 @@ extern int force_personality32; #define ELF_HWCAP (boot_cpu_data.x86_capability[CPUID_1_EDX]) +extern u32 elf_hwcap2; + +/* + * HWCAP2 supplies mask with kernel enabled CPU features, so that + * the application can discover that it can safely use them. + * The bits are defined in uapi/asm/hwcap2.h. + */ +#define ELF_HWCAP2 (elf_hwcap2) + /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in intent than poking at uname or /proc/cpuinfo. diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h new file mode 100644 index 000000000000..0bd2be5c7617 --- /dev/null +++ b/arch/x86/include/uapi/asm/hwcap2.h @@ -0,0 +1,7 @@ +#ifndef _ASM_X86_HWCAP2_H +#define _ASM_X86_HWCAP2_H + +/* MONITOR/MWAIT enabled in Ring 3 */ +#define HWCAP2_RING3MWAIT (1 << 0) + +#endif -- cgit From 1d12d0ef0194ccc4dcebed3d96bb2301b26fc3ee Mon Sep 17 00:00:00 2001 From: Grzegorz Andrejczuk Date: Fri, 20 Jan 2017 14:22:35 +0100 Subject: x86/cpufeature: Add RING3MWAIT to CPU features Add software-defined CPUID bit for the non-architectural ring 3 MONITOR/MWAIT feature. 
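For reference, the userspace side of the detection then reduces to a getauxval() check; a sketch (error handling omitted):

#include <sys/auxv.h>
#include <asm/hwcap2.h>

/* Nonzero when the kernel has enabled ring 3 MONITOR/MWAIT. */
static int have_ring3mwait(void)
{
	return !!(getauxval(AT_HWCAP2) & HWCAP2_RING3MWAIT);
}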
Signed-off-by: Grzegorz Andrejczuk Cc: Piotr.Luc@intel.com Cc: dave.hansen@linux.intel.com Link: http://lkml.kernel.org/r/1484918557-15481-4-git-send-email-grzegorz.andrejczuk@intel.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/cpufeatures.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index d9d7136edf05..56e5184514c6 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -100,7 +100,7 @@ #define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ #define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ -/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ +#define X86_FEATURE_RING3MWAIT ( 3*32+25) /* ring 3 MONITOR/MWAIT */ #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ -- cgit From 79a8b9aa388b0620cc1d525d7c0f0d9a8a85e08e Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sun, 5 Feb 2017 11:50:21 +0100 Subject: x86/CPU/AMD: Bring back Compute Unit ID Commit: a33d331761bc ("x86/CPU/AMD: Fix Bulldozer topology") restored the initial approach we had with the Fam15h topology of enumerating CU (Compute Unit) threads as cores. And this is still correct - they're beefier than HT threads but still have some shared functionality. Our current approach has a problem with the Mad Max Steam game, for example. Yves Dionne reported a certain "choppiness" while playing on v4.9.5. That problem stems most likely from the fact that the CU threads share resources within one CU and when we schedule to a thread of a different compute unit, this incurs latency due to migrating the working set to a different CU through the caches. When the thread siblings mask mirrors that aspect of the CUs and threads, the scheduler pays attention to it and tries to schedule within one CU first. Which takes care of the latency, of course. 
Reported-by: Yves Dionne Signed-off-by: Borislav Petkov Cc: # 4.9 Cc: Brice Goglin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Yazen Ghannam Link: http://lkml.kernel.org/r/20170205105022.8705-1-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/processor.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1be64da0384e..e6cfe7ba2d65 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -104,6 +104,7 @@ struct cpuinfo_x86 { __u8 x86_phys_bits; /* CPUID returned core id bits: */ __u8 x86_coreid_bits; + __u8 cu_id; /* Max extended CPUID function supported: */ __u32 extended_cpuid_level; /* Maximum supported CPUID level, -1=no CPUID: */ -- cgit From a2cd2f3f29f26782b7484b32e2af172e29313717 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 6 Feb 2017 11:22:40 +0000 Subject: x86/efi: Allow invocation of arbitrary runtime services Provide the ability to perform mixed-mode runtime service calls for x86 in the same way the following commit provided the ability to invoke for boot services: 0a637ee61247bd ("x86/efi: Allow invocation of arbitrary boot services") Suggested-by: Lukas Wunner Signed-off-by: David Howells Signed-off-by: Ard Biesheuvel Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/1486380166-31868-2-git-send-email-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/efi.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index e99675b9c861..2f77bcefe6b4 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -191,6 +191,7 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( struct efi_config { u64 image_handle; u64 table; + u64 runtime_services; u64 boot_services; u64 text_output; efi_status_t (*call)(unsigned long, ...); @@ -226,6 +227,10 @@ static inline bool efi_is_64bit(void) #define __efi_call_early(f, ...) \ __efi_early()->call((unsigned long)f, __VA_ARGS__); +#define efi_call_runtime(f, ...) \ + __efi_early()->call(efi_table_attr(efi_runtime_services, f, \ + __efi_early()->runtime_services), __VA_ARGS__) + extern bool efi_reboot_required(void); #else -- cgit From de8cb458625c164bb3f93c4e415e479afce8fa9d Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 6 Feb 2017 11:22:43 +0000 Subject: efi: Get and store the secure boot status Get the firmware's secure-boot status in the kernel boot wrapper and stash it somewhere that the main kernel image can find. The efi_get_secureboot() function is extracted from the ARM stub and (a) generalised so that it can be called from x86 and (b) made to use efi_call_runtime() so that it can be run in mixed-mode. For x86, it is stored in boot_params and can be overridden by the boot loader or kexec. This allows secure-boot mode to be passed on to a new kernel. Suggested-by: Lukas Wunner Signed-off-by: David Howells Signed-off-by: Ard Biesheuvel Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/1486380166-31868-5-git-send-email-ard.biesheuvel@linaro.org [ Small readability edits. 
] Signed-off-by: Ingo Molnar --- arch/x86/include/uapi/asm/bootparam.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index b10bf319ed20..5138dacf8bb8 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -135,7 +135,8 @@ struct boot_params { __u8 eddbuf_entries; /* 0x1e9 */ __u8 edd_mbr_sig_buf_entries; /* 0x1ea */ __u8 kbd_status; /* 0x1eb */ - __u8 _pad5[3]; /* 0x1ec */ + __u8 secure_boot; /* 0x1ec */ + __u8 _pad5[2]; /* 0x1ed */ /* * The sentinel is set to a nonzero value (0xff) in header.S. * -- cgit From 5a7670ee23f2c07a639c263b70140eaf1da9f68f Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Fri, 3 Feb 2017 16:57:22 -0500 Subject: x86/boot/32: Convert the 32-bit pgtable setup code from assembly to C The new Xen PVH entry point requires page tables to be setup by the kernel since it is entered with paging disabled. Pull the common code out of head_32.S so that mk_early_pgtbl_32() can be invoked from both the new Xen entry point and the existing startup_32() code. Convert resulting common code to C. Signed-off-by: Boris Ostrovsky Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: matt@codeblueprint.co.uk Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/1481215471-9639-1-git-send-email-boris.ostrovsky@oracle.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_32.h | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index b6c0b404898a..fbc73360aea0 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -27,6 +27,7 @@ struct vm_area_struct; extern pgd_t swapper_pg_dir[1024]; extern pgd_t initial_page_table[1024]; +extern pmd_t initial_pg_pmd[]; static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } @@ -75,4 +76,35 @@ do { \ #define kern_addr_valid(kaddr) (0) #endif +/* + * This is how much memory in addition to the memory covered up to + * and including _end we need mapped initially. + * We need: + * (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE) + * (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE) + * + * Modulo rounding, each megabyte assigned here requires a kilobyte of + * memory, which is currently unreclaimed. + * + * This should be a multiple of a page. + * + * KERNEL_IMAGE_SIZE should be greater than pa(_end) + * and small than max_low_pfn, otherwise will waste some page table entries + */ +#if PTRS_PER_PMD > 1 +#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) +#else +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) +#endif + +/* + * Number of possible pages in the lowmem region. + * + * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a + * gas warning about overflowing shift count when gas has been compiled + * with only a host target support using a 32-bit type for internal + * representation. 
+ */ +#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)) + #endif /* _ASM_X86_PGTABLE_32_H */ -- cgit From 55dd00a73a518281bc846dc5de1a718349431eb2 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 24 Jan 2017 15:09:39 -0200 Subject: KVM: x86: add KVM_HC_CLOCK_PAIRING hypercall Add a hypercall to retrieve the host realtime clock and the TSC value used to calculate that clock read. Used to implement clock synchronization between host and guest. Signed-off-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/include/uapi/asm/kvm_para.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 1421a6585126..cff0bb6556f8 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -50,6 +50,15 @@ struct kvm_steal_time { __u32 pad[11]; }; +#define KVM_CLOCK_PAIRING_WALLCLOCK 0 +struct kvm_clock_pairing { + __s64 sec; + __s64 nsec; + __u64 tsc; + __u32 flags; + __u32 pad[9]; +}; + #define KVM_STEAL_ALIGNMENT_BITS 5 #define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1))) #define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1) -- cgit From f4066c2bc4d0de4e5dcbff21dae41e89fe8f38c0 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 24 Jan 2017 15:09:41 -0200 Subject: kvmclock: export kvmclock clocksource and data pointers To be used by KVM PTP driver. Signed-off-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvmclock.h | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 arch/x86/include/asm/kvmclock.h (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvmclock.h b/arch/x86/include/asm/kvmclock.h new file mode 100644 index 000000000000..f260bef63591 --- /dev/null +++ b/arch/x86/include/asm/kvmclock.h @@ -0,0 +1,6 @@ +#ifndef _ASM_X86_KVM_CLOCK_H +#define _ASM_X86_KVM_CLOCK_H + +extern struct clocksource kvm_clock; + +#endif /* _ASM_X86_KVM_CLOCK_H */ -- cgit From 9b06e1018abc65585b07c75c5b3f406dbabe7005 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Sat, 4 Feb 2017 08:46:23 -0700 Subject: Drivers: hv: Fix the bug in generating the guest ID Fix the bug in the generation of the guest ID. Without this fix the host side telemetry code is broken. Signed-off-by: K. Y. Srinivasan Fixes: 352c9624242d ("Drivers: hv: vmbus: Move the definition of generate_guest_id()") Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index f8dc3700de67..56407c6d2397 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -80,7 +80,7 @@ struct ms_hyperv_tsc_page { * */ -#define HV_LINUX_VENDOR_ID 0x8800 +#define HV_LINUX_VENDOR_ID 0x8100 /* * Generate the guest ID based on the guideline described above. 
@@ -91,7 +91,7 @@ static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version, { __u64 guest_id = 0; - guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 56); + guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48); guest_id |= (d_info1 << 48); guest_id |= (kernel_version << 16); guest_id |= d_info2; -- cgit From dee863b571b0a76e9c549ee99e8782bb4bc6502b Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Sat, 4 Feb 2017 09:57:13 -0700 Subject: hv: export current Hyper-V clocksource As a preparation to implementing Hyper-V PTP device supporting .getcrosststamp we need to export a reference to the current Hyper-V clocksource in use (MSR or TSC page). Signed-off-by: Vitaly Kuznetsov Signed-off-by: K. Y. Srinivasan Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mshyperv.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 56407c6d2397..7c9c895432a9 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -3,6 +3,7 @@ #include #include +#include #include /* @@ -168,6 +169,8 @@ void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); void hv_remove_crash_handler(void); #if IS_ENABLED(CONFIG_HYPERV) +extern struct clocksource *hyperv_cs; + void hyperv_init(void); void hyperv_report_panic(struct pt_regs *regs); bool hv_is_hypercall_page_setup(void); -- cgit From ab520be8cd5d56867fc95cfbc34b90880faf1f9d Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Mon, 13 Feb 2017 17:03:23 +0000 Subject: xen/privcmd: Add IOCTL_PRIVCMD_DM_OP Recently a new dm_op[1] hypercall was added to Xen to provide a mechanism for restricting device emulators (such as QEMU) to a limited set of hypervisor operations, and being able to audit those operations in the kernel of the domain in which they run. This patch adds IOCTL_PRIVCMD_DM_OP as gateway for __HYPERVISOR_dm_op. NOTE: There is no requirement for user-space code to bounce data through locked memory buffers (as with IOCTL_PRIVCMD_HYPERCALL) since privcmd has enough information to lock the original buffers directly. [1] http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=524a98c2 Signed-off-by: Paul Durrant Acked-by: Stefano Stabellini Signed-off-by: Boris Ostrovsky --- arch/x86/include/asm/xen/hypercall.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index a12a047184ee..f6d20f6cca12 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -472,6 +472,13 @@ HYPERVISOR_xenpmu_op(unsigned int op, void *arg) return _hypercall2(int, xenpmu_op, op, arg); } +static inline int +HYPERVISOR_dm_op( + domid_t dom, unsigned int nr_bufs, void *bufs) +{ + return _hypercall3(int, dm_op, dom, nr_bufs, bufs); +} + static inline void MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set) { -- cgit From 76dfafd536730ef9b9d99b1cf596916d52be76d1 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 19 Dec 2016 17:17:11 +0100 Subject: KVM: x86: do not scan IRR twice on APICv vmentry Calls to apic_find_highest_irr are scanning IRR twice, once in vmx_sync_pir_from_irr and once in apic_search_irr. Change sync_pir_from_irr to get the new maximum IRR from kvm_apic_update_irr; now that it does the computation, it can also do the RVI write. In order to avoid complications in svm.c, make the callback optional. 
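With the hook optional, the common code pattern becomes a guarded indirect call; roughly (a sketch of the pattern, not the exact x86.c hunk):

static int highest_irr(struct kvm_vcpu *vcpu)
{
	/* VMX provides the hook (and also updates RVI); SVM leaves it NULL. */
	if (kvm_x86_ops->sync_pir_to_irr)
		return kvm_x86_ops->sync_pir_to_irr(vcpu);

	return kvm_lapic_find_highest_irr(vcpu);
}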
Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 417502cf42b6..e4f13e714bcf 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -969,7 +969,7 @@ struct kvm_x86_ops { void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); - void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); + int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*get_tdp_level)(void); u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); -- cgit From 3bba73b1b7a88d88c3ea16b7914c13d475e4a87b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 16 Feb 2017 15:12:04 +0100 Subject: x86/cpufeature: Move RING3MWAIT feature to avoid conflicts The original feature bit is used in a different branch already. Move it to scattered bits. Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/cpufeatures.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 56e5184514c6..43c4ea9cd907 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -100,7 +100,7 @@ #define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ #define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ -#define X86_FEATURE_RING3MWAIT ( 3*32+25) /* ring 3 MONITOR/MWAIT */ +/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ @@ -186,7 +186,7 @@ * * Reuse free bits when adding new feature flags! */ - +#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ #define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ -- cgit From bd7e5b0899a429445cc6e3037c13f8b5ae3be903 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 3 Feb 2017 21:18:52 -0800 Subject: KVM: x86: remove code for lazy FPU handling The FPU is always active now when running KVM. 
Reviewed-by: David Matlack Reviewed-by: Bandan Das Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e4f13e714bcf..74ef58c8ff53 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -55,7 +55,6 @@ #define KVM_REQ_TRIPLE_FAULT 10 #define KVM_REQ_MMU_SYNC 11 #define KVM_REQ_CLOCK_UPDATE 12 -#define KVM_REQ_DEACTIVATE_FPU 13 #define KVM_REQ_EVENT 14 #define KVM_REQ_APF_HALT 15 #define KVM_REQ_STEAL_UPDATE 16 @@ -936,8 +935,6 @@ struct kvm_x86_ops { unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); u32 (*get_pkru)(struct kvm_vcpu *vcpu); - void (*fpu_activate)(struct kvm_vcpu *vcpu); - void (*fpu_deactivate)(struct kvm_vcpu *vcpu); void (*tlb_flush)(struct kvm_vcpu *vcpu); -- cgit From 4f53ab14285802b298261f8b52af322039d1dfd0 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 20 Feb 2017 08:56:09 -0800 Subject: x86/asm: Define the kernel TSS limit in a macro Rather than open-coding the kernel TSS limit in set_tss_desc(), make it a real macro near the TSS layout definition. This is purely a cleanup. Cc: Thomas Garnier Cc: Jim Mattson Signed-off-by: Andy Lutomirski Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/desc.h | 10 +--------- arch/x86/include/asm/processor.h | 10 ++++++++++ 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 12080d87da3b..2e781bcc5e12 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -177,16 +177,8 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) struct desc_struct *d = get_cpu_gdt_table(cpu); tss_desc tss; - /* - * sizeof(unsigned long) coming from an extra "long" at the end - * of the iobitmap. See tss_struct definition in processor.h - * - * -1? seg base+limit should be pointing to the address of the - * last valid byte - */ set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, - IO_BITMAP_OFFSET + IO_BITMAP_BYTES + - sizeof(unsigned long) - 1); + __KERNEL_TSS_LIMIT); write_gdt_entry(d, entry, &tss, DESC_TSS); } diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1be64da0384e..f8f1b7537abe 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -341,6 +341,16 @@ struct tss_struct { DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); +/* + * sizeof(unsigned long) coming from an extra "long" at the end + * of the iobitmap. + * + * -1? seg base+limit should be pointing to the address of the + * last valid byte + */ +#define __KERNEL_TSS_LIMIT \ + (IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1) + #ifdef CONFIG_X86_32 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); #endif -- cgit From d3273deac9c0cdae32eb46f928487433eaa37f87 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 20 Feb 2017 08:56:13 -0800 Subject: x86/asm/64: Drop __cacheline_aligned from struct x86_hw_tss Historically, the entire TSS + io bitmap structure was cacheline aligned, but commit ca241c75037b ("x86: unify tss_struct") changed it (presumably inadvertently) so that the fixed-layout hardware part is cacheline-aligned and the io bitmap is after the padding. 
This wastes 24 bytes (the hardware part should be 104 bytes, but this pads it to 128 bytes) and, serves no purpose, and causes sizeof(struct x86_hw_tss) to have a confusing value. Drop the pointless alignment. Signed-off-by: Andy Lutomirski Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/processor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index f8f1b7537abe..1879cdf2b6ae 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -303,7 +303,7 @@ struct x86_hw_tss { u16 reserved5; u16 io_bitmap_base; -} __attribute__((packed)) ____cacheline_aligned; +} __attribute__((packed)); #endif /* -- cgit From b7ffc44d5b2ea163899d09289ca7743d5c32e926 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 20 Feb 2017 08:56:14 -0800 Subject: x86/kvm/vmx: Defer TR reload after VM exit Intel's VMX is daft and resets the hidden TSS limit register to 0x67 on VMX reload, and the 0x67 is not configurable. KVM currently reloads TR using the LTR instruction on every exit, but this is quite slow because LTR is serializing. The 0x67 limit is entirely harmless unless ioperm() is in use, so defer the reload until a task using ioperm() is actually running. Here's some poorly done benchmarking using kvm-unit-tests: Before: cpuid 1313 vmcall 1195 mov_from_cr8 11 mov_to_cr8 17 inl_from_pmtimer 6770 inl_from_qemu 6856 inl_from_kernel 2435 outl_to_kernel 1402 After: cpuid 1291 vmcall 1181 mov_from_cr8 11 mov_to_cr8 16 inl_from_pmtimer 6457 inl_from_qemu 6209 inl_from_kernel 2339 outl_to_kernel 1391 Signed-off-by: Andy Lutomirski [Force-reload TR in invalidate_tss_limit. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/desc.h | 48 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 2e781bcc5e12..cb8f9149f6c8 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -205,6 +205,54 @@ static inline void native_load_tr_desc(void) asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); } +static inline void force_reload_TR(void) +{ + struct desc_struct *d = get_cpu_gdt_table(smp_processor_id()); + tss_desc tss; + + memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc)); + + /* + * LTR requires an available TSS, and the TSS is currently + * busy. Make it be available so that LTR will work. + */ + tss.type = DESC_TSS; + write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS); + + load_TR_desc(); +} + +DECLARE_PER_CPU(bool, need_tr_refresh); + +static inline void refresh_TR(void) +{ + DEBUG_LOCKS_WARN_ON(preemptible()); + + if (unlikely(this_cpu_read(need_tr_refresh))) { + force_reload_TR(); + this_cpu_write(need_tr_refresh, false); + } +} + +/* + * If you do something evil that corrupts the cached TSS limit (I'm looking + * at you, VMX exits), call this function. + * + * The optimization here is that the TSS limit only matters for Linux if the + * IO bitmap is in use. If the TSS limit gets forced to its minimum value, + * everything works except that IO bitmap will be ignored and all CPL 3 IO + * instructions will #GP, which is exactly what we want for normal tasks. 
+ */ +static inline void invalidate_tss_limit(void) +{ + DEBUG_LOCKS_WARN_ON(preemptible()); + + if (unlikely(test_thread_flag(TIF_IO_BITMAP))) + force_reload_TR(); + else + this_cpu_write(need_tr_refresh, true); +} + static inline void native_load_gdt(const struct desc_ptr *dtr) { asm volatile("lgdt %0"::"m" (*dtr)); -- cgit From 6c62985d576c8a816f528c39204207b9f449d923 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 20 Feb 2017 13:36:03 -0500 Subject: x86/paravirt: Change vcp_is_preempted() arg type to long The cpu argument in the function prototype of vcpu_is_preempted() is changed from int to long. That makes it easier to provide a better optimized assembly version of that function. For Xen, vcpu_is_preempted(long) calls xen_vcpu_stolen(int), the downcast from long to int is not a problem as vCPU number won't exceed 32 bits. Signed-off-by: Waiman Long Acked-by: Peter Zijlstra (Intel) Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/paravirt.h | 2 +- arch/x86/include/asm/qspinlock.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 1eea6ca40694..f75fbfe550f2 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -673,7 +673,7 @@ static __always_inline void pv_kick(int cpu) PVOP_VCALL1(pv_lock_ops.kick, cpu); } -static __always_inline bool pv_vcpu_is_preempted(int cpu) +static __always_inline bool pv_vcpu_is_preempted(long cpu) { return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu); } diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index c343ab52579f..48a706f641f2 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -34,7 +34,7 @@ static inline void queued_spin_unlock(struct qspinlock *lock) } #define vcpu_is_preempted vcpu_is_preempted -static inline bool vcpu_is_preempted(int cpu) +static inline bool vcpu_is_preempted(long cpu) { return pv_vcpu_is_preempted(cpu); } -- cgit From a00cc7d9dd93d66a3fb83fc52aa57a4bec51c517 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 24 Feb 2017 14:57:02 -0800 Subject: mm, x86: add support for PUD-sized transparent hugepages The current transparent hugepage code only supports PMDs. This patch adds support for transparent use of PUDs with DAX. It does not include support for anonymous pages. x86 support code also added. Most of this patch simply parallels the work that was done for huge PMDs. The only major difference is how the new ->pud_entry method in mm_walk works. The ->pmd_entry method replaces the ->pte_entry method, whereas the ->pud_entry method works along with either ->pmd_entry or ->pte_entry. The pagewalk code takes care of locking the PUD before calling ->pud_walk, so handlers do not need to worry whether the PUD is stable. [dave.jiang@intel.com: fix SMP x86 32bit build for native_pud_clear()] Link: http://lkml.kernel.org/r/148719066814.31111.3239231168815337012.stgit@djiang5-desk3.ch.intel.com [dave.jiang@intel.com: native_pud_clear missing on i386 build] Link: http://lkml.kernel.org/r/148640375195.69754.3315433724330910314.stgit@djiang5-desk3.ch.intel.com Link: http://lkml.kernel.org/r/148545059381.17912.8602162635537598445.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Matthew Wilcox Signed-off-by: Dave Jiang Tested-by: Alexander Kapshuk Cc: Dave Hansen Cc: Vlastimil Babka Cc: Jan Kara Cc: Dan Williams Cc: Ross Zwisler Cc: Kirill A. Shutemov Cc: Nilesh Choudhury Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/paravirt.h | 11 +++ arch/x86/include/asm/paravirt_types.h | 2 + arch/x86/include/asm/pgtable-2level.h | 17 +++++ arch/x86/include/asm/pgtable-3level.h | 30 ++++++++ arch/x86/include/asm/pgtable.h | 140 ++++++++++++++++++++++++++++++++++ arch/x86/include/asm/pgtable_64.h | 15 ++++ 6 files changed, 215 insertions(+) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index f75fbfe550f2..0489884fdc44 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -475,6 +475,17 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, native_pmd_val(pmd)); } +static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pud) +{ + if (sizeof(pudval_t) > sizeof(long)) + /* 5 arg words */ + pv_mmu_ops.set_pud_at(mm, addr, pudp, pud); + else + PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp, + native_pud_val(pud)); +} + static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { pmdval_t val = native_pmd_val(pmd); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index bb2de45a60f2..b060f962d581 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -249,6 +249,8 @@ struct pv_mmu_ops { void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmdval); + void (*set_pud_at)(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pudval); void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index fd74a11959de..a8b96e708c2b 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -21,6 +21,10 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) *pmdp = pmd; } +static inline void native_set_pud(pud_t *pudp, pud_t pud) +{ +} + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) { native_set_pte(ptep, pte); @@ -31,6 +35,10 @@ static inline void native_pmd_clear(pmd_t *pmdp) native_set_pmd(pmdp, __pmd(0)); } +static inline void native_pud_clear(pud_t *pudp) +{ +} + static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) { @@ -55,6 +63,15 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) #endif +#ifdef CONFIG_SMP +static inline pud_t native_pudp_get_and_clear(pud_t *xp) +{ + return __pud(xchg((pudval_t *)xp, 0)); +} +#else +#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp) +#endif + /* Bit manipulation helper on pte/pgoff entry */ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift, unsigned long mask, unsigned int leftshift) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index cdaa58c9b39e..8f50fb3f04e1 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -121,6 +121,12 @@ static inline void native_pmd_clear(pmd_t *pmd) *(tmp + 1) = 0; } +#ifndef CONFIG_SMP +static inline void native_pud_clear(pud_t *pudp) +{ +} +#endif + static inline void pud_clear(pud_t *pudp) { set_pud(pudp, __pud(0)); @@ -176,6 +182,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) #define 
native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) #endif +#ifdef CONFIG_SMP +union split_pud { + struct { + u32 pud_low; + u32 pud_high; + }; + pud_t pud; +}; + +static inline pud_t native_pudp_get_and_clear(pud_t *pudp) +{ + union split_pud res, *orig = (union split_pud *)pudp; + + /* xchg acts as a barrier before setting of the high bits */ + res.pud_low = xchg(&orig->pud_low, 0); + res.pud_high = orig->pud_high; + orig->pud_high = 0; + + return res.pud; +} +#else +#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp) +#endif + /* Encode and de-code a swap entry */ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) #define __swp_type(x) (((x).val) & 0x1f) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 437feb436efa..1cfb36b8c024 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); #define set_pte(ptep, pte) native_set_pte(ptep, pte) #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) #define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd) +#define set_pud_at(mm, addr, pudp, pud) native_set_pud_at(mm, addr, pudp, pud) #define set_pte_atomic(ptep, pte) \ native_set_pte_atomic(ptep, pte) @@ -128,6 +129,16 @@ static inline int pmd_young(pmd_t pmd) return pmd_flags(pmd) & _PAGE_ACCESSED; } +static inline int pud_dirty(pud_t pud) +{ + return pud_flags(pud) & _PAGE_DIRTY; +} + +static inline int pud_young(pud_t pud) +{ + return pud_flags(pud) & _PAGE_ACCESSED; +} + static inline int pte_write(pte_t pte) { return pte_flags(pte) & _PAGE_RW; @@ -181,6 +192,13 @@ static inline int pmd_trans_huge(pmd_t pmd) return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; } +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline int pud_trans_huge(pud_t pud) +{ + return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; +} +#endif + #define has_transparent_hugepage has_transparent_hugepage static inline int has_transparent_hugepage(void) { @@ -192,6 +210,18 @@ static inline int pmd_devmap(pmd_t pmd) { return !!(pmd_val(pmd) & _PAGE_DEVMAP); } + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline int pud_devmap(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_DEVMAP); +} +#else +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +#endif #endif #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -333,6 +363,65 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd) return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); } +static inline pud_t pud_set_flags(pud_t pud, pudval_t set) +{ + pudval_t v = native_pud_val(pud); + + return __pud(v | set); +} + +static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) +{ + pudval_t v = native_pud_val(pud); + + return __pud(v & ~clear); +} + +static inline pud_t pud_mkold(pud_t pud) +{ + return pud_clear_flags(pud, _PAGE_ACCESSED); +} + +static inline pud_t pud_mkclean(pud_t pud) +{ + return pud_clear_flags(pud, _PAGE_DIRTY); +} + +static inline pud_t pud_wrprotect(pud_t pud) +{ + return pud_clear_flags(pud, _PAGE_RW); +} + +static inline pud_t pud_mkdirty(pud_t pud) +{ + return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); +} + +static inline pud_t pud_mkdevmap(pud_t pud) +{ + return pud_set_flags(pud, _PAGE_DEVMAP); +} + +static inline pud_t pud_mkhuge(pud_t pud) +{ + return pud_set_flags(pud, _PAGE_PSE); +} + +static inline pud_t pud_mkyoung(pud_t pud) +{ + return pud_set_flags(pud, 
_PAGE_ACCESSED); +} + +static inline pud_t pud_mkwrite(pud_t pud) +{ + return pud_set_flags(pud, _PAGE_RW); +} + +static inline pud_t pud_mknotpresent(pud_t pud) +{ + return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE); +} + #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY static inline int pte_soft_dirty(pte_t pte) { @@ -344,6 +433,11 @@ static inline int pmd_soft_dirty(pmd_t pmd) return pmd_flags(pmd) & _PAGE_SOFT_DIRTY; } +static inline int pud_soft_dirty(pud_t pud) +{ + return pud_flags(pud) & _PAGE_SOFT_DIRTY; +} + static inline pte_t pte_mksoft_dirty(pte_t pte) { return pte_set_flags(pte, _PAGE_SOFT_DIRTY); @@ -354,6 +448,11 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); } +static inline pud_t pud_mksoft_dirty(pud_t pud) +{ + return pud_set_flags(pud, _PAGE_SOFT_DIRTY); +} + static inline pte_t pte_clear_soft_dirty(pte_t pte) { return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); @@ -364,6 +463,11 @@ static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); } +static inline pud_t pud_clear_soft_dirty(pud_t pud) +{ + return pud_clear_flags(pud, _PAGE_SOFT_DIRTY); +} + #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ /* @@ -392,6 +496,12 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) massage_pgprot(pgprot)); } +static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) +{ + return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) | + massage_pgprot(pgprot)); +} + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pteval_t val = pte_val(pte); @@ -771,6 +881,14 @@ static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp) return res; } +static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp) +{ + pud_t res = *pudp; + + native_pud_clear(pudp); + return res; +} + static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep , pte_t pte) { @@ -783,6 +901,12 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr, native_set_pmd(pmdp, pmd); } +static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pud) +{ + native_set_pud(pudp, pud); +} + #ifndef CONFIG_PARAVIRT /* * Rules for using pte_update - it must be called after any PTE update which @@ -861,10 +985,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); +extern int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp); +extern int pudp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pud_t *pudp); #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH extern int pmdp_clear_flush_young(struct vm_area_struct *vma, @@ -884,6 +1013,13 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long return native_pmdp_get_and_clear(pmdp); } +#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR +static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pud_t *pudp) +{ + return native_pudp_get_and_clear(pudp); +} + #define __HAVE_ARCH_PMDP_SET_WRPROTECT static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) @@ -932,6 +1068,10 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long 
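
The ->pud_entry semantics described above lend themselves to a short sketch. This is a hedged illustration of a walker that counts huge PUDs, not code from the patch itself; the callback signature mirrors ->pmd_entry, and pud_trans_huge()/pud_devmap() are the helpers added to pgtable.h above.

	/*
	 * Hedged sketch: count PUD-sized mappings in a range. The pagewalk
	 * core locks the PUD before invoking the callback, so the tests
	 * below see a stable entry.
	 */
	static int count_huge_puds(pud_t *pud, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (pud_trans_huge(*pud) || pud_devmap(*pud))
			(*count)++;
		return 0;
	}
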
From 76062b4ae2ea54fcfb8fce6940921a90f33f38da Mon Sep 17 00:00:00 2001
From: Shanth Murthy
Date: Mon, 13 Feb 2017 04:02:52 -0800
Subject: platform/x86: intel_pmc_ipc: read s0ix residency API

This patch adds a new API to report S0ix residency in usec. It
utilizes the PMC Global Control Registers (GCR) to read deep and
shallow S0ix residency.

PMC MMIO resources:
 o Lower 4kB: IPC1 (PMC inter-processor communication) interface
 o Upper 4kB: GCR (Global Control Registers)

This enables the power management framework to take corrective actions
when the platform fails to enter S0ix after kernel freeze as part of
the suspend-to-idle flow (echo freeze > /sys/power/state).

This is expected to be used with a S0ix failsafe framework such as:

[rajneesh: folded in "fix division in 32-bit case" from Andy Shevchenko]
Signed-off-by: Rajneesh Bhardwaj
Signed-off-by: Shanth Murthy
[andy: fixed kbuild error, removed "total" from variables, fixed macro]
Signed-off-by: Andy Shevchenko
---
 arch/x86/include/asm/intel_pmc_ipc.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h
index cd0310e186f4..4291b6a5ddf7 100644
--- a/arch/x86/include/asm/intel_pmc_ipc.h
+++ b/arch/x86/include/asm/intel_pmc_ipc.h
@@ -30,6 +30,7 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
 		u32 *out, u32 outlen, u32 dptr, u32 sptr);
 int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
 		u32 *out, u32 outlen);
+int intel_pmc_s0ix_counter_read(u64 *data);
 
 #else
 
@@ -50,6 +51,11 @@ static inline int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
 	return -EINVAL;
 }
 
+static inline int intel_pmc_s0ix_counter_read(u64 *data)
+{
+	return -EINVAL;
+}
+
 #endif /*CONFIG_INTEL_PMC_IPC*/
 
 #endif
-- cgit
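
A hedged sketch of how a failsafe framework might consume the new API: sample the counter around a suspend-to-idle cycle and flag a failure when the residency did not advance. The surrounding suspend plumbing is elided, and the pr_warn() policy is an assumption, not part of the patch.

	/*
	 * Hypothetical consumer: check that S0ix residency advanced across
	 * a suspend-to-idle cycle. intel_pmc_s0ix_counter_read() fills
	 * *data with the residency in usec and returns 0 on success.
	 */
	static void sketch_check_s0ix(u64 residency_before)
	{
		u64 residency_after;

		if (intel_pmc_s0ix_counter_read(&residency_after))
			return;		/* counter unavailable */
		if (residency_after == residency_before)
			pr_warn("S0ix residency did not increase; platform failed to enter S0ix\n");
	}
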
From 7d134b2ce639448199052fd573a324f7e7cd5ed8 Mon Sep 17 00:00:00 2001
From: "Luis R. Rodriguez"
Date: Mon, 27 Feb 2017 14:26:56 -0800
Subject: kprobes: move kprobe declarations to asm-generic/kprobes.h

Often all that is needed is these small helpers, instead of compiler.h
or a full kprobes.h. This is important for asm helpers; in fact, even
some asm/kprobes.h files make use of these helpers. Instead, just keep
a generic asm file with helpers useful for asm code, with as little
clutter as possible.

Likewise, we now need to address what to do about this file both when
architectures have CONFIG_HAVE_KPROBES and when they do not, and also
when architectures have CONFIG_HAVE_KPROBES but have disabled
CONFIG_KPROBES.

Right now most asm/kprobes.h files do not have guards against
CONFIG_KPROBES; this means most architecture code cannot include
asm/kprobes.h safely. Correct this and add guards for architectures
missing them. Additionally, provide architectures that do not have
kprobes support with the default asm-generic solution. This lets us
always force asm/kprobes.h on the header include/linux/kprobes.h, but
most importantly we can now safely include just asm/kprobes.h in
architecture code without bringing in the full kitchen sink of header
files.

Two architectures already provided a guard against CONFIG_KPROBES in
their kprobes.h: sh and arc. The rest of the architectures needed
guards added. We avoid including any unneeded headers in asm/kprobes.h
unless kprobes have been enabled.

In a subsequent change we can try to remove compiler.h from
include/linux/kprobes.h.

During this sweep I've also identified a few architectures defining a
common macro needed for both kprobes and ftrace: the definition of the
breakpoint instruction. Some refer to this as BREAKPOINT_INSTRUCTION.
This must be kept outside of the #ifdef CONFIG_KPROBES guard.

[mcgrof@kernel.org: fix arm64 build]
Link: http://lkml.kernel.org/r/CAB=NE6X1WMByuARS4mZ1g9+W=LuVBnMDnh_5zyN0CLADaVh=Jw@mail.gmail.com
[sfr@canb.auug.org.au: fixup for kprobes declarations moving]
Link: http://lkml.kernel.org/r/20170214165933.13ebd4f4@canb.auug.org.au
Link: http://lkml.kernel.org/r/20170203233139.32682-1-mcgrof@kernel.org
Signed-off-by: Luis R. Rodriguez
Signed-off-by: Stephen Rothwell
Acked-by: Masami Hiramatsu
Cc: Arnd Bergmann
Cc: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Anil S Keshavamurthy
Cc: David S. Miller
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: H. Peter Anvin
Cc: Andy Lutomirski
Cc: Steven Rostedt
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/include/asm/kprobes.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d1d1e5094c28..200581691c6e 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -21,6 +21,12 @@
  *
  * See arch/x86/kernel/kprobes.c for x86 kprobes history.
  */
+
+#include <asm-generic/kprobes.h>
+
+#define BREAKPOINT_INSTRUCTION	0xcc
+
+#ifdef CONFIG_KPROBES
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
@@ -32,7 +38,6 @@ struct pt_regs;
 struct kprobe;
 
 typedef u8 kprobe_opcode_t;
-#define BREAKPOINT_INSTRUCTION	0xcc
 #define RELATIVEJUMP_OPCODE 0xe9
 #define RELATIVEJUMP_SIZE 5
 #define RELATIVECALL_OPCODE 0xe8
@@ -116,4 +121,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
 extern int kprobe_int3_handler(struct pt_regs *regs);
 extern int kprobe_debug_handler(struct pt_regs *regs);
+
+#endif /* CONFIG_KPROBES */
 #endif /* _ASM_X86_KPROBES_H */
-- cgit
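
For context, an abridged sketch of the idea behind the new generic header; this is an approximation, not a verbatim copy of include/asm-generic/kprobes.h: when kprobes are configured out, the annotations collapse to no-ops, so common code can use them unconditionally.

	/*
	 * Abridged sketch (approximate, not the verbatim header): the
	 * annotations exist either way, so callers need no #ifdefs.
	 */
	#ifndef CONFIG_KPROBES
	# define __kprobes			/* no special text section */
	# define NOKPROBE_SYMBOL(fname)		/* no blacklist entry */
	#else
	/* keep the function out of kprobe reach via a dedicated section */
	# define __kprobes	__attribute__((__section__(".kprobes.text")))
	/* NOKPROBE_SYMBOL(fname) records fname in the kprobe blacklist */
	#endif
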
From 03440c4e5e2f167764997a7e0f2dbb279d8078e6 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Mon, 27 Feb 2017 14:28:49 -0800
Subject: scripts/spelling.txt: add "an union" pattern and fix typo instances

Fix typos and add the following to the scripts/spelling.txt:

  an union||a union

Link: http://lkml.kernel.org/r/1481573103-11329-5-git-send-email-yamada.masahiro@socionext.com
Signed-off-by: Masahiro Yamada
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/include/asm/desc_defs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index eb5deb42484d..49265345d4d2 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -15,7 +15,7 @@
  * FIXME: Accessing the desc_struct through its fields is more elegant,
  * and should be the one valid thing to do. However, a lot of open code
  * still touches the a and b accessors, and doing this allow us to do it
- * incrementally. We keep the signature as a struct, rather than an union,
+ * incrementally. We keep the signature as a struct, rather than a union,
  * so we can get rid of it transparently in the future -- glommer
  */
 /* 8 byte segment descriptor */
-- cgit

From 2959a5f726f6510d6dd7c958f8877e08d0cf589c Mon Sep 17 00:00:00 2001
From: Jinbum Park
Date: Mon, 27 Feb 2017 14:30:22 -0800
Subject: mm: add arch-independent testcases for RODATA

This patch adds arch-independent testcases for RODATA. Both x86 and
x86_64 already have testcases for RODATA, but they are arch-specific
because they use inline assembly directly. Moreover, cacheflush.h is
not a suitable location for rodata-test-related things.

Since the testcases were in cacheflush.h, changing the state of
CONFIG_DEBUG_RODATA_TEST added kernel-build overhead.

To solve these issues, write arch-independent testcases and move them
to a shared location.

[jinb.park7@gmail.com: fix config dependency]
Link: http://lkml.kernel.org/r/20170209131625.GA16954@pjb1027-Latitude-E5410
Link: http://lkml.kernel.org/r/20170129105436.GA9303@pjb1027-Latitude-E5410
Signed-off-by: Jinbum Park
Acked-by: Kees Cook
Cc: Ingo Molnar
Cc: H. Peter Anvin
Cc: Arjan van de Ven
Cc: Laura Abbott
Cc: Russell King
Cc: Valentin Rothberg
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/include/asm/cacheflush.h | 10 ----------
 1 file changed, 10 deletions(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 872877d930de..e7e1942edff7 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -90,18 +90,8 @@ void clflush_cache_range(void *addr, unsigned int size);
 
 #define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
 
-extern const int rodata_test_data;
 extern int kernel_set_to_readonly;
 void set_kernel_text_rw(void);
 void set_kernel_text_ro(void);
 
-#ifdef CONFIG_DEBUG_RODATA_TEST
-int rodata_test(void);
-#else
-static inline int rodata_test(void)
-{
-	return 0;
-}
-#endif
-
 #endif /* _ASM_X86_CACHEFLUSH_H */
-- cgit
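
A hedged sketch of the arch-independent approach: instead of per-arch inline assembly, attempt the forbidden write with probe_kernel_write() and treat success as a failure. This mirrors the idea of the shared testcase; the function name and return-value policy are illustrative, not the exact contents of the new mm/rodata_test.c.

	const int rodata_test_data = 0xC3;	/* placed in .rodata */

	static int sketch_rodata_test(void)
	{
		int zero = 0;

		/* probe_kernel_write() returns an error when the target
		 * page is mapped read-only, the expected outcome here */
		if (!probe_kernel_write((void *)&rodata_test_data,
					&zero, sizeof(zero)))
			return -ENODEV;	/* write succeeded: .rodata is writable */
		return 0;
	}
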
Peter Anvin Cc: Arjan van de Ven Cc: Laura Abbott Cc: Russell King Cc: Valentin Rothberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/cacheflush.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 872877d930de..e7e1942edff7 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -90,18 +90,8 @@ void clflush_cache_range(void *addr, unsigned int size); #define mmio_flush_range(addr, size) clflush_cache_range(addr, size) -extern const int rodata_test_data; extern int kernel_set_to_readonly; void set_kernel_text_rw(void); void set_kernel_text_ro(void); -#ifdef CONFIG_DEBUG_RODATA_TEST -int rodata_test(void); -#else -static inline int rodata_test(void) -{ - return 0; -} -#endif - #endif /* _ASM_X86_CACHEFLUSH_H */ -- cgit From 3e761a42e19c63b624ebac94d918d8a15e07e2a7 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 27 Feb 2017 14:30:25 -0800 Subject: mm, x86: fix HIGHMEM64 && PARAVIRT build config for native_pud_clear() Looks like I also missed the build config that includes CONFIG_HIGHMEM64G && CONFIG_PARAVIRT to export the native_pud_clear() dummy function. Fixes: a00cc7d9dd93d ("mm, x86: add support for PUD-sized transparent hugepages") Link: http://lkml.kernel.org/r/148823188084.56076.17451228917824355200.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dave Jiang Reported-by: Laura Abbott Reported-by: Boris Ostrovsky Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/pgtable-3level.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 8f50fb3f04e1..72277b1028a5 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -121,7 +121,8 @@ static inline void native_pmd_clear(pmd_t *pmd) *(tmp + 1) = 0; } -#ifndef CONFIG_SMP +#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \ + defined(CONFIG_PARAVIRT)) static inline void native_pud_clear(pud_t *pudp) { } -- cgit