Diffstat (limited to 'include/asm-x86')
-rw-r--r-- | include/asm-x86/bitops.h     | 37
-rw-r--r-- | include/asm-x86/bootparam.h  |  8
-rw-r--r-- | include/asm-x86/div64.h      | 34
-rw-r--r-- | include/asm-x86/dmi.h        |  1
-rw-r--r-- | include/asm-x86/geode.h      | 12
-rw-r--r-- | include/asm-x86/i387.h       | 10
-rw-r--r-- | include/asm-x86/io_32.h      |  5
-rw-r--r-- | include/asm-x86/kvm_host.h   | 10
-rw-r--r-- | include/asm-x86/pat.h        |  8
-rw-r--r-- | include/asm-x86/pgtable_32.h |  9
-rw-r--r-- | include/asm-x86/pgtable_64.h |  6
-rw-r--r-- | include/asm-x86/spinlock.h   | 18
-rw-r--r-- | include/asm-x86/topology.h   | 18
-rw-r--r-- | include/asm-x86/types.h      | 38
14 files changed, 89 insertions, 125 deletions
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index b81a4d4d3337..ee4b3ead6a43 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,13 +23,10 @@
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
 /* Technically wrong, but this avoids compilation errors on some gcc versions. */
-#define ADDR "=m" (*(volatile long *)addr)
-#define BIT_ADDR "=m" (((volatile int *)addr)[nr >> 5])
+#define ADDR "=m" (*(volatile long *) addr)
 #else
 #define ADDR "+m" (*(volatile long *) addr)
-#define BIT_ADDR "+m" (((volatile int *)addr)[nr >> 5])
 #endif
-#define BASE_ADDR "m" (*(volatile int *)addr)
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -77,7 +74,7 @@ static inline void __set_bit(int nr, volatile void *addr)
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
 }
 
 /*
@@ -96,7 +93,7 @@ static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
 
 static inline void __clear_bit(int nr, volatile void *addr)
 {
-	asm volatile("btr %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
 
 /*
@@ -131,7 +128,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
  */
 static inline void __change_bit(int nr, volatile void *addr)
 {
-	asm volatile("btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
 
 /**
@@ -145,7 +142,7 @@ static inline void __change_bit(int nr, volatile void *addr)
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-	asm volatile(LOCK_PREFIX "btc %1,%2" : BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
 }
 
 /**
@@ -191,9 +188,10 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
-	asm volatile("bts %2,%3\n\t"
-		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+	asm("bts %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr));
 	return oldbit;
 }
 
@@ -229,9 +227,10 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
-	asm volatile("btr %2,%3\n\t"
+	asm volatile("btr %2,%1\n\t"
 		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr));
 	return oldbit;
 }
 
@@ -240,9 +239,10 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
 {
 	int oldbit;
 
-	asm volatile("btc %2,%3\n\t"
+	asm volatile("btc %2,%1\n\t"
 		     "sbb %0,%0"
-		     : "=r" (oldbit), BIT_ADDR : "Ir" (nr), BASE_ADDR);
+		     : "=r" (oldbit), ADDR
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -276,11 +276,10 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
 {
 	int oldbit;
 
-	asm volatile("bt %2,%3\n\t"
+	asm volatile("bt %2,%1\n\t"
 		     "sbb %0,%0"
 		     : "=r" (oldbit)
-		     : "m" (((volatile const int *)addr)[nr >> 5]),
-		       "Ir" (nr), BASE_ADDR);
+		     : "m" (*(unsigned long *)addr), "Ir" (nr));
 	return oldbit;
 }
 
@@ -397,8 +396,6 @@ static inline int fls(int x)
 }
 #endif /* __KERNEL__ */
 
-#undef BASE_ADDR
-#undef BIT_ADDR
 #undef ADDR
 
 static inline void set_bit_string(unsigned long *bitmap,
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index e8659909e5f6..f62f4733606b 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -14,10 +14,10 @@
 
 /* extensible setup data list node */
 struct setup_data {
-	u64 next;
-	u32 type;
-	u32 len;
-	u8 data[0];
+	__u64 next;
+	__u32 type;
+	__u32 len;
+	__u8 data[0];
 };
 
 struct setup_header {
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index 0dbf8bf3ef0a..9a2d644c08ef 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -33,25 +33,25 @@
 	__mod;						\
 })
 
-/*
- * (long)X = ((long long)divs) / (long)div
- * (long)rem = ((long long)divs) % (long)div
- *
- * Warning, this will do an exception if X overflows.
- */
-#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c)
-
-static inline long div_ll_X_l_rem(long long divs, long div, long *rem)
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
-	long dum2;
-	asm("divl %2":"=a"(dum2), "=d"(*rem) : "rm"(div), "A"(divs));
-
-	return dum2;
-
+	union {
+		u64 v64;
+		u32 v32[2];
+	} d = { dividend };
+	u32 upper;
+
+	upper = d.v32[1];
+	d.v32[1] = 0;
+	if (upper >= divisor) {
+		d.v32[1] = upper / divisor;
+		upper %= divisor;
+	}
+	asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
+		"rm" (divisor), "0" (d.v32[0]), "1" (upper));
+	return d.v64;
 }
-
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
+#define div_u64_rem div_u64_rem
 
 #else
 # include <asm-generic/div64.h>
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h
index 1241e6ad1935..4edf7514a750 100644
--- a/include/asm-x86/dmi.h
+++ b/include/asm-x86/dmi.h
@@ -27,6 +27,7 @@ static inline void *dmi_alloc(unsigned len)
 
 #endif
 
+/* Use early IO mappings for DMI because it's initialized early */
 #define dmi_ioremap early_ioremap
 #define dmi_iounmap early_iounmap
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 7154dc4de951..6e6458853a36 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -185,16 +185,14 @@ static inline int is_geode(void)
 	return (is_geode_gx() || is_geode_lx());
 }
 
-/*
- * The VSA has virtual registers that we can query for a signature.
- */
+#ifdef CONFIG_MGEODE_LX
+extern int geode_has_vsa2(void);
+#else
 static inline int geode_has_vsa2(void)
 {
-	outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
-	outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
-
-	return (inw(VSA_VRC_DATA) == VSA_SIG);
+	return 0;
 }
+#endif
 
 /* MFGPTs */
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index da2adb45f6e3..6b722d315936 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -175,7 +175,15 @@ static inline int save_i387(struct _fpstate __user *buf)
  */
 static inline int restore_i387(struct _fpstate __user *buf)
 {
-	set_used_math();
+	struct task_struct *tsk = current;
+	int err;
+
+	if (!used_math()) {
+		err = init_fpu(tsk);
+		if (err)
+			return err;
+	}
+
 	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
 		clts();
 		task_thread_info(current)->status |= TS_USEDFPU;
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 6e73467a4fb1..049e81e797a0 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -133,11 +133,6 @@ extern void *early_ioremap(unsigned long offset, unsigned long size);
 extern void early_iounmap(void *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
-/* Use early IO mappings for DMI because it's initialized early */
-#define dmi_ioremap early_ioremap
-#define dmi_iounmap early_iounmap
-#define dmi_alloc alloc_bootmem
-
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 9d963cd6533c..1d8cd01fa514 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -314,6 +314,9 @@ struct kvm_arch{
 	struct page *apic_access_page;
 
 	gpa_t wall_clock;
+
+	struct page *ept_identity_pagetable;
+	bool ept_identity_pagetable_done;
 };
 
 struct kvm_vm_stat {
@@ -422,6 +425,7 @@ struct kvm_x86_ops {
 				struct kvm_run *run);
 
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+	int (*get_tdp_level)(void);
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -433,6 +437,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
+void kvm_mmu_set_base_ptes(u64 base_pte);
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
@@ -620,7 +627,7 @@ static inline void fx_restore(struct i387_fxsave_struct *image)
 	asm("fxrstor (%0)":: "r" (image));
 }
 
-static inline void fpu_init(void)
+static inline void fx_finit(void)
 {
 	asm("finit");
 }
@@ -644,6 +651,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 #define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
 #define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
 #define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_INVEPT            ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
 #define ASM_VMX_INVVPID           ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
 
 #define MSR_IA32_TIME_STAMP_COUNTER 0x010
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
index 8b822b5a1786..88f60cc6a227 100644
--- a/include/asm-x86/pat.h
+++ b/include/asm-x86/pat.h
@@ -4,7 +4,13 @@
 
 #include <linux/types.h>
 
+#ifdef CONFIG_X86_PAT
 extern int pat_wc_enabled;
+extern void validate_pat_support(struct cpuinfo_x86 *c);
+#else
+static const int pat_wc_enabled = 0;
+static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
+#endif
 
 extern void pat_init(void);
 
@@ -12,5 +18,7 @@ extern int reserve_memtype(u64 start, u64 end,
 		unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);
 
+extern void pat_disable(char *reason);
+
 #endif
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 577ab79c4c27..d7f0403bbecb 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -88,14 +88,7 @@ extern unsigned long pg0[];
 /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
 #define pmd_none(x)	(!(unsigned long)pmd_val((x)))
 #define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
-
-extern int pmd_bad(pmd_t pmd);
-
-#define pmd_bad_v1(x)							\
-	(_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER)))
-#define pmd_bad_v2(x)							\
-	(_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER |	\
-					    _PAGE_PSE | _PAGE_NX)))
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index a3bbf8766c1d..efe83dcbd412 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -158,14 +158,12 @@ static inline unsigned long pgd_bad(pgd_t pgd)
 
 static inline unsigned long pud_bad(pud_t pud)
 {
-	return pud_val(pud) &
-		~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
+	return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
 }
 
 static inline unsigned long pmd_bad(pmd_t pmd)
 {
-	return pmd_val(pmd) &
-		~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
+	return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
 }
 
 #define pte_none(x)	(!pte_val((x)))
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index bc6376f1bc5a..21e89bf92f1c 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -20,18 +20,8 @@
  */
 
 #ifdef CONFIG_X86_32
-typedef char _slock_t;
-# define LOCK_INS_DEC "decb"
-# define LOCK_INS_XCH "xchgb"
-# define LOCK_INS_MOV "movb"
-# define LOCK_INS_CMP "cmpb"
 # define LOCK_PTR_REG "a"
 #else
-typedef int _slock_t;
-# define LOCK_INS_DEC "decl"
-# define LOCK_INS_XCH "xchgl"
-# define LOCK_INS_MOV "movl"
-# define LOCK_INS_CMP "cmpl"
 # define LOCK_PTR_REG "D"
 #endif
 
@@ -66,14 +56,14 @@ typedef int _slock_t;
 #if (NR_CPUS < 256)
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
 }
 
 static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }
@@ -130,14 +120,14 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 #else
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
 }
 
 static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
-	int tmp = *(volatile signed int *)(&(lock)->slock);
+	int tmp = ACCESS_ONCE(lock->slock);
 
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 4f35a0fb4f22..dcf3f8131d6b 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -25,6 +25,16 @@
 #ifndef _ASM_X86_TOPOLOGY_H
 #define _ASM_X86_TOPOLOGY_H
 
+#ifdef CONFIG_X86_32
+# ifdef CONFIG_X86_HT
+#  define ENABLE_TOPO_DEFINES
+# endif
+#else
+# ifdef CONFIG_SMP
+#  define ENABLE_TOPO_DEFINES
+# endif
+#endif
+
 #ifdef CONFIG_NUMA
 #include <linux/cpumask.h>
 #include <asm/mpspec.h>
@@ -130,10 +140,6 @@ extern unsigned long node_end_pfn[];
 extern unsigned long node_remap_size[];
 #define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid])
 
-# ifdef CONFIG_X86_HT
-#  define ENABLE_TOPO_DEFINES
-# endif
-
 # define SD_CACHE_NICE_TRIES	1
 # define SD_IDLE_IDX		1
 # define SD_NEWIDLE_IDX		2
@@ -141,10 +147,6 @@ extern unsigned long node_remap_size[];
 
 #else
 
-# ifdef CONFIG_SMP
-#  define ENABLE_TOPO_DEFINES
-# endif
-
 # define SD_CACHE_NICE_TRIES	2
 # define SD_IDLE_IDX		2
 # define SD_NEWIDLE_IDX		2
diff --git a/include/asm-x86/types.h b/include/asm-x86/types.h
index 63733f315688..1ac80cd9acf8 100644
--- a/include/asm-x86/types.h
+++ b/include/asm-x86/types.h
@@ -1,34 +1,12 @@
 #ifndef _ASM_X86_TYPES_H
 #define _ASM_X86_TYPES_H
 
+#include <asm-generic/int-ll64.h>
+
 #ifndef __ASSEMBLY__
 
 typedef unsigned short umode_t;
 
-/*
- * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space
- */
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-#ifdef __i386__
-# ifdef __GNUC__
-__extension__ typedef __signed__ long long __s64;
-__extension__ typedef unsigned long long __u64;
-# endif
-#else
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 /*
@@ -44,18 +22,6 @@ typedef unsigned long long __u64;
 
 #ifndef __ASSEMBLY__
 
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-
 typedef u64 dma64_addr_t;
 #if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G)
 /* DMA addresses come in 32-bit and 64-bit flavours. */