Diffstat (limited to 'arch/ia64/include/asm')
-rw-r--r-- | arch/ia64/include/asm/Kbuild        |   2
-rw-r--r-- | arch/ia64/include/asm/acenv.h       |   4
-rw-r--r-- | arch/ia64/include/asm/acpi.h        |   5
-rw-r--r-- | arch/ia64/include/asm/atomic.h      | 192
-rw-r--r-- | arch/ia64/include/asm/kvm_host.h    |  15
-rw-r--r-- | arch/ia64/include/asm/page.h        |   2
-rw-r--r-- | arch/ia64/include/asm/processor.h   |   1
-rw-r--r-- | arch/ia64/include/asm/scatterlist.h |   7
-rw-r--r-- | arch/ia64/include/asm/unistd.h      |   2
9 files changed, 110 insertions, 120 deletions
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 0da4aa2602ae..747320be9d0e 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -2,8 +2,10 @@
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += hash.h
+generic-y += irq_work.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
 generic-y += vtime.h
diff --git a/arch/ia64/include/asm/acenv.h b/arch/ia64/include/asm/acenv.h
index 3f9eaeec9873..35ff13afbf34 100644
--- a/arch/ia64/include/asm/acenv.h
+++ b/arch/ia64/include/asm/acenv.h
@@ -19,8 +19,6 @@
 
 /* Asm macros */
 
-#ifdef CONFIG_ACPI
-
 static inline int
 ia64_acpi_acquire_global_lock(unsigned int *lock)
 {
@@ -51,6 +49,4 @@ ia64_acpi_release_global_lock(unsigned int *lock)
 #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
 	((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
 
-#endif
-
 #endif /* _ASM_IA64_ACENV_H */
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 75dc59a793d6..a1d91ab4c5ef 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -40,6 +40,11 @@ extern int acpi_lapic;
 #define acpi_noirq 0	/* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0	/* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+	return !!acpi_lapic;
+}
 #endif
 #define acpi_processor_cstate_check(x) (x)	/* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
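The acpi.h hunk above adds acpi_has_cpu_in_madt(), a small predicate that generic ACPI code can call on any architecture to ask whether CPUs were enumerated through the MADT; on IA64 it just normalizes the existing acpi_lapic flag to bool. A minimal user-space sketch of the same wrapper pattern (the acpi_lapic flag is stubbed here purely for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the kernel flag set while parsing the MADT. */
    static int acpi_lapic = 1;

    /* Same shape as the IA64 helper: normalize the int flag to bool. */
    static inline bool acpi_has_cpu_in_madt(void)
    {
            return !!acpi_lapic;
    }

    int main(void)
    {
            if (acpi_has_cpu_in_madt())
                    printf("CPUs enumerated via MADT\n");
            return 0;
    }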
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0f8bf48dadf3..0bf03501fe5c 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,68 +21,100 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 
 #define atomic_set(v,i)		(((v)->counter) = (i))
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
-	__s32 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
-		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-	return new;
+#define ATOMIC_OP(op, c_op)						\
+static __inline__ int							\
+ia64_atomic_##op (int i, atomic_t *v)					\
+{									\
+	__s32 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+	return new;							\
 }
 
-static __inline__ long
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
-	__s64 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic64_read(v);
-		new = old + i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-	return new;
-}
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
 
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
-	__s32 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
-		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-	return new;
-}
+#undef ATOMIC_OP
 
-static __inline__ long
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
-	__s64 old, new;
-	CMPXCHG_BUGCHECK_DECL
-
-	do {
-		CMPXCHG_BUGCHECK(v);
-		old = atomic64_read(v);
-		new = old - i;
-	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-	return new;
+#define atomic_add_return(i,v)						\
+({									\
+	int __ia64_aar_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic_add(__ia64_aar_i, v);			\
+})
+
+#define atomic_sub_return(i,v)						\
+({									\
+	int __ia64_asr_i = (i);						\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic_sub(__ia64_asr_i, v);			\
+})
+
+#define ATOMIC64_OP(op, c_op)						\
+static __inline__ long							\
+ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
+{									\
+	__s64 old, new;							\
+	CMPXCHG_BUGCHECK_DECL						\
+									\
+	do {								\
+		CMPXCHG_BUGCHECK(v);					\
+		old = atomic64_read(v);					\
+		new = old c_op i;					\
+	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+	return new;							\
 }
 
+ATOMIC64_OP(add, +)
+ATOMIC64_OP(sub, -)
+
+#undef ATOMIC64_OP
+
+#define atomic64_add_return(i,v)					\
+({									\
+	long __ia64_aar_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
+	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
+	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
+	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
+		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
+		: ia64_atomic64_add(__ia64_aar_i, v);			\
+})
+
+#define atomic64_sub_return(i,v)					\
+({									\
+	long __ia64_asr_i = (i);					\
+	(__builtin_constant_p(i)					\
+	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
+	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
+	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
+	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic64_sub(__ia64_asr_i, v);			\
+})
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_return(i,v)						\
-({									\
-	int __ia64_aar_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic_add(__ia64_aar_i, v);			\
-})
-
-#define atomic64_add_return(i,v)					\
-({									\
-	long __ia64_aar_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic64_add(__ia64_aar_i, v);			\
-})
-
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 	return atomic64_add_return(i, v) < 0;
 }
 
-#define atomic_sub_return(i,v)						\
-({									\
-	int __ia64_asr_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic_sub(__ia64_asr_i, v);			\
-})
-
-#define atomic64_sub_return(i,v)					\
-({									\
-	long __ia64_asr_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic64_sub(__ia64_asr_i, v);			\
-})
-
 #define atomic_dec_return(v)		atomic_sub_return(1, (v))
 #define atomic_inc_return(v)		atomic_add_return(1, (v))
 #define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
 
-#define atomic_add(i,v)			atomic_add_return((i), (v))
-#define atomic_sub(i,v)			atomic_sub_return((i), (v))
+#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
+#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
 #define atomic_inc(v)			atomic_add(1, (v))
 #define atomic_dec(v)			atomic_sub(1, (v))
 
-#define atomic64_add(i,v)		atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
+#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
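Two things happen in the atomic.h diff above. First, the four hand-written compare-and-swap loops (ia64_atomic_add/sub and their 64-bit twins) are folded into ATOMIC_OP/ATOMIC64_OP templates that stamp out one function per operator; atomic_read also switches from an open-coded volatile cast to ACCESS_ONCE(), and atomic_add/atomic_sub gain a (void) cast so callers cannot rely on a return value. Second, the *_return wrappers keep their __builtin_constant_p dispatch: when the increment is a compile-time constant the IA64 fetchadd instruction can encode (+/-1, 4, 8, 16), the cheap fetch-and-add path is taken, otherwise the cmpxchg loop is used. A portable user-space sketch of both patterns, with GCC's __sync builtins standing in for ia64_cmpxchg and ia64_fetch_and_add (names and simplifications here are illustrative, not the kernel's):

    #include <stdio.h>

    typedef struct { int counter; } atomic_t;

    /*
     * ATOMIC_OP-style template: one macro generates a CAS loop per
     * operator.  __sync_val_compare_and_swap returns the old value,
     * so the loop retries whenever another thread raced with us.
     */
    #define ATOMIC_OP(op, c_op)                                        \
    static inline int ia64_atomic_##op(int i, atomic_t *v)             \
    {                                                                  \
            int old, new;                                              \
                                                                       \
            do {                                                       \
                    old = v->counter;                                  \
                    new = old c_op i;                                  \
            } while (__sync_val_compare_and_swap(&v->counter,          \
                                                 old, new) != old);    \
            return new;                                                \
    }

    ATOMIC_OP(add, +)
    ATOMIC_OP(sub, -)

    #undef ATOMIC_OP

    /*
     * Constant-dispatch sketch: a constant increment that fetchadd
     * could encode takes the cheap path; anything else falls back to
     * the CAS loop above.  __sync_add_and_fetch models the
     * "add and return the new value" semantics of ia64_fetch_and_add.
     */
    #define atomic_add_return(i, v)                                    \
    ({                                                                 \
            int __i = (i);                                             \
            (__builtin_constant_p(i) &&                                \
             (__i == 1 || __i == 4 || __i == 8 || __i == 16 ||         \
              __i == -1 || __i == -4 || __i == -8 || __i == -16))      \
                    ? __sync_add_and_fetch(&(v)->counter, __i)         \
                    : ia64_atomic_add(__i, v);                         \
    })

    int main(void)
    {
            atomic_t v = { 0 };

            printf("%d\n", atomic_add_return(4, &v)); /* fetch-and-add path */
            printf("%d\n", ia64_atomic_sub(3, &v));   /* CAS-loop path */
            return 0;
    }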
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index db95f570705f..4729752b7256 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -234,9 +234,6 @@ struct kvm_vm_data {
 #define KVM_REQ_PTC_G		32
 #define KVM_REQ_RESUME		33
 
-struct kvm;
-struct kvm_vcpu;
-
 struct kvm_mmio_req {
 	uint64_t addr;		/* physical address */
 	uint64_t size;		/* size in bytes */
@@ -595,6 +592,18 @@ void kvm_sal_emul(struct kvm_vcpu *vcpu);
 struct kvm *kvm_arch_alloc_vm(void);
 void kvm_arch_free_vm(struct kvm *kvm);
 
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_commit_memory_region(struct kvm *kvm,
+		struct kvm_userspace_memory_region *mem,
+		const struct kvm_memory_slot *old,
+		enum kvm_mr_change change) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+
 #endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index f1e1b2e3cdb3..1f1bf144fe62 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -231,4 +231,6 @@ get_order (unsigned long size)
 #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
 #define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)
 
+#define __HAVE_ARCH_GATE_AREA	1
+
 #endif /* _ASM_IA64_PAGE_H */
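The kvm_host.h hunk uses a common kernel idiom: an architecture that needs no work for a given hook supplies an empty static inline, so generic code can call the hook unconditionally and the compiler optimizes the call away, with no per-arch #ifdef. A compilable sketch of the idea (kvm_destroy_vm here is a simplified stand-in for the generic caller, not the kernel's actual function body):

    #include <stdio.h>

    struct kvm { int id; };

    /* Arch hook this architecture does not need: an empty inline no-op. */
    static inline void kvm_arch_sync_events(struct kvm *kvm) {}

    /* Simplified generic-code caller: no #ifdef around the hook. */
    static void kvm_destroy_vm(struct kvm *kvm)
    {
            kvm_arch_sync_events(kvm);  /* compiles to nothing here */
            printf("vm %d destroyed\n", kvm->id);
    }

    int main(void)
    {
            struct kvm vm = { .id = 1 };
            kvm_destroy_vm(&vm);
            return 0;
    }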
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index c7367130ab14..ce53c50d0ba4 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -19,7 +19,6 @@
 #include <asm/ptrace.h>
 #include <asm/ustack.h>
 
-#define __ARCH_WANT_UNLOCKED_CTXSW
 #define ARCH_HAS_PREFETCH_SWITCH_STACK
 
 #define IA64_NUM_PHYS_STACK_REG	96
diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h
deleted file mode 100644
index 08fd93bff1db..000000000000
--- a/arch/ia64/include/asm/scatterlist.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_IA64_SCATTERLIST_H
-#define _ASM_IA64_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-#define ARCH_HAS_SG_CHAIN
-
-#endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index fb13dc5e8f8c..10a14ead70b9 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls			315 /* length of syscall table */
+#define NR_syscalls			317 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
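The deleted scatterlist.h wrapper is not lost: the matching "generic-y += scatterlist.h" line added to Kbuild at the top of this diff makes the build generate a redirect header under arch/ia64/include/generated/asm/ instead (the ARCH_HAS_SG_CHAIN define it carried was converted to a Kconfig symbol separately). The generated file is equivalent to the one-liner below (a sketch; the exact banner comment Kbuild emits may differ):

    /* generated by Kbuild from "generic-y += scatterlist.h" */
    #include <asm-generic/scatterlist.h>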