Diffstat (limited to 'arch/s390/include')
-rw-r--r-- | arch/s390/include/asm/atomic.h | 98
-rw-r--r-- | arch/s390/include/asm/atomic_ops.h | 76
-rw-r--r-- | arch/s390/include/asm/bitops.h | 93
-rw-r--r-- | arch/s390/include/asm/ccwdev.h | 3
-rw-r--r-- | arch/s390/include/asm/cmpxchg.h | 168
-rw-r--r-- | arch/s390/include/asm/entry-common.h | 4
-rw-r--r-- | arch/s390/include/asm/idle.h | 12
-rw-r--r-- | arch/s390/include/asm/pci.h | 10
-rw-r--r-- | arch/s390/include/asm/qdio.h | 22
-rw-r--r-- | arch/s390/include/asm/spinlock.h | 2
-rw-r--r-- | arch/s390/include/asm/spinlock_types.h | 2
-rw-r--r-- | arch/s390/include/asm/stacktrace.h | 1
-rw-r--r-- | arch/s390/include/asm/timex.h | 36
-rw-r--r-- | arch/s390/include/asm/vdso/data.h | 2
-rw-r--r-- | arch/s390/include/uapi/asm/hwctrset.h (renamed from arch/s390/include/uapi/asm/perf_cpum_cf_diag.h) | 0
15 files changed, 359 insertions, 170 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 5860ae790f2d..7c93c6573524 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,48 +15,46 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-static inline int atomic_read(const atomic_t *v)
+static inline int arch_atomic_read(const atomic_t *v)
 {
-	int c;
-
-	asm volatile(
-		"	l	%0,%1\n"
-		: "=d" (c) : "Q" (v->counter));
-	return c;
+	return __atomic_read(v);
 }
+#define arch_atomic_read arch_atomic_read
 
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
 {
-	asm volatile(
-		"	st	%1,%0\n"
-		: "=Q" (v->counter) : "d" (i));
+	__atomic_set(v, i);
 }
+#define arch_atomic_set arch_atomic_set
 
-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int arch_atomic_add_return(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter) + i;
 }
+#define arch_atomic_add_return arch_atomic_add_return
 
-static inline int atomic_fetch_add(int i, atomic_t *v)
+static inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter);
 }
+#define arch_atomic_fetch_add arch_atomic_fetch_add
 
-static inline void atomic_add(int i, atomic_t *v)
+static inline void arch_atomic_add(int i, atomic_t *v)
 {
 	__atomic_add(i, &v->counter);
 }
+#define arch_atomic_add arch_atomic_add
 
-#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
-#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
-#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
+#define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
+#define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
+#define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)
 
 #define ATOMIC_OPS(op)							\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
 	__atomic_##op(i, &v->counter);					\
 }									\
-static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
 	return __atomic_##op##_barrier(i, &v->counter);			\
 }
@@ -67,60 +65,67 @@ ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_and			arch_atomic_and
+#define arch_atomic_or			arch_atomic_or
+#define arch_atomic_xor			arch_atomic_xor
+#define arch_atomic_fetch_and		arch_atomic_fetch_and
+#define arch_atomic_fetch_or		arch_atomic_fetch_or
+#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
+
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return __atomic_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
 #define ATOMIC64_INIT(i)  { (i) }
 
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	s64 c;
-
-	asm volatile(
-		"	lg	%0,%1\n"
-		: "=d" (c) : "Q" (v->counter));
-	return c;
+	return __atomic64_read(v);
 }
+#define arch_atomic64_read arch_atomic64_read
 
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	asm volatile(
-		"	stg	%1,%0\n"
-		: "=Q" (v->counter) : "d" (i));
+	__atomic64_set(v, i);
 }
+#define arch_atomic64_set arch_atomic64_set
 
-static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
-static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-static inline void atomic64_add(s64 i, atomic64_t *v)
+static inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__atomic64_add(i, (long *)&v->counter);
 }
+#define arch_atomic64_add arch_atomic64_add
 
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
 #define ATOMIC64_OPS(op)						\
-static inline void atomic64_##op(s64 i, atomic64_t *v)			\
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
 {									\
 	__atomic64_##op(i, (long *)&v->counter);			\
 }									\
-static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
+static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
 {									\
 	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
 }
@@ -131,8 +136,17 @@ ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
 
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)
+#define arch_atomic64_and		arch_atomic64_and
+#define arch_atomic64_or		arch_atomic64_or
+#define arch_atomic64_xor		arch_atomic64_xor
+#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
+
+#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
+#define arch_atomic64_fetch_sub(_i, _v)	 arch_atomic64_fetch_add(-(s64)(_i), _v)
+#define arch_atomic64_sub(_i, _v)	 arch_atomic64_add(-(s64)(_i), _v)
+
+#define ARCH_ATOMIC
 
 #endif /* __ARCH_S390_ATOMIC__ */
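With ARCH_ATOMIC defined, the generic <linux/atomic.h> layer wraps these arch_atomic_* operations in instrumented atomic_* entry points, and the sub family simply folds onto add by negation. A minimal sketch of that folding in kernel context (the demo function and values are hypothetical):

	#include <linux/atomic.h>

	static atomic_t refs = ATOMIC_INIT(3);

	static void refs_demo(void)
	{
		/* atomic_sub(2, ...) ends up in arch_atomic_add(-2, ...) */
		atomic_sub(2, &refs);
		WARN_ON(atomic_read(&refs) != 1);
	}
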
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 61467b9eecc7..50510e08b893 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -8,6 +8,40 @@
 #ifndef __ARCH_S390_ATOMIC_OPS__
 #define __ARCH_S390_ATOMIC_OPS__
 
+static inline int __atomic_read(const atomic_t *v)
+{
+	int c;
+
+	asm volatile(
+		"	l	%0,%1\n"
+		: "=d" (c) : "R" (v->counter));
+	return c;
+}
+
+static inline void __atomic_set(atomic_t *v, int i)
+{
+	asm volatile(
+		"	st	%1,%0\n"
+		: "=R" (v->counter) : "d" (i));
+}
+
+static inline s64 __atomic64_read(const atomic64_t *v)
+{
+	s64 c;
+
+	asm volatile(
+		"	lg	%0,%1\n"
+		: "=d" (c) : "RT" (v->counter));
+	return c;
+}
+
+static inline void __atomic64_set(atomic64_t *v, s64 i)
+{
+	asm volatile(
+		"	stg	%1,%0\n"
+		: "=RT" (v->counter) : "d" (i));
+}
+
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
@@ -18,7 +52,7 @@ static inline op_type op_name(op_type val, op_type *ptr)	\
 	asm volatile(							\
 		op_string "	%[old],%[val],%[ptr]\n"			\
 		op_barrier						\
-		: [old] "=d" (old), [ptr] "+Q" (*ptr)			\
+		: [old] "=d" (old), [ptr] "+QS" (*ptr)			\
 		: [val] "d" (val) : "cc", "memory");			\
 	return old;							\
 }									\
@@ -46,7 +80,7 @@ static __always_inline void op_name(op_type val, op_type *ptr)	\
 	asm volatile(							\
 		op_string "	%[ptr],%[val]\n"			\
 		op_barrier						\
-		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");\
+		: [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
 }
 
 #define __ATOMIC_CONST_OPS(op_name, op_type, op_string)			\
@@ -97,7 +131,7 @@ static inline long op_name(long val, long *ptr)		\
 		op_string "	%[new],%[val]\n"			\
 		"	csg	%[old],%[new],%[ptr]\n"			\
 		"	jl	0b"					\
-		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\
 		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
 	return old;							\
 }
@@ -122,22 +156,46 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 static inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
-	return __sync_val_compare_and_swap(ptr, old, new);
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old;
 }
 
-static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 {
-	return __sync_bool_compare_and_swap(ptr, old, new);
+	int old_expected = old;
+
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old == old_expected;
 }
 
 static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
-	return __sync_val_compare_and_swap(ptr, old, new);
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+QS" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old;
 }
 
-static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
 {
-	return __sync_bool_compare_and_swap(ptr, old, new);
+	long old_expected = old;
+
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+QS" (*ptr)
+		: [new] "d" (new)
+		: "cc", "memory");
+	return old == old_expected;
 }
 
 #endif /* __ARCH_S390_ATOMIC_OPS__ */
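The rewritten __atomic_cmpxchg() keeps the CS/CSG contract of returning the value actually found at the location, so callers compare it against their expected value themselves. A sketch of the usual retry loop built on that contract (the add_saturating() helper is hypothetical):

	/* Saturating add on top of a cmpxchg that returns the old value. */
	static inline int add_saturating(int *ptr, int inc, int limit)
	{
		int old = *ptr;

		for (;;) {
			int new = old + inc > limit ? limit : old + inc;
			int prev = __atomic_cmpxchg(ptr, old, new);

			if (prev == old)	/* swap succeeded */
				return new;
			old = prev;		/* raced, retry with current value */
		}
	}
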
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 31121d32f81d..68da67d2c4c9 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,7 +42,7 @@
 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 static inline unsigned long *
-__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
 {
 	unsigned long addr;
 
@@ -50,37 +50,33 @@ __bitops_word(unsigned long nr, volatile unsigned long *ptr)
 	return (unsigned long *)addr;
 }
 
-static inline unsigned char *
-__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long __bitops_mask(unsigned long nr)
 {
-	return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+	return 1UL << (nr & (BITS_PER_LONG - 1));
 }
 
 static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask;
+	unsigned long mask = __bitops_mask(nr);
 
-	mask = 1UL << (nr & (BITS_PER_LONG - 1));
 	__atomic64_or(mask, (long *)addr);
 }
 
 static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask;
+	unsigned long mask = __bitops_mask(nr);
 
-	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	__atomic64_and(mask, (long *)addr);
+	__atomic64_and(~mask, (long *)addr);
 }
 
 static __always_inline void arch_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long mask;
+	unsigned long mask = __bitops_mask(nr);
 
-	mask = 1UL << (nr & (BITS_PER_LONG - 1));
 	__atomic64_xor(mask, (long *)addr);
 }
 
@@ -88,99 +84,104 @@ static inline bool arch_test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long old, mask;
+	unsigned long mask = __bitops_mask(nr);
+	unsigned long old;
 
-	mask = 1UL << (nr & (BITS_PER_LONG - 1));
 	old = __atomic64_or_barrier(mask, (long *)addr);
-	return (old & mask) != 0;
+	return old & mask;
 }
 
 static inline bool arch_test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long old, mask;
+	unsigned long mask = __bitops_mask(nr);
+	unsigned long old;
 
-	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	old = __atomic64_and_barrier(mask, (long *)addr);
-	return (old & ~mask) != 0;
+	old = __atomic64_and_barrier(~mask, (long *)addr);
+	return old & mask;
 }
 
 static inline bool arch_test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
-	unsigned long old, mask;
+	unsigned long mask = __bitops_mask(nr);
+	unsigned long old;
 
-	mask = 1UL << (nr & (BITS_PER_LONG - 1));
 	old = __atomic64_xor_barrier(mask, (long *)addr);
-	return (old & mask) != 0;
+	return old & mask;
 }
 
 static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned char *addr = __bitops_byte(nr, ptr);
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask = __bitops_mask(nr);
 
-	*addr |= 1 << (nr & 7);
+	*addr |= mask;
 }
 
 static inline void arch___clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned char *addr = __bitops_byte(nr, ptr);
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask = __bitops_mask(nr);
 
-	*addr &= ~(1 << (nr & 7));
+	*addr &= ~mask;
 }
 
 static inline void arch___change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned char *addr = __bitops_byte(nr, ptr);
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask = __bitops_mask(nr);
 
-	*addr ^= 1 << (nr & 7);
+	*addr ^= mask;
 }
 
 static inline bool arch___test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned char *addr = __bitops_byte(nr, ptr);
-	unsigned char ch;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask = __bitops_mask(nr);
+	unsigned long old;
 
-	ch = *addr;
-	*addr |= 1 << (nr & 7);
-	return (ch >> (nr & 7)) & 1;
+	old = *addr;
+	*addr |= mask;
+	return old & mask;
 }
 
 static inline bool arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned char *addr = __bitops_byte(nr, ptr);
-	unsigned char ch;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask = __bitops_mask(nr);
+	unsigned long old;
 
-	ch = *addr;
-	*addr &= ~(1 << (nr & 7));
-	return (ch >> (nr & 7)) & 1;
+	old = *addr;
+	*addr &= ~mask;
+	return old & mask;
 }
 
 static inline bool arch___test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned char *addr = __bitops_byte(nr, ptr);
-	unsigned char ch;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask = __bitops_mask(nr);
+	unsigned long old;
 
-	ch = *addr;
-	*addr ^= 1 << (nr & 7);
-	return (ch >> (nr & 7)) & 1;
+	old = *addr;
+	*addr ^= mask;
+	return old & mask;
 }
 
 static inline bool arch_test_bit(unsigned long nr, const volatile unsigned long *ptr)
 {
-	const volatile unsigned char *addr;
+	const volatile unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask = __bitops_mask(nr);
 
-	addr = ((const volatile unsigned char *)ptr);
-	addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
-	return (*addr >> (nr & 7)) & 1;
+	return *addr & mask;
 }
 
 static inline bool arch_test_and_set_bit_lock(unsigned long nr,
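All non-atomic bitops now operate on the full aligned long instead of a single byte: __bitops_word() locates the containing word and __bitops_mask() builds the bit mask. A worked example, assuming BITS_PER_LONG == 64: for nr = 70, the word index is 70 / 64 = 1 and the mask is 1UL << (70 & 63) = 1UL << 6 = 0x40.

	unsigned long map[2] = { 0, 0 };

	arch___set_bit(70, map);		/* map[1] |= 0x40 */
	arch___test_and_clear_bit(70, map);	/* returns true, clears the bit */
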
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 778247bb1d61..d4e90f2ba77e 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -152,9 +152,6 @@ extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
  * when new devices for its type pop up */
 extern int  ccw_driver_register   (struct ccw_driver *driver);
 extern void ccw_driver_unregister (struct ccw_driver *driver);
-
-struct ccw1;
-
 extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long);
 extern int ccw_device_set_options(struct ccw_device *, unsigned long);
 extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index af99c1f66f12..1960a7295ae5 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -12,27 +12,163 @@
 #include <linux/types.h>
 #include <linux/bug.h>
 
-#define cmpxchg(ptr, o, n)						\
+void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long __xchg(unsigned long x,
+					    unsigned long address, int size)
+{
+	unsigned long old;
+	int shift;
+
+	switch (size) {
+	case 1:
+		shift = (3 ^ (address & 3)) << 3;
+		address ^= address & 3;
+		asm volatile(
+			"	l	%0,%1\n"
+			"0:	lr	0,%0\n"
+			"	nr	0,%3\n"
+			"	or	0,%2\n"
+			"	cs	%0,0,%1\n"
+			"	jl	0b\n"
+			: "=&d" (old), "+Q" (*(int *) address)
+			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
+			: "memory", "cc", "0");
+		return old >> shift;
+	case 2:
+		shift = (2 ^ (address & 2)) << 3;
+		address ^= address & 2;
+		asm volatile(
+			"	l	%0,%1\n"
+			"0:	lr	0,%0\n"
+			"	nr	0,%3\n"
+			"	or	0,%2\n"
+			"	cs	%0,0,%1\n"
+			"	jl	0b\n"
+			: "=&d" (old), "+Q" (*(int *) address)
+			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
+			: "memory", "cc", "0");
+		return old >> shift;
+	case 4:
+		asm volatile(
+			"	l	%0,%1\n"
+			"0:	cs	%0,%2,%1\n"
+			"	jl	0b\n"
+			: "=&d" (old), "+Q" (*(int *) address)
+			: "d" (x)
+			: "memory", "cc");
+		return old;
+	case 8:
+		asm volatile(
+			"	lg	%0,%1\n"
+			"0:	csg	%0,%2,%1\n"
+			"	jl	0b\n"
+			: "=&d" (old), "+QS" (*(long *) address)
+			: "d" (x)
+			: "memory", "cc");
+		return old;
+	}
+	__xchg_called_with_bad_pointer();
+	return x;
+}
+
+#define arch_xchg(ptr, x)						\
 ({									\
-	__typeof__(*(ptr)) __o = (o);					\
-	__typeof__(*(ptr)) __n = (n);					\
-	(__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
+	__typeof__(*(ptr)) __ret;					\
+									\
+	__ret = (__typeof__(*(ptr)))					\
+		__xchg((unsigned long)(x), (unsigned long)(ptr),	\
+		       sizeof(*(ptr)));					\
+	__ret;								\
 })
 
-#define cmpxchg64	cmpxchg
-#define cmpxchg_local	cmpxchg
-#define cmpxchg64_local	cmpxchg
+void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long __cmpxchg(unsigned long address,
+					       unsigned long old,
+					       unsigned long new, int size)
+{
+	unsigned long prev, tmp;
+	int shift;
+
+	switch (size) {
+	case 1:
+		shift = (3 ^ (address & 3)) << 3;
+		address ^= address & 3;
+		asm volatile(
+			"	l	%0,%2\n"
+			"0:	nr	%0,%5\n"
+			"	lr	%1,%0\n"
+			"	or	%0,%3\n"
+			"	or	%1,%4\n"
+			"	cs	%0,%1,%2\n"
+			"	jnl	1f\n"
+			"	xr	%1,%0\n"
+			"	nr	%1,%5\n"
+			"	jnz	0b\n"
+			"1:"
+			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
+			: "d" ((old & 0xff) << shift),
+			  "d" ((new & 0xff) << shift),
+			  "d" (~(0xff << shift))
+			: "memory", "cc");
+		return prev >> shift;
+	case 2:
+		shift = (2 ^ (address & 2)) << 3;
+		address ^= address & 2;
+		asm volatile(
+			"	l	%0,%2\n"
+			"0:	nr	%0,%5\n"
+			"	lr	%1,%0\n"
+			"	or	%0,%3\n"
+			"	or	%1,%4\n"
+			"	cs	%0,%1,%2\n"
+			"	jnl	1f\n"
+			"	xr	%1,%0\n"
+			"	nr	%1,%5\n"
+			"	jnz	0b\n"
+			"1:"
+			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
+			: "d" ((old & 0xffff) << shift),
+			  "d" ((new & 0xffff) << shift),
+			  "d" (~(0xffff << shift))
+			: "memory", "cc");
+		return prev >> shift;
+	case 4:
+		asm volatile(
+			"	cs	%0,%3,%1\n"
+			: "=&d" (prev), "+Q" (*(int *) address)
+			: "0" (old), "d" (new)
+			: "memory", "cc");
+		return prev;
+	case 8:
+		asm volatile(
+			"	csg	%0,%3,%1\n"
+			: "=&d" (prev), "+QS" (*(long *) address)
+			: "0" (old), "d" (new)
+			: "memory", "cc");
+		return prev;
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
 
-#define xchg(ptr, x)							\
+#define arch_cmpxchg(ptr, o, n)						\
 ({									\
-	__typeof__(ptr) __ptr = (ptr);					\
-	__typeof__(*(ptr)) __old;					\
-	do {								\
-		__old = *__ptr;						\
-	} while (!__sync_bool_compare_and_swap(__ptr, __old, x));	\
-	__old;								\
+	__typeof__(*(ptr)) __ret;					\
+									\
+	__ret = (__typeof__(*(ptr)))					\
+		__cmpxchg((unsigned long)(ptr), (unsigned long)(o),	\
+			  (unsigned long)(n), sizeof(*(ptr)));		\
+	__ret;								\
 })
 
+#define arch_cmpxchg64		arch_cmpxchg
+#define arch_cmpxchg_local	arch_cmpxchg
+#define arch_cmpxchg64_local	arch_cmpxchg
+
+#define system_has_cmpxchg_double()	1
+
 #define __cmpxchg_double(p1, p2, o1, o2, n1, n2)			\
 ({									\
 	register __typeof__(*(p1)) __old1 asm("2") = (o1);		\
@@ -51,7 +187,7 @@
 	!cc;								\
 })
 
-#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
+#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2)			\
 ({									\
 	__typeof__(p1) __p1 = (p1);					\
 	__typeof__(p2) __p2 = (p2);					\
@@ -61,6 +197,4 @@
 	__cmpxchg_double(__p1, __p2, o1, o2, n1, n2);			\
 })
 
-#define system_has_cmpxchg_double()	1
-
 #endif /* __ASM_CMPXCHG_H */
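For 1- and 2-byte operands there is no compare-and-swap instruction, so __xchg() and __cmpxchg() operate on the aligned 4-byte word containing the operand: address ^= address & 3 rounds the address down, and shift = (3 ^ (address & 3)) << 3 turns the big-endian byte offset into a bit position (byte 0 sits in bits 31..24, so a byte at offset 2 gets shift (3 ^ 2) << 3 = 8). A portable sketch of the same merge-and-retry idea, using the GCC __atomic builtins rather than the kernel's inline assembly:

	#include <stdint.h>
	#include <stdbool.h>

	/* Exchange one byte inside a 32-bit word (big-endian layout). */
	static uint8_t xchg_byte(uint32_t *word, unsigned int offset, uint8_t x)
	{
		unsigned int shift = (3 ^ offset) << 3;
		uint32_t old = __atomic_load_n(word, __ATOMIC_RELAXED);
		uint32_t new;

		do {	/* on failure, old is reloaded with the current value */
			new = (old & ~(0xffu << shift)) | ((uint32_t)x << shift);
		} while (!__atomic_compare_exchange_n(word, &old, new, false,
						      __ATOMIC_SEQ_CST,
						      __ATOMIC_SEQ_CST));
		return old >> shift;
	}
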
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 75cebc80474e..9cceb26ed63f 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -14,10 +14,6 @@
 void do_per_trap(struct pt_regs *regs);
 void do_syscall(struct pt_regs *regs);
 
-typedef void (*pgm_check_func)(struct pt_regs *regs);
-
-extern pgm_check_func pgm_check_table[128];
-
 #ifdef CONFIG_DEBUG_ENTRY
 static __always_inline void arch_check_user_regs(struct pt_regs *regs)
 {
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index b04f6a794cdf..5cea629c548e 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -14,12 +14,12 @@
 struct s390_idle_data {
 	seqcount_t seqcount;
-	unsigned long long idle_count;
-	unsigned long long idle_time;
-	unsigned long long clock_idle_enter;
-	unsigned long long clock_idle_exit;
-	unsigned long long timer_idle_enter;
-	unsigned long long timer_idle_exit;
+	unsigned long idle_count;
+	unsigned long idle_time;
+	unsigned long clock_idle_enter;
+	unsigned long clock_idle_exit;
+	unsigned long timer_idle_enter;
+	unsigned long timer_idle_exit;
 	unsigned long mt_cycles_enter[8];
 };
 
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 053fe8b8dec7..35c2af9371a9 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -85,7 +85,6 @@ enum zpci_state {
 	ZPCI_FN_STATE_STANDBY = 0,
 	ZPCI_FN_STATE_CONFIGURED = 1,
 	ZPCI_FN_STATE_RESERVED = 2,
-	ZPCI_FN_STATE_ONLINE = 3,
 };
 
 struct zpci_bar_struct {
@@ -131,9 +130,10 @@ struct zpci_dev {
 	u8		port;
 	u8		rid_available	: 1;
 	u8		has_hp_slot	: 1;
+	u8		has_resources	: 1;
 	u8		is_physfn	: 1;
 	u8		util_str_avail	: 1;
-	u8		reserved	: 4;
+	u8		reserved	: 3;
 	unsigned int	devfn;		/* DEVFN part of the RID*/
 
 	struct mutex lock;
@@ -201,10 +201,12 @@ extern unsigned int s390_pci_no_rid;
   Prototypes
 ----------------------------------------------------------------------------- */
 /* Base stuff */
-int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
-void zpci_remove_device(struct zpci_dev *zdev);
+struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
 int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
+int zpci_configure_device(struct zpci_dev *zdev, u32 fh);
+int zpci_deconfigure_device(struct zpci_dev *zdev);
+
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
 int zpci_unregister_ioat(struct zpci_dev *, u8);
 void zpci_remove_reserved_devices(void);
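zpci_create_device() now hands the new zpci_dev back to the caller, which can chain directly into the configure step instead of the old create/remove pairing. A hypothetical caller sketch, assuming failure is reported via ERR_PTR() as is common elsewhere in the kernel (the configured flag stands in for whatever the availability-event handler knows):

	struct zpci_dev *zdev;

	zdev = zpci_create_device(fid, fh, ZPCI_FN_STATE_STANDBY);
	if (IS_ERR(zdev))
		return PTR_ERR(zdev);
	if (configured)		/* hypothetical flag from the event handler */
		rc = zpci_configure_device(zdev, fh);
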
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index d9215c7106f0..8fc52679543d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -246,21 +246,8 @@ struct slsb {
 	u8 val[QDIO_MAX_BUFFERS_PER_Q];
 } __attribute__ ((packed, aligned(256)));
 
-/**
- * struct qdio_outbuf_state - SBAL related asynchronous operation information
- *   (for communication with upper layer programs)
- *   (only required for use with completion queues)
- * @user: pointer to upper layer program's state information related to SBAL
- *        (stored in user1 data of QAOB)
- */
-struct qdio_outbuf_state {
-	void *user;
-};
-
-#define CHSC_AC1_INITIATE_INPUTQ	0x80
-
-
 /* qdio adapter-characteristics-1 flag */
+#define CHSC_AC1_INITIATE_INPUTQ	0x80
 #define AC1_SIGA_INPUT_NEEDED		0x40	/* process input queues */
 #define AC1_SIGA_OUTPUT_NEEDED		0x20	/* process output queues */
 #define AC1_SIGA_SYNC_NEEDED		0x10	/* ask hypervisor to sync */
@@ -338,7 +325,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @int_parm: interruption parameter
  * @input_sbal_addr_array:  per-queue array, each element points to 128 SBALs
  * @output_sbal_addr_array: per-queue array, each element points to 128 SBALs
- * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
  */
 struct qdio_initialize {
 	unsigned char q_format;
@@ -357,7 +343,6 @@ struct qdio_initialize {
 	unsigned long int_parm;
 	struct qdio_buffer ***input_sbal_addr_array;
 	struct qdio_buffer ***output_sbal_addr_array;
-	struct qdio_outbuf_state *output_sbal_state_array;
 };
 
 #define QDIO_STATE_INACTIVE		0x00000002 /* after qdio_cleanup */
@@ -378,9 +363,10 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
 extern int qdio_establish(struct ccw_device *cdev,
 			  struct qdio_initialize *init_data);
 extern int qdio_activate(struct ccw_device *);
+extern struct qaob *qdio_allocate_aob(void);
 extern void qdio_release_aob(struct qaob *);
-extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
-		   unsigned int);
+extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
+		   unsigned int bufnr, unsigned int count, struct qaob *aob);
 extern int qdio_start_irq(struct ccw_device *cdev);
 extern int qdio_stop_irq(struct ccw_device *cdev);
 extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
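do_QDIO() now takes the QAOB directly, so completion-queue users allocate one per outstanding request with qdio_allocate_aob() while everyone else passes NULL. A sketch of the new calling convention (variable names are hypothetical; QDIO_FLAG_SYNC_OUTPUT is the existing output flag):

	struct qaob *aob = use_cq ? qdio_allocate_aob() : NULL;
	int rc;

	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, queue_nr, bufnr, 1, aob);
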
 */
		"	sth	%1,%0\n"
-		: "=Q" (((unsigned short *) &lp->lock)[1])
+		: "=R" (((unsigned short *) &lp->lock)[1])
		: "d" (0) : "cc", "memory");
 }
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index cfed272e4fd5..a2bbfd7df85f 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -8,7 +8,7 @@
 
 typedef struct {
 	int lock;
-} __attribute__ ((aligned (4))) arch_spinlock_t;
+} arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index ee056f4a4fa3..2b543163d90a 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -12,6 +12,7 @@ enum stack_type {
 	STACK_TYPE_IRQ,
 	STACK_TYPE_NODAT,
 	STACK_TYPE_RESTART,
+	STACK_TYPE_MCCK,
 };
 
 struct stack_info {
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index c4e23e925665..f6326c6d2abe 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -98,10 +98,10 @@ extern unsigned char ptff_function_mask[16];
 
 /* Query TOD offset result */
 struct ptff_qto {
-	unsigned long long physical_clock;
-	unsigned long long tod_offset;
-	unsigned long long logical_tod_offset;
-	unsigned long long tod_epoch_difference;
+	unsigned long physical_clock;
+	unsigned long tod_offset;
+	unsigned long logical_tod_offset;
+	unsigned long tod_epoch_difference;
 } __packed;
 
 static inline int ptff_query(unsigned int nr)
@@ -151,9 +151,9 @@ struct ptff_qui {
 	rc;								\
 })
 
-static inline unsigned long long local_tick_disable(void)
+static inline unsigned long local_tick_disable(void)
 {
-	unsigned long long old;
+	unsigned long old;
 
 	old = S390_lowcore.clock_comparator;
 	S390_lowcore.clock_comparator = clock_comparator_max;
@@ -161,7 +161,7 @@ static inline unsigned long long local_tick_disable(void)
 	return old;
 }
 
-static inline void local_tick_enable(unsigned long long comp)
+static inline void local_tick_enable(unsigned long comp)
 {
 	S390_lowcore.clock_comparator = comp;
 	set_clock_comparator(S390_lowcore.clock_comparator);
@@ -169,9 +169,9 @@ static inline void local_tick_enable(unsigned long long comp)
 
 #define CLOCK_TICK_RATE		1193180 /* Underlying HZ */
 
-typedef unsigned long long cycles_t;
+typedef unsigned long cycles_t;
 
-static inline unsigned long long get_tod_clock(void)
+static inline unsigned long get_tod_clock(void)
 {
 	union tod_clock clk;
 
@@ -179,10 +179,10 @@ static inline unsigned long long get_tod_clock(void)
 	return clk.tod;
 }
 
-static inline unsigned long long get_tod_clock_fast(void)
+static inline unsigned long get_tod_clock_fast(void)
 {
 #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-	unsigned long long clk;
+	unsigned long clk;
 
 	asm volatile("stckf %0" : "=Q" (clk) : : "cc");
 	return clk;
@@ -208,9 +208,9 @@ extern union tod_clock tod_clock_base;
  * Therefore preemption must be disabled, otherwise the returned
  * value is not guaranteed to be monotonic.
 */
-static inline unsigned long long get_tod_clock_monotonic(void)
+static inline unsigned long get_tod_clock_monotonic(void)
 {
-	unsigned long long tod;
+	unsigned long tod;
 
 	preempt_disable_notrace();
 	tod = get_tod_clock() - tod_clock_base.tod;
@@ -237,7 +237,7 @@ static inline unsigned long long get_tod_clock_monotonic(void)
  *    -> ns = (th * 125) + ((tl * 125) >> 9);
  *
  */
-static inline unsigned long long tod_to_ns(unsigned long long todval)
+static inline unsigned long tod_to_ns(unsigned long todval)
 {
 	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
 }
@@ -249,10 +249,10 @@ static inline unsigned long long tod_to_ns(unsigned long long todval)
 *
 * Returns: true if a is later than b
 */
-static inline int tod_after(unsigned long long a, unsigned long long b)
+static inline int tod_after(unsigned long a, unsigned long b)
 {
 	if (MACHINE_HAS_SCC)
-		return (long long) a > (long long) b;
+		return (long) a > (long) b;
 	return a > b;
 }
 
@@ -263,10 +263,10 @@ static inline int tod_after(unsigned long long a, unsigned long long b)
 *
 * Returns: true if a is later than b
 */
-static inline int tod_after_eq(unsigned long long a, unsigned long long b)
+static inline int tod_after_eq(unsigned long a, unsigned long b)
 {
 	if (MACHINE_HAS_SCC)
-		return (long long) a >= (long long) b;
+		return (long) a >= (long) b;
 	return a >= b;
 }
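The tod_to_ns() conversion survives the switch to unsigned long unchanged: one TOD clock unit is 1/4096 microsecond, i.e. 125/512 ns, and the split into high and low parts avoids overflowing the multiplication. A quick sanity check of the identity:

	/* 4096 TOD units = 1 microsecond, so the result must be 1000 ns. */
	unsigned long todval = 4096;
	unsigned long ns = ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
	/* (4096 >> 9) * 125 = 8 * 125 = 1000; the low nine bits are zero. */
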
diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h
index 7b3cdb4a5f48..73ee89142666 100644
--- a/arch/s390/include/asm/vdso/data.h
+++ b/arch/s390/include/asm/vdso/data.h
@@ -6,7 +6,7 @@
 
 #include <vdso/datapage.h>
 
 struct arch_vdso_data {
-	__u64 tod_steering_delta;
+	__s64 tod_steering_delta;
 	__u64 tod_steering_end;
 };
 
diff --git a/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h b/arch/s390/include/uapi/asm/hwctrset.h
index 3d8284b95f87..3d8284b95f87 100644
--- a/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h
+++ b/arch/s390/include/uapi/asm/hwctrset.h