Diffstat (limited to 'arch/alpha/include/asm')
-rw-r--r--  arch/alpha/include/asm/Kbuild            |  15
-rw-r--r--  arch/alpha/include/asm/a.out.h           |  16
-rw-r--r--  arch/alpha/include/asm/agp.h             |  19
-rw-r--r--  arch/alpha/include/asm/asm-offsets.h     |   1
-rw-r--r--  arch/alpha/include/asm/asm-prototypes.h  |   1
-rw-r--r--  arch/alpha/include/asm/atomic.h          | 118
-rw-r--r--  arch/alpha/include/asm/barrier.h         |  59
-rw-r--r--  arch/alpha/include/asm/bitops.h          |  53
-rw-r--r--  arch/alpha/include/asm/bugs.h            |  20
-rw-r--r--  arch/alpha/include/asm/cacheflush.h      |  43
-rw-r--r--  arch/alpha/include/asm/checksum.h        |   6
-rw-r--r--  arch/alpha/include/asm/cmpxchg.h         | 253
-rw-r--r--  arch/alpha/include/asm/compiler.h        |  11
-rw-r--r--  arch/alpha/include/asm/core_apecs.h      | 518
-rw-r--r--  arch/alpha/include/asm/core_cia.h        |  22
-rw-r--r--  arch/alpha/include/asm/core_lca.h        | 362
-rw-r--r--  arch/alpha/include/asm/core_marvel.h     |   4
-rw-r--r--  arch/alpha/include/asm/core_mcpcia.h     |  28
-rw-r--r--  arch/alpha/include/asm/core_t2.h         |  24
-rw-r--r--  arch/alpha/include/asm/div64.h           |   1
-rw-r--r--  arch/alpha/include/asm/dma-mapping.h     |   6
-rw-r--r--  arch/alpha/include/asm/dma.h             |  18
-rw-r--r--  arch/alpha/include/asm/elf.h             |  16
-rw-r--r--  arch/alpha/include/asm/floppy.h          |  13
-rw-r--r--  arch/alpha/include/asm/fpu.h             |  61
-rw-r--r--  arch/alpha/include/asm/futex.h           |   5
-rw-r--r--  arch/alpha/include/asm/hwrpb.h           |   4
-rw-r--r--  arch/alpha/include/asm/io.h              | 276
-rw-r--r--  arch/alpha/include/asm/io_trivial.h      |  34
-rw-r--r--  arch/alpha/include/asm/irq.h             |  10
-rw-r--r--  arch/alpha/include/asm/irq_regs.h        |   1
-rw-r--r--  arch/alpha/include/asm/jensen.h          | 347
-rw-r--r--  arch/alpha/include/asm/kdebug.h          |   1
-rw-r--r--  arch/alpha/include/asm/kmap_types.h      |  15
-rw-r--r--  arch/alpha/include/asm/local.h           |  45
-rw-r--r--  arch/alpha/include/asm/local64.h         |   1
-rw-r--r--  arch/alpha/include/asm/machvec.h         |  23
-rw-r--r--  arch/alpha/include/asm/mmu_context.h     |  59
-rw-r--r--  arch/alpha/include/asm/mmzone.h          | 112
-rw-r--r--  arch/alpha/include/asm/page.h            |  21
-rw-r--r--  arch/alpha/include/asm/pci.h             |   9
-rw-r--r--  arch/alpha/include/asm/pgalloc.h         |  22
-rw-r--r--  arch/alpha/include/asm/pgtable.h         | 121
-rw-r--r--  arch/alpha/include/asm/processor.h       |  29
-rw-r--r--  arch/alpha/include/asm/ptrace.h          |   1
-rw-r--r--  arch/alpha/include/asm/rwonce.h          |  35
-rw-r--r--  arch/alpha/include/asm/setup.h           |  43
-rw-r--r--  arch/alpha/include/asm/sparsemem.h       |  18
-rw-r--r--  arch/alpha/include/asm/special_insns.h   |   5
-rw-r--r--  arch/alpha/include/asm/spinlock_types.h  |   4
-rw-r--r--  arch/alpha/include/asm/syscall.h         |   6
-rw-r--r--  arch/alpha/include/asm/termios.h         |  87
-rw-r--r--  arch/alpha/include/asm/thread_info.h     |  28
-rw-r--r--  arch/alpha/include/asm/timex.h           |   1
-rw-r--r--  arch/alpha/include/asm/tlbflush.h        |  38
-rw-r--r--  arch/alpha/include/asm/topology.h        |  39
-rw-r--r--  arch/alpha/include/asm/uaccess.h         | 133
-rw-r--r--  arch/alpha/include/asm/unaligned.h       |  12
-rw-r--r--  arch/alpha/include/asm/unistd.h          |   2
-rw-r--r--  arch/alpha/include/asm/user.h            |   6
-rw-r--r--  arch/alpha/include/asm/vga.h             |   2
-rw-r--r--  arch/alpha/include/asm/vmalloc.h         |   4
-rw-r--r--  arch/alpha/include/asm/xchg.h            | 246
-rw-r--r--  arch/alpha/include/asm/xor.h             |  53
64 files changed, 941 insertions(+), 2645 deletions(-)
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 89e87bbc987f..483965c5a4de 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -1,17 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
-generic-y += compat.h
-generic-y += exec.h
-generic-y += export.h
-generic-y += fb.h
-generic-y += irq_work.h
+generic-y += agp.h
+generic-y += asm-offsets.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += trace_clock.h
-generic-y += current.h
-generic-y += kprobes.h
+generic-y += text-patching.h
diff --git a/arch/alpha/include/asm/a.out.h b/arch/alpha/include/asm/a.out.h
deleted file mode 100644
index d2346b7caff1..000000000000
--- a/arch/alpha/include/asm/a.out.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_A_OUT_H__
-#define __ALPHA_A_OUT_H__
-
-#include <uapi/asm/a.out.h>
-
-
-/* Assume that start addresses below 4G belong to a TASO application.
- Unfortunately, there is no proper bit in the exec header to check.
- Worse, we have to notice the start address before swapping to use
- /sbin/loader, which of course is _not_ a TASO application. */
-#define SET_AOUT_PERSONALITY(BFPM, EX) \
- set_personality (((BFPM->taso || EX.ah.entry < 0x100000000L \
- ? ADDR_LIMIT_32BIT : 0) | PER_OSF4))
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/alpha/include/asm/agp.h b/arch/alpha/include/asm/agp.h
deleted file mode 100644
index 7173eada1567..000000000000
--- a/arch/alpha/include/asm/agp.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef AGP_H
-#define AGP_H 1
-
-#include <asm/io.h>
-
-/* dummy for now */
-
-#define map_page_into_agp(page)
-#define unmap_page_from_agp(page)
-#define flush_agp_cache() mb()
-
-/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-#define alloc_gatt_pages(order) \
- ((char *)__get_free_pages(GFP_KERNEL, (order)))
-#define free_gatt_pages(table, order) \
- free_pages((unsigned long)(table), (order))
-
-#endif
diff --git a/arch/alpha/include/asm/asm-offsets.h b/arch/alpha/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee36a182..000000000000
--- a/arch/alpha/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/alpha/include/asm/asm-prototypes.h b/arch/alpha/include/asm/asm-prototypes.h
index b34cc1f06720..c8ae46fc2e74 100644
--- a/arch/alpha/include/asm/asm-prototypes.h
+++ b/arch/alpha/include/asm/asm-prototypes.h
@@ -16,3 +16,4 @@ extern void __divlu(void);
extern void __remlu(void);
extern void __divqu(void);
extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long, unsigned long);
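The __udiv_qrnnd export above follows the GMP longlong.h udiv_qrnnd() convention: divide the 128-bit quantity (n1:n0), high word first, by d, return the quotient, and store the remainder through the pointer argument. A portable model of those semantics, assuming a compiler with unsigned __int128 (illustrative sketch, not part of the patch):

static unsigned long model_udiv_qrnnd(unsigned long *r,
				      unsigned long n1, unsigned long n0,
				      unsigned long d)
{
	/* Divide the 128-bit value n1:n0 by d; return the quotient
	   and store the remainder via *r.  Callers guarantee d != 0. */
	unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;

	*r = (unsigned long)(n % d);
	return (unsigned long)(n / d);
}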
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 2144530d1428..cbd9244571af 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -16,22 +16,21 @@
/*
* To ensure dependency ordering is preserved for the _relaxed and
- * _release atomics, an smp_read_barrier_depends() is unconditionally
- * inserted into the _relaxed variants, which are used to build the
- * barriered versions. Avoid redundant back-to-back fences in the
- * _acquire and _fence versions.
+ * _release atomics, an smp_mb() is unconditionally inserted into the
+ * _relaxed variants, which are used to build the barriered versions.
+ * Avoid redundant back-to-back fences in the _acquire and _fence
+ * versions.
*/
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()
-#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
/*
* To get proper branch prediction for the main line, we must branch
@@ -40,7 +39,7 @@
*/
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
@@ -56,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -70,12 +69,12 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_read_barrier_depends(); \
+ smp_mb(); \
return result; \
}
#define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -88,12 +87,12 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_read_barrier_depends(); \
+ smp_mb(); \
return result; \
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
s64 temp; \
__asm__ __volatile__( \
@@ -109,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -123,12 +123,13 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_read_barrier_depends(); \
+ smp_mb(); \
return result; \
}
#define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -141,7 +142,7 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
- smp_read_barrier_depends(); \
+ smp_mb(); \
return result; \
}
@@ -156,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
@@ -181,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
@@ -199,22 +200,7 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * atomic_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -235,18 +221,9 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-/**
- * atomic64_fetch_add_unless - add unless the number is a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
- */
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
smp_mb();
@@ -267,16 +244,9 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
smp_mb();
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;
smp_mb();
@@ -296,6 +266,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */
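For context on the empty __atomic_acquire_fence()/__atomic_post_full_fence() hooks above: the generic atomics layer derives the ordered variants from the _relaxed ones roughly as sketched below, and since Alpha's _relaxed ops already end in smp_mb(), the extra fences would be redundant (simplified sketch, not the exact fallback-header output):

static __always_inline int
arch_atomic_add_return_acquire(int i, atomic_t *v)
{
	/* The trailing smp_mb() inside the _relaxed op already
	   provides (more than) acquire ordering on Alpha, so
	   __atomic_acquire_fence() can expand to nothing. */
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}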
diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
index 92ec486a4f9e..c56bfffc9918 100644
--- a/arch/alpha/include/asm/barrier.h
+++ b/arch/alpha/include/asm/barrier.h
@@ -2,64 +2,15 @@
#ifndef __BARRIER_H
#define __BARRIER_H
-#include <asm/compiler.h>
-
#define mb() __asm__ __volatile__("mb": : :"memory")
#define rmb() __asm__ __volatile__("mb": : :"memory")
#define wmb() __asm__ __volatile__("wmb": : :"memory")
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * b = 2;
- * memory_barrier();
- * p = &b; q = p;
- * read_barrier_depends();
- * d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- * CPU 0 CPU 1
- *
- * a = 2;
- * memory_barrier();
- * b = 3; y = b;
- * read_barrier_depends();
- * x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
+#define __smp_load_acquire(p) \
+({ \
+ compiletime_assert_atomic_type(*p); \
+ __READ_ONCE(*p); \
+})
#ifdef CONFIG_SMP
#define __ASM_SMP_MB "\tmb\n"
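The pointer-publication pattern that the deleted read_barrier_depends() documentation walked through is now expressed with load-acquire. A standalone C11 rendering of that idiom (illustrative, not kernel code):

#include <stdatomic.h>

int data;
_Atomic(int *) ptr;

void publish(void)			/* CPU 0 */
{
	data = 2;
	atomic_store_explicit(&ptr, &data, memory_order_release);
}

int consume(void)			/* CPU 1 */
{
	int *q = atomic_load_explicit(&ptr, memory_order_acquire);

	/* The acquire load orders the dependent read below; on Alpha
	   that ordering is backed by the barrier folded into
	   __READ_ONCE() (see asm/rwonce.h in the diffstat). */
	return q ? *q : 0;
}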
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index 5adca78830b5..3e33621922c3 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);
@@ -94,7 +94,7 @@ static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
smp_mb();
- __clear_bit(nr, addr);
+ arch___clear_bit(nr, addr);
}
static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
int *m = ((int *) addr) + (nr >> 5);
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
/*
* WARNING: non atomic version.
*/
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
/*
* WARNING: non atomic version.
*/
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = 1 << (nr & 0x1f);
int *m = ((int *) addr) + (nr >> 5);
@@ -283,10 +283,27 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
return (old & mask) != 0;
}
-static inline int
-test_bit(int nr, const volatile void * addr)
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
+
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *p)
{
- return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
+ unsigned long temp, old;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%4\n"
+ " mov %0,%2\n"
+ " xor %0,%3,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (*p), "=&r" (old)
+ :"Ir" (mask), "m" (*p));
+
+ return (old & BIT(7)) != 0;
}
/*
@@ -430,8 +447,6 @@ static inline unsigned int __arch_hweight8(unsigned int w)
#endif /* __KERNEL__ */
-#include <asm-generic/bitops/find.h>
-
#ifdef __KERNEL__
/*
@@ -452,6 +467,8 @@ sched_find_first_bit(const unsigned long b[2])
return __ffs(tmp) + ofs;
}
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
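The new xor_unlock_is_negative_byte() atomically XORs the given mask into the word and reports whether bit 7 of the old value was set; the folio unlock path uses this to clear the lock bit and observe the waiters bit in a single LL/SC sequence. A rough C model of the semantics, with the retry loop elided and an illustrative name (not part of the patch):

#include <stdbool.h>

static bool model_xor_unlock_is_negative_byte(unsigned long mask,
					      unsigned long *p)
{
	/* Atomically: old = *p; *p ^= mask; report old bit 7.
	   Memory-ordering details of the real helper are elided. */
	unsigned long old = __atomic_fetch_xor(p, mask, __ATOMIC_RELAXED);

	return (old & (1UL << 7)) != 0;
}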
diff --git a/arch/alpha/include/asm/bugs.h b/arch/alpha/include/asm/bugs.h
deleted file mode 100644
index 78030d1c7e7e..000000000000
--- a/arch/alpha/include/asm/bugs.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * include/asm-alpha/bugs.h
- *
- * Copyright (C) 1994 Linus Torvalds
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-/*
- * I don't know of any alpha bugs yet.. Nice chip
- */
-
-static void check_bugs(void)
-{
-}
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 89128489cb59..36a7e924c3b9 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -4,19 +4,6 @@
#include <linux/mm.h>
-/* Caches aren't brain-dead on the Alpha. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
/* Note that the following two definitions are _highly_ dependent
on the contexts in which they are used in the kernel. I personally
think it is criminal how loosely defined these macros are. */
@@ -48,7 +35,7 @@ extern void smp_imb(void);
extern void __load_new_mm_context(struct mm_struct *);
static inline void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (vma->vm_flags & VM_EXEC) {
@@ -59,20 +46,24 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
mm->context[smp_processor_id()] = 0;
}
}
-#else
-extern void flush_icache_user_range(struct vm_area_struct *vma,
+#define flush_icache_user_page flush_icache_user_page
+#else /* CONFIG_SMP */
+extern void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
-#endif
+#define flush_icache_user_page flush_icache_user_page
+#endif /* CONFIG_SMP */
-/* This is used only in __do_fault and do_swap_page. */
-#define flush_icache_page(vma, page) \
- flush_icache_user_range((vma), (page), 0, 0)
+/*
+ * Both implementations of flush_icache_user_page flush the entire
+ * address space, so one call, no matter how many pages.
+ */
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+ struct page *page, unsigned int nr)
+{
+ flush_icache_user_page(vma, page, 0, 0);
+}
+#define flush_icache_pages flush_icache_pages
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* _ALPHA_CACHEFLUSH_H */
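With the arch-specific copy_{to,from}_user_page() macros gone, the asm-generic header included above supplies the equivalents; roughly (sketch of the generic fallback):

#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_page(vma, page, vaddr, len);	\
	} while (0)
#endif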
diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h
index 473e6ccb65a3..99d631e146b2 100644
--- a/arch/alpha/include/asm/checksum.h
+++ b/arch/alpha/include/asm/checksum.h
@@ -41,9 +41,11 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
/*
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6c7c39452471..ae1b96479d0c 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -3,74 +3,281 @@
#define _ALPHA_CMPXCHG_H
/*
- * Atomic exchange routines.
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
*/
-#define ____xchg(type, args...) __xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
+static inline unsigned long
+____xchg_u8(volatile char *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " insbl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extbl %2,%4,%0\n"
+ " mskbl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg_u16(volatile short *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " inswl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extwl %2,%4,%0\n"
+ " mskwl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+____xchg_u32(volatile int *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+static inline unsigned long
+____xchg_u64(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid xchg(). */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____xchg(volatile void *ptr, unsigned long x, int size)
+{
+ return
+ size == 1 ? ____xchg_u8(ptr, x) :
+ size == 2 ? ____xchg_u16(ptr, x) :
+ size == 4 ? ____xchg_u32(ptr, x) :
+ size == 8 ? ____xchg_u64(ptr, x) :
+ (__xchg_called_with_bad_pointer(), x);
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+static inline unsigned long
+____cmpxchg_u8(volatile char *m, unsigned char old, unsigned char new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " insbl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extbl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskbl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u16(volatile short *m, unsigned short old, unsigned short new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " inswl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extwl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskwl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u32(volatile int *m, int old, int new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+____cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long
+____cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ return
+ size == 1 ? ____cmpxchg_u8(ptr, old, new) :
+ size == 2 ? ____cmpxchg_u16(ptr, old, new) :
+ size == 4 ? ____cmpxchg_u32(ptr, old, new) :
+ size == 8 ? ____cmpxchg_u64(ptr, old, new) :
+ (__cmpxchg_called_with_bad_pointer(), old);
+}
#define xchg_local(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
- sizeof(*(ptr))); \
+ (__typeof__(*(ptr))) ____xchg((ptr), (unsigned long)_x_, \
+ sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (__typeof__(*(ptr))) ____cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, \
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...) __xchg ##type(args)
-#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
-#include <asm/xchg.h>
-
/*
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
smp_mb(); \
__ret = (__typeof__(*(ptr))) \
- __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ ____xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
smp_mb(); \
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
smp_mb(); \
- __ret = (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ __ret = (__typeof__(*(ptr))) ____cmpxchg((ptr), \
(unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr)));\
smp_mb(); \
__ret; \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#undef ____cmpxchg
-
#endif /* _ALPHA_CMPXCHG_H */
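Callers normally reach these through the instrumented cmpxchg()/xchg() wrappers rather than the arch_ macros directly. A typical compare-and-swap retry loop over the fully ordered variant looks like this (illustrative sketch; the function name is made up):

static inline int saturating_inc(int *counter, int limit)
{
	int old, new;

	do {
		old = READ_ONCE(*counter);
		if (old >= limit)
			return old;	/* already saturated */
		new = old + 1;
	} while (arch_cmpxchg(counter, old, new) != old);

	return new;
}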
diff --git a/arch/alpha/include/asm/compiler.h b/arch/alpha/include/asm/compiler.h
index 5159ba259d65..ae645959018a 100644
--- a/arch/alpha/include/asm/compiler.h
+++ b/arch/alpha/include/asm/compiler.h
@@ -4,15 +4,4 @@
#include <uapi/asm/compiler.h>
-/* Some idiots over in <linux/compiler.h> thought inline should imply
- always_inline. This breaks stuff. We'll include this file whenever
- we run into such problems. */
-
-#include <linux/compiler.h>
-#undef inline
-#undef __inline__
-#undef __inline
-#undef __always_inline
-#define __always_inline inline __attribute__((always_inline))
-
#endif /* __ALPHA_COMPILER_H */
diff --git a/arch/alpha/include/asm/core_apecs.h b/arch/alpha/include/asm/core_apecs.h
deleted file mode 100644
index 0a07055bc0fe..000000000000
--- a/arch/alpha/include/asm/core_apecs.h
+++ /dev/null
@@ -1,518 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_APECS__H__
-#define __ALPHA_APECS__H__
-
-#include <linux/types.h>
-#include <asm/compiler.h>
-
-/*
- * APECS is the internal name for the 2107x chipset which provides
- * memory controller and PCI access for the 21064 chip based systems.
- *
- * This file is based on:
- *
- * DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets
- * Data Sheet
- *
- * EC-N0648-72
- *
- *
- * david.rusling@reo.mts.dec.com Initial Version.
- *
- */
-
-/*
- An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address
- that get passed through the PCI<->ISA bridge chip. So we've gotta use
- both windows to max out the physical memory we can DMA to. Sigh...
-
- If we try a window at 0 for 1GB as a work-around, we run into conflicts
- with ISA/PCI bus memory which can't be relocated, like VGA aperture and
- BIOS ROMs. So we must put the windows high enough to avoid these areas.
-
- We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1,
- and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1.
- Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually
- be used for that range (via virt_to_bus()).
-
- Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb,
- to keep virt_to_bus() from returning an address in the first window, for
- a data area that goes beyond the 64Mb first DMA window. Sigh...
- The fudge factor MUST match with <asm/dma.h> MAX_DMA_ADDRESS, but
- we can't just use that here, because of header file looping... :-(
-
- Window 1 will be used for all DMA from the ISA bus; yes, that does
- limit what memory an ISA floppy or sound card or Ethernet can touch, but
- it's also a known limitation on other platforms as well. We use the
- same technique that is used on INTEL platforms with similar limitation:
- set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init().
- We trust that any ISA bus device drivers will *always* ask for DMAable
- memory explicitly via kmalloc()/get_free_pages() flags arguments.
-
- Note that most PCI bus devices' drivers do *not* explicitly ask for
- DMAable memory; they count on being able to DMA to any memory they
- get from kmalloc()/get_free_pages(). They will also use window 1 for
- any physical memory accesses below 64Mb; the rest will be handled by
- window 2, maxing out at 1Gb of memory. I trust this is enough... :-)
-
- We hope that the area before the first window is large enough so that
- there will be no overlap at the top end (64Mb). We *must* locate the
- PCI cards' memory just below window 1, so that there's still the
- possibility of being able to access it via SPARSE space. This is
- important for cards such as the Matrox Millennium, whose Xserver
- wants to access memory-mapped registers in byte and short lengths.
-
- Note that the XL is treated differently from the AVANTI, even though
- for most other things they are identical. It didn't seem reasonable to
- make the AVANTI support pay for the limitations of the XL. It is true,
- however, that an XL kernel will run on an AVANTI without problems.
-
- %%% All of this should be obviated by the ability to route
- everything through the iommu.
-*/
-
-/*
- * 21071-DA Control and Status registers.
- * These are used for PCI memory access.
- */
-#define APECS_IOC_DCSR (IDENT_ADDR + 0x1A0000000UL)
-#define APECS_IOC_PEAR (IDENT_ADDR + 0x1A0000020UL)
-#define APECS_IOC_SEAR (IDENT_ADDR + 0x1A0000040UL)
-#define APECS_IOC_DR1 (IDENT_ADDR + 0x1A0000060UL)
-#define APECS_IOC_DR2 (IDENT_ADDR + 0x1A0000080UL)
-#define APECS_IOC_DR3 (IDENT_ADDR + 0x1A00000A0UL)
-
-#define APECS_IOC_TB1R (IDENT_ADDR + 0x1A00000C0UL)
-#define APECS_IOC_TB2R (IDENT_ADDR + 0x1A00000E0UL)
-
-#define APECS_IOC_PB1R (IDENT_ADDR + 0x1A0000100UL)
-#define APECS_IOC_PB2R (IDENT_ADDR + 0x1A0000120UL)
-
-#define APECS_IOC_PM1R (IDENT_ADDR + 0x1A0000140UL)
-#define APECS_IOC_PM2R (IDENT_ADDR + 0x1A0000160UL)
-
-#define APECS_IOC_HAXR0 (IDENT_ADDR + 0x1A0000180UL)
-#define APECS_IOC_HAXR1 (IDENT_ADDR + 0x1A00001A0UL)
-#define APECS_IOC_HAXR2 (IDENT_ADDR + 0x1A00001C0UL)
-
-#define APECS_IOC_PMLT (IDENT_ADDR + 0x1A00001E0UL)
-
-#define APECS_IOC_TLBTAG0 (IDENT_ADDR + 0x1A0000200UL)
-#define APECS_IOC_TLBTAG1 (IDENT_ADDR + 0x1A0000220UL)
-#define APECS_IOC_TLBTAG2 (IDENT_ADDR + 0x1A0000240UL)
-#define APECS_IOC_TLBTAG3 (IDENT_ADDR + 0x1A0000260UL)
-#define APECS_IOC_TLBTAG4 (IDENT_ADDR + 0x1A0000280UL)
-#define APECS_IOC_TLBTAG5 (IDENT_ADDR + 0x1A00002A0UL)
-#define APECS_IOC_TLBTAG6 (IDENT_ADDR + 0x1A00002C0UL)
-#define APECS_IOC_TLBTAG7 (IDENT_ADDR + 0x1A00002E0UL)
-
-#define APECS_IOC_TLBDATA0 (IDENT_ADDR + 0x1A0000300UL)
-#define APECS_IOC_TLBDATA1 (IDENT_ADDR + 0x1A0000320UL)
-#define APECS_IOC_TLBDATA2 (IDENT_ADDR + 0x1A0000340UL)
-#define APECS_IOC_TLBDATA3 (IDENT_ADDR + 0x1A0000360UL)
-#define APECS_IOC_TLBDATA4 (IDENT_ADDR + 0x1A0000380UL)
-#define APECS_IOC_TLBDATA5 (IDENT_ADDR + 0x1A00003A0UL)
-#define APECS_IOC_TLBDATA6 (IDENT_ADDR + 0x1A00003C0UL)
-#define APECS_IOC_TLBDATA7 (IDENT_ADDR + 0x1A00003E0UL)
-
-#define APECS_IOC_TBIA (IDENT_ADDR + 0x1A0000400UL)
-
-
-/*
- * 21071-CA Control and Status registers.
- * These are used to program memory timing,
- * configure memory and initialise the B-Cache.
- */
-#define APECS_MEM_GCR (IDENT_ADDR + 0x180000000UL)
-#define APECS_MEM_EDSR (IDENT_ADDR + 0x180000040UL)
-#define APECS_MEM_TAR (IDENT_ADDR + 0x180000060UL)
-#define APECS_MEM_ELAR (IDENT_ADDR + 0x180000080UL)
-#define APECS_MEM_EHAR (IDENT_ADDR + 0x1800000a0UL)
-#define APECS_MEM_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
-#define APECS_MEM_LDxLAR (IDENT_ADDR + 0x1800000e0UL)
-#define APECS_MEM_LDxHAR (IDENT_ADDR + 0x180000100UL)
-#define APECS_MEM_GTR (IDENT_ADDR + 0x180000200UL)
-#define APECS_MEM_RTR (IDENT_ADDR + 0x180000220UL)
-#define APECS_MEM_VFPR (IDENT_ADDR + 0x180000240UL)
-#define APECS_MEM_PDLDR (IDENT_ADDR + 0x180000260UL)
-#define APECS_MEM_PDhDR (IDENT_ADDR + 0x180000280UL)
-
-/* Bank x Base Address Register */
-#define APECS_MEM_B0BAR (IDENT_ADDR + 0x180000800UL)
-#define APECS_MEM_B1BAR (IDENT_ADDR + 0x180000820UL)
-#define APECS_MEM_B2BAR (IDENT_ADDR + 0x180000840UL)
-#define APECS_MEM_B3BAR (IDENT_ADDR + 0x180000860UL)
-#define APECS_MEM_B4BAR (IDENT_ADDR + 0x180000880UL)
-#define APECS_MEM_B5BAR (IDENT_ADDR + 0x1800008A0UL)
-#define APECS_MEM_B6BAR (IDENT_ADDR + 0x1800008C0UL)
-#define APECS_MEM_B7BAR (IDENT_ADDR + 0x1800008E0UL)
-#define APECS_MEM_B8BAR (IDENT_ADDR + 0x180000900UL)
-
-/* Bank x Configuration Register */
-#define APECS_MEM_B0BCR (IDENT_ADDR + 0x180000A00UL)
-#define APECS_MEM_B1BCR (IDENT_ADDR + 0x180000A20UL)
-#define APECS_MEM_B2BCR (IDENT_ADDR + 0x180000A40UL)
-#define APECS_MEM_B3BCR (IDENT_ADDR + 0x180000A60UL)
-#define APECS_MEM_B4BCR (IDENT_ADDR + 0x180000A80UL)
-#define APECS_MEM_B5BCR (IDENT_ADDR + 0x180000AA0UL)
-#define APECS_MEM_B6BCR (IDENT_ADDR + 0x180000AC0UL)
-#define APECS_MEM_B7BCR (IDENT_ADDR + 0x180000AE0UL)
-#define APECS_MEM_B8BCR (IDENT_ADDR + 0x180000B00UL)
-
-/* Bank x Timing Register A */
-#define APECS_MEM_B0TRA (IDENT_ADDR + 0x180000C00UL)
-#define APECS_MEM_B1TRA (IDENT_ADDR + 0x180000C20UL)
-#define APECS_MEM_B2TRA (IDENT_ADDR + 0x180000C40UL)
-#define APECS_MEM_B3TRA (IDENT_ADDR + 0x180000C60UL)
-#define APECS_MEM_B4TRA (IDENT_ADDR + 0x180000C80UL)
-#define APECS_MEM_B5TRA (IDENT_ADDR + 0x180000CA0UL)
-#define APECS_MEM_B6TRA (IDENT_ADDR + 0x180000CC0UL)
-#define APECS_MEM_B7TRA (IDENT_ADDR + 0x180000CE0UL)
-#define APECS_MEM_B8TRA (IDENT_ADDR + 0x180000D00UL)
-
-/* Bank x Timing Register B */
-#define APECS_MEM_B0TRB (IDENT_ADDR + 0x180000E00UL)
-#define APECS_MEM_B1TRB (IDENT_ADDR + 0x180000E20UL)
-#define APECS_MEM_B2TRB (IDENT_ADDR + 0x180000E40UL)
-#define APECS_MEM_B3TRB (IDENT_ADDR + 0x180000E60UL)
-#define APECS_MEM_B4TRB (IDENT_ADDR + 0x180000E80UL)
-#define APECS_MEM_B5TRB (IDENT_ADDR + 0x180000EA0UL)
-#define APECS_MEM_B6TRB (IDENT_ADDR + 0x180000EC0UL)
-#define APECS_MEM_B7TRB (IDENT_ADDR + 0x180000EE0UL)
-#define APECS_MEM_B8TRB (IDENT_ADDR + 0x180000F00UL)
-
-
-/*
- * Memory spaces:
- */
-#define APECS_IACK_SC (IDENT_ADDR + 0x1b0000000UL)
-#define APECS_CONF (IDENT_ADDR + 0x1e0000000UL)
-#define APECS_IO (IDENT_ADDR + 0x1c0000000UL)
-#define APECS_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
-#define APECS_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
-
-
-/*
- * Bit definitions for I/O Controller status register 0:
- */
-#define APECS_IOC_STAT0_CMD 0xf
-#define APECS_IOC_STAT0_ERR (1<<4)
-#define APECS_IOC_STAT0_LOST (1<<5)
-#define APECS_IOC_STAT0_THIT (1<<6)
-#define APECS_IOC_STAT0_TREF (1<<7)
-#define APECS_IOC_STAT0_CODE_SHIFT 8
-#define APECS_IOC_STAT0_CODE_MASK 0x7
-#define APECS_IOC_STAT0_P_NBR_SHIFT 13
-#define APECS_IOC_STAT0_P_NBR_MASK 0x7ffff
-
-#define APECS_HAE_ADDRESS APECS_IOC_HAXR1
-
-
-/*
- * Data structure for handling APECS machine checks:
- */
-
-struct el_apecs_mikasa_sysdata_mcheck
-{
- unsigned long coma_gcr;
- unsigned long coma_edsr;
- unsigned long coma_ter;
- unsigned long coma_elar;
- unsigned long coma_ehar;
- unsigned long coma_ldlr;
- unsigned long coma_ldhr;
- unsigned long coma_base0;
- unsigned long coma_base1;
- unsigned long coma_base2;
- unsigned long coma_base3;
- unsigned long coma_cnfg0;
- unsigned long coma_cnfg1;
- unsigned long coma_cnfg2;
- unsigned long coma_cnfg3;
- unsigned long epic_dcsr;
- unsigned long epic_pear;
- unsigned long epic_sear;
- unsigned long epic_tbr1;
- unsigned long epic_tbr2;
- unsigned long epic_pbr1;
- unsigned long epic_pbr2;
- unsigned long epic_pmr1;
- unsigned long epic_pmr2;
- unsigned long epic_harx1;
- unsigned long epic_harx2;
- unsigned long epic_pmlt;
- unsigned long epic_tag0;
- unsigned long epic_tag1;
- unsigned long epic_tag2;
- unsigned long epic_tag3;
- unsigned long epic_tag4;
- unsigned long epic_tag5;
- unsigned long epic_tag6;
- unsigned long epic_tag7;
- unsigned long epic_data0;
- unsigned long epic_data1;
- unsigned long epic_data2;
- unsigned long epic_data3;
- unsigned long epic_data4;
- unsigned long epic_data5;
- unsigned long epic_data6;
- unsigned long epic_data7;
-
- unsigned long pceb_vid;
- unsigned long pceb_did;
- unsigned long pceb_revision;
- unsigned long pceb_command;
- unsigned long pceb_status;
- unsigned long pceb_latency;
- unsigned long pceb_control;
- unsigned long pceb_arbcon;
- unsigned long pceb_arbpri;
-
- unsigned long esc_id;
- unsigned long esc_revision;
- unsigned long esc_int0;
- unsigned long esc_int1;
- unsigned long esc_elcr0;
- unsigned long esc_elcr1;
- unsigned long esc_last_eisa;
- unsigned long esc_nmi_stat;
-
- unsigned long pci_ir;
- unsigned long pci_imr;
- unsigned long svr_mgr;
-};
-
-/* This for the normal APECS machines. */
-struct el_apecs_sysdata_mcheck
-{
- unsigned long coma_gcr;
- unsigned long coma_edsr;
- unsigned long coma_ter;
- unsigned long coma_elar;
- unsigned long coma_ehar;
- unsigned long coma_ldlr;
- unsigned long coma_ldhr;
- unsigned long coma_base0;
- unsigned long coma_base1;
- unsigned long coma_base2;
- unsigned long coma_cnfg0;
- unsigned long coma_cnfg1;
- unsigned long coma_cnfg2;
- unsigned long epic_dcsr;
- unsigned long epic_pear;
- unsigned long epic_sear;
- unsigned long epic_tbr1;
- unsigned long epic_tbr2;
- unsigned long epic_pbr1;
- unsigned long epic_pbr2;
- unsigned long epic_pmr1;
- unsigned long epic_pmr2;
- unsigned long epic_harx1;
- unsigned long epic_harx2;
- unsigned long epic_pmlt;
- unsigned long epic_tag0;
- unsigned long epic_tag1;
- unsigned long epic_tag2;
- unsigned long epic_tag3;
- unsigned long epic_tag4;
- unsigned long epic_tag5;
- unsigned long epic_tag6;
- unsigned long epic_tag7;
- unsigned long epic_data0;
- unsigned long epic_data1;
- unsigned long epic_data2;
- unsigned long epic_data3;
- unsigned long epic_data4;
- unsigned long epic_data5;
- unsigned long epic_data6;
- unsigned long epic_data7;
-};
-
-struct el_apecs_procdata
-{
- unsigned long paltemp[32]; /* PAL TEMP REGS. */
- /* EV4-specific fields */
- unsigned long exc_addr; /* Address of excepting instruction. */
- unsigned long exc_sum; /* Summary of arithmetic traps. */
- unsigned long exc_mask; /* Exception mask (from exc_sum). */
- unsigned long iccsr; /* IBox hardware enables. */
- unsigned long pal_base; /* Base address for PALcode. */
- unsigned long hier; /* Hardware Interrupt Enable. */
- unsigned long hirr; /* Hardware Interrupt Request. */
- unsigned long csr; /* D-stream fault info. */
- unsigned long dc_stat; /* D-cache status (ECC/Parity Err). */
- unsigned long dc_addr; /* EV3 Phys Addr for ECC/DPERR. */
- unsigned long abox_ctl; /* ABox Control Register. */
- unsigned long biu_stat; /* BIU Status. */
- unsigned long biu_addr; /* BUI Address. */
- unsigned long biu_ctl; /* BIU Control. */
- unsigned long fill_syndrome;/* For correcting ECC errors. */
- unsigned long fill_addr; /* Cache block which was being read */
- unsigned long va; /* Effective VA of fault or miss. */
- unsigned long bc_tag; /* Backup Cache Tag Probe Results.*/
-};
-
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * I/O functions:
- *
- * Unlike Jensen, the APECS machines have no concept of local
- * I/O---everything goes over the PCI bus.
- *
- * There is plenty room for optimization here. In particular,
- * the Alpha's insb/insw/extb/extw should be useful in moving
- * data to/from the right byte-lanes.
- */
-
-#define vip volatile int __force *
-#define vuip volatile unsigned int __force *
-#define vulp volatile unsigned long __force *
-
-#define APECS_SET_HAE \
- do { \
- if (addr >= (1UL << 24)) { \
- unsigned long msb = addr & 0xf8000000; \
- addr -= msb; \
- set_hae(msb); \
- } \
- } while (0)
-
-__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x00;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x00;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extbl(result, addr & 3);
-}
-
-__EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x00;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x00;
- }
-
- w = __kernel_insbl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x08;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x08;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extwl(result, addr & 3);
-}
-
-__EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= APECS_DENSE_MEM) {
- addr -= APECS_DENSE_MEM;
- APECS_SET_HAE;
- base_and_type = APECS_SPARSE_MEM + 0x08;
- } else {
- addr -= APECS_IO;
- base_and_type = APECS_IO + 0x08;
- }
-
- w = __kernel_inswl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- return *(vuip)addr;
-}
-
-__EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < APECS_DENSE_MEM)
- addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
- *(vuip)addr = b;
-}
-
-__EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr)
-{
- return (void __iomem *)(addr + APECS_IO);
-}
-
-__EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + APECS_DENSE_MEM);
-}
-
-__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr)
-{
- return addr >= IDENT_ADDR + 0x180000000UL;
-}
-
-__EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= APECS_DENSE_MEM;
-}
-
-#undef APECS_SET_HAE
-
-#undef vip
-#undef vuip
-#undef vulp
-
-#undef __IO_PREFIX
-#define __IO_PREFIX apecs
-#define apecs_trivial_io_bw 0
-#define apecs_trivial_io_lq 0
-#define apecs_trivial_rw_bw 2
-#define apecs_trivial_rw_lq 1
-#define apecs_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_APECS__H__ */
diff --git a/arch/alpha/include/asm/core_cia.h b/arch/alpha/include/asm/core_cia.h
index c706a7f2b061..d26bdfb7ca3b 100644
--- a/arch/alpha/include/asm/core_cia.h
+++ b/arch/alpha/include/asm/core_cia.h
@@ -342,7 +342,7 @@ struct el_CIA_sysdata_mcheck {
#define vuip volatile unsigned int __force *
#define vulp volatile unsigned long __force *
-__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE u8 cia_ioread8(const void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr;
unsigned long result, base_and_type;
@@ -374,7 +374,7 @@ __EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr)
*(vuip) ((addr << 5) + base_and_type) = w;
}
-__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE u16 cia_ioread16(const void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr;
unsigned long result, base_and_type;
@@ -404,7 +404,7 @@ __EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr)
*(vuip) ((addr << 5) + base_and_type) = w;
}
-__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE u32 cia_ioread32(const void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr;
if (addr < CIA_DENSE_MEM)
@@ -420,6 +420,22 @@ __EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr)
*(vuip)addr = b;
}
+__EXTERN_INLINE u64 cia_ioread64(const void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (addr < CIA_DENSE_MEM)
+ addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
+ return *(vulp)addr;
+}
+
+__EXTERN_INLINE void cia_iowrite64(u64 b, void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (addr < CIA_DENSE_MEM)
+ addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
+ *(vulp)addr = b;
+}
+
__EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr)
{
return (void __iomem *)(addr + CIA_IO);
diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h
deleted file mode 100644
index 84d5e5b84f4f..000000000000
--- a/arch/alpha/include/asm/core_lca.h
+++ /dev/null
@@ -1,362 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_LCA__H__
-#define __ALPHA_LCA__H__
-
-#include <asm/compiler.h>
-#include <asm/mce.h>
-
-/*
- * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
- * for example).
- *
- * This file is based on:
- *
- * DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors
- * Hardware Reference Manual; Digital Equipment Corp.; May 1994;
- * Maynard, MA; Order Number: EC-N2681-71.
- */
-
-/*
- * NOTE: The LCA uses a Host Address Extension (HAE) register to access
- * PCI addresses that are beyond the first 27 bits of address
- * space. Updating the HAE requires an external cycle (and
- * a memory barrier), which tends to be slow. Instead of updating
- * it on each sparse memory access, we keep the current HAE value
- * cached in variable cache_hae. Only if the cached HAE differs
- * from the desired HAE value do we actually updated HAE register.
- * The HAE register is preserved by the interrupt handler entry/exit
- * code, so this scheme works even in the presence of interrupts.
- *
- * Dense memory space doesn't require the HAE, but is restricted to
- * aligned 32 and 64 bit accesses. Special Cycle and Interrupt
- * Acknowledge cycles may also require the use of the HAE. The LCA
- * limits I/O address space to the bottom 24 bits of address space,
- * but this easily covers the 16 bit ISA I/O address space.
- */
-
-/*
- * NOTE 2! The memory operations do not set any memory barriers, as
- * it's not needed for cases like a frame buffer that is essentially
- * memory-like. You need to do them by hand if the operations depend
- * on ordering.
- *
- * Similarly, the port I/O operations do a "mb" only after a write
- * operation: if an mb is needed before (as in the case of doing
- * memory mapped I/O first, and then a port I/O operation to the same
- * device), it needs to be done by hand.
- *
- * After the above has bitten me 100 times, I'll give up and just do
- * the mb all the time, but right now I'm hoping this will work out.
- * Avoiding mb's may potentially be a noticeable speed improvement,
- * but I can't honestly say I've tested it.
- *
- * Handling interrupts that need to do mb's to synchronize to
- * non-interrupts is another fun race area. Don't do it (because if
- * you do, I'll have to do *everything* with interrupts disabled,
- * ugh).
- */
-
-/*
- * Memory Controller registers:
- */
-#define LCA_MEM_BCR0 (IDENT_ADDR + 0x120000000UL)
-#define LCA_MEM_BCR1 (IDENT_ADDR + 0x120000008UL)
-#define LCA_MEM_BCR2 (IDENT_ADDR + 0x120000010UL)
-#define LCA_MEM_BCR3 (IDENT_ADDR + 0x120000018UL)
-#define LCA_MEM_BMR0 (IDENT_ADDR + 0x120000020UL)
-#define LCA_MEM_BMR1 (IDENT_ADDR + 0x120000028UL)
-#define LCA_MEM_BMR2 (IDENT_ADDR + 0x120000030UL)
-#define LCA_MEM_BMR3 (IDENT_ADDR + 0x120000038UL)
-#define LCA_MEM_BTR0 (IDENT_ADDR + 0x120000040UL)
-#define LCA_MEM_BTR1 (IDENT_ADDR + 0x120000048UL)
-#define LCA_MEM_BTR2 (IDENT_ADDR + 0x120000050UL)
-#define LCA_MEM_BTR3 (IDENT_ADDR + 0x120000058UL)
-#define LCA_MEM_GTR (IDENT_ADDR + 0x120000060UL)
-#define LCA_MEM_ESR (IDENT_ADDR + 0x120000068UL)
-#define LCA_MEM_EAR (IDENT_ADDR + 0x120000070UL)
-#define LCA_MEM_CAR (IDENT_ADDR + 0x120000078UL)
-#define LCA_MEM_VGR (IDENT_ADDR + 0x120000080UL)
-#define LCA_MEM_PLM (IDENT_ADDR + 0x120000088UL)
-#define LCA_MEM_FOR (IDENT_ADDR + 0x120000090UL)
-
-/*
- * I/O Controller registers:
- */
-#define LCA_IOC_HAE (IDENT_ADDR + 0x180000000UL)
-#define LCA_IOC_CONF (IDENT_ADDR + 0x180000020UL)
-#define LCA_IOC_STAT0 (IDENT_ADDR + 0x180000040UL)
-#define LCA_IOC_STAT1 (IDENT_ADDR + 0x180000060UL)
-#define LCA_IOC_TBIA (IDENT_ADDR + 0x180000080UL)
-#define LCA_IOC_TB_ENA (IDENT_ADDR + 0x1800000a0UL)
-#define LCA_IOC_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
-#define LCA_IOC_PAR_DIS (IDENT_ADDR + 0x1800000e0UL)
-#define LCA_IOC_W_BASE0 (IDENT_ADDR + 0x180000100UL)
-#define LCA_IOC_W_BASE1 (IDENT_ADDR + 0x180000120UL)
-#define LCA_IOC_W_MASK0 (IDENT_ADDR + 0x180000140UL)
-#define LCA_IOC_W_MASK1 (IDENT_ADDR + 0x180000160UL)
-#define LCA_IOC_T_BASE0 (IDENT_ADDR + 0x180000180UL)
-#define LCA_IOC_T_BASE1 (IDENT_ADDR + 0x1800001a0UL)
-#define LCA_IOC_TB_TAG0 (IDENT_ADDR + 0x188000000UL)
-#define LCA_IOC_TB_TAG1 (IDENT_ADDR + 0x188000020UL)
-#define LCA_IOC_TB_TAG2 (IDENT_ADDR + 0x188000040UL)
-#define LCA_IOC_TB_TAG3 (IDENT_ADDR + 0x188000060UL)
-#define LCA_IOC_TB_TAG4 (IDENT_ADDR + 0x188000070UL)
-#define LCA_IOC_TB_TAG5 (IDENT_ADDR + 0x1880000a0UL)
-#define LCA_IOC_TB_TAG6 (IDENT_ADDR + 0x1880000c0UL)
-#define LCA_IOC_TB_TAG7 (IDENT_ADDR + 0x1880000e0UL)
-
-/*
- * Memory spaces:
- */
-#define LCA_IACK_SC (IDENT_ADDR + 0x1a0000000UL)
-#define LCA_CONF (IDENT_ADDR + 0x1e0000000UL)
-#define LCA_IO (IDENT_ADDR + 0x1c0000000UL)
-#define LCA_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
-#define LCA_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
-
-/*
- * Bit definitions for I/O Controller status register 0:
- */
-#define LCA_IOC_STAT0_CMD 0xf
-#define LCA_IOC_STAT0_ERR (1<<4)
-#define LCA_IOC_STAT0_LOST (1<<5)
-#define LCA_IOC_STAT0_THIT (1<<6)
-#define LCA_IOC_STAT0_TREF (1<<7)
-#define LCA_IOC_STAT0_CODE_SHIFT 8
-#define LCA_IOC_STAT0_CODE_MASK 0x7
-#define LCA_IOC_STAT0_P_NBR_SHIFT 13
-#define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff
-
-#define LCA_HAE_ADDRESS LCA_IOC_HAE
-
-/* LCA PMR Power Management register defines */
-#define LCA_PMR_ADDR (IDENT_ADDR + 0x120000098UL)
-#define LCA_PMR_PDIV 0x7 /* Primary clock divisor */
-#define LCA_PMR_ODIV 0x38 /* Override clock divisor */
-#define LCA_PMR_INTO 0x40 /* Interrupt override */
-#define LCA_PMR_DMAO 0x80 /* DMA override */
-#define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even bits */
-#define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - odd bits */
-#define LCA_PMR_PRIMARY_MASK 0xfffffffffffffff8L
-
-/* LCA PMR Macros */
-
-#define LCA_READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR)
-#define LCA_WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d))
-
-#define LCA_GET_PRIMARY(r) ((r) & LCA_PMR_PDIV)
-#define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV)
-#define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c)))
-
-/* LCA PMR Divisor values */
-#define LCA_PMR_DIV_1 0x0
-#define LCA_PMR_DIV_1_5 0x1
-#define LCA_PMR_DIV_2 0x2
-#define LCA_PMR_DIV_4 0x3
-#define LCA_PMR_DIV_8 0x4
-#define LCA_PMR_DIV_16 0x5
-#define LCA_PMR_DIV_MIN LCA_PMR_DIV_1
-#define LCA_PMR_DIV_MAX LCA_PMR_DIV_16
-
-
-/*
- * Data structure for handling LCA machine checks. Correctable errors
- * result in a short logout frame, uncorrectable ones in a long one.
- */
-struct el_lca_mcheck_short {
- struct el_common h; /* common logout header */
- unsigned long esr; /* error-status register */
- unsigned long ear; /* error-address register */
- unsigned long dc_stat; /* dcache status register */
- unsigned long ioc_stat0; /* I/O controller status register 0 */
- unsigned long ioc_stat1; /* I/O controller status register 1 */
-};
-
-struct el_lca_mcheck_long {
- struct el_common h; /* common logout header */
- unsigned long pt[31]; /* PAL temps */
- unsigned long exc_addr; /* exception address */
- unsigned long pad1[3];
- unsigned long pal_base; /* PALcode base address */
- unsigned long hier; /* hw interrupt enable */
- unsigned long hirr; /* hw interrupt request */
- unsigned long mm_csr; /* MMU control & status */
- unsigned long dc_stat; /* data cache status */
- unsigned long dc_addr; /* data cache addr register */
- unsigned long abox_ctl; /* address box control register */
- unsigned long esr; /* error status register */
- unsigned long ear; /* error address register */
- unsigned long car; /* cache control register */
- unsigned long ioc_stat0; /* I/O controller status register 0 */
- unsigned long ioc_stat1; /* I/O controller status register 1 */
- unsigned long va; /* virtual address register */
-};
-
-union el_lca {
- struct el_common * c;
- struct el_lca_mcheck_long * l;
- struct el_lca_mcheck_short * s;
-};
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * I/O functions:
- *
- * Unlike Jensen, the Noname machines have no concept of local
- * I/O---everything goes over the PCI bus.
- *
- * There is plenty of room for optimization here. In particular,
- * the Alpha's insb/insw/extb/extw should be useful in moving
- * data to/from the right byte-lanes.
- */
-
-#define vip volatile int __force *
-#define vuip volatile unsigned int __force *
-#define vulp volatile unsigned long __force *
-
-#define LCA_SET_HAE \
- do { \
- if (addr >= (1UL << 24)) { \
- unsigned long msb = addr & 0xf8000000; \
- addr -= msb; \
- set_hae(msb); \
- } \
- } while (0)
-
-
-__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x00;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x00;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extbl(result, addr & 3);
-}
-
-__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x00;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x00;
- }
-
- w = __kernel_insbl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long result, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x08;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x08;
- }
-
- result = *(vip) ((addr << 5) + base_and_type);
- return __kernel_extwl(result, addr & 3);
-}
-
-__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long w, base_and_type;
-
- if (addr >= LCA_DENSE_MEM) {
- addr -= LCA_DENSE_MEM;
- LCA_SET_HAE;
- base_and_type = LCA_SPARSE_MEM + 0x08;
- } else {
- addr -= LCA_IO;
- base_and_type = LCA_IO + 0x08;
- }
-
- w = __kernel_inswl(b, addr & 3);
- *(vuip) ((addr << 5) + base_and_type) = w;
-}
-
-__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- return *(vuip)addr;
-}
-
-__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- if (addr < LCA_DENSE_MEM)
- addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
- *(vuip)addr = b;
-}
-
-__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr)
-{
- return (void __iomem *)(addr + LCA_IO);
-}
-
-__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + LCA_DENSE_MEM);
-}
-
-__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
-{
- return addr >= IDENT_ADDR + 0x120000000UL;
-}
-
-__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= LCA_DENSE_MEM;
-}
-
-#undef vip
-#undef vuip
-#undef vulp
-
-#undef __IO_PREFIX
-#define __IO_PREFIX lca
-#define lca_trivial_rw_bw 2
-#define lca_trivial_rw_lq 1
-#define lca_trivial_io_bw 0
-#define lca_trivial_io_lq 0
-#define lca_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_LCA__H__ */
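The removed LCA routines all funnel through the same sparse-space trick: the byte address is shifted left 5 bits, the low bits of the resulting address encode the transfer size, and the byte lanes of the returned longword are picked apart with extbl/insbl. A minimal user-space sketch of that arithmetic (assumption: function names are illustrative, the LCA_IO offset and lane handling are lifted from the defines and lca_ioread8() above; this is illustration, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Sparse-space byte address, as in lca_ioread8(): shift the target
 * address left 5 bits, add the region base and the size code
 * (0x00 selects a byte-wide transfer). */
static uint64_t sparse_byte_addr(uint64_t addr, uint64_t region_base)
{
	return (addr << 5) + region_base + 0x00;
}

/* Emulate extbl: pull the byte in the given lane out of a longword. */
static uint8_t extbl(uint64_t data, unsigned int lane)
{
	return (uint8_t)((data >> (lane * 8)) & 0xff);
}

int main(void)
{
	uint64_t lca_io = 0x1c0000000ULL;	/* LCA_IO minus IDENT_ADDR */
	uint64_t port = 0x3f9;			/* arbitrary example */

	printf("port 0x%llx -> sparse 0x%llx, lane %u\n",
	       (unsigned long long)port,
	       (unsigned long long)sparse_byte_addr(port, lca_io),
	       (unsigned int)(port & 3));
	printf("extbl(0x11223344, %u) = 0x%02x\n",
	       (unsigned int)(port & 3),
	       extbl(0x11223344, (unsigned int)(port & 3)));
	return 0;
}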
diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h
index cc6fd92d5fa9..d99f3a82e0e5 100644
--- a/arch/alpha/include/asm/core_marvel.h
+++ b/arch/alpha/include/asm/core_marvel.h
@@ -332,10 +332,10 @@ struct io7 {
#define vucp volatile unsigned char __force *
#define vusp volatile unsigned short __force *
-extern unsigned int marvel_ioread8(void __iomem *);
+extern u8 marvel_ioread8(const void __iomem *);
extern void marvel_iowrite8(u8 b, void __iomem *);
-__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr)
+__EXTERN_INLINE u16 marvel_ioread16(const void __iomem *addr)
{
return __kernel_ldwu(*(vusp)addr);
}
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index b30dc128210d..ed2bf8ad40ed 100644
--- a/arch/alpha/include/asm/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
@@ -248,6 +248,7 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
#define vip volatile int __force *
#define vuip volatile unsigned int __force *
+#define vulp volatile unsigned long __force *
#ifndef MCPCIA_ONE_HAE_WINDOW
#define MCPCIA_FROB_MMIO \
@@ -267,7 +268,7 @@ extern inline int __mcpcia_is_mmio(unsigned long addr)
return (addr & 0x80000000UL) == 0;
}
-__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr)
+__EXTERN_INLINE u8 mcpcia_ioread8(const void __iomem *xaddr)
{
unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
@@ -291,7 +292,7 @@ __EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr)
*(vuip) ((addr << 5) + hose + 0x00) = w;
}
-__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr)
+__EXTERN_INLINE u16 mcpcia_ioread16(const void __iomem *xaddr)
{
unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
@@ -315,7 +316,7 @@ __EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr)
*(vuip) ((addr << 5) + hose + 0x08) = w;
}
-__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr)
+__EXTERN_INLINE u32 mcpcia_ioread32(const void __iomem *xaddr)
{
unsigned long addr = (unsigned long)xaddr;
@@ -335,6 +336,26 @@ __EXTERN_INLINE void mcpcia_iowrite32(u32 b, void __iomem *xaddr)
*(vuip)addr = b;
}
+__EXTERN_INLINE u64 mcpcia_ioread64(const void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long)xaddr;
+
+ if (!__mcpcia_is_mmio(addr))
+ addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18;
+
+ return *(vulp)addr;
+}
+
+__EXTERN_INLINE void mcpcia_iowrite64(u64 b, void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long)xaddr;
+
+ if (!__mcpcia_is_mmio(addr))
+ addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18;
+
+ *(vulp)addr = b;
+}
+
__EXTERN_INLINE void __iomem *mcpcia_ioportmap(unsigned long addr)
{
@@ -362,6 +383,7 @@ __EXTERN_INLINE int mcpcia_is_mmio(const volatile void __iomem *xaddr)
#undef vip
#undef vuip
+#undef vulp
#undef __IO_PREFIX
#define __IO_PREFIX mcpcia
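The new mcpcia_ioread64()/mcpcia_iowrite64() pair reuses the sparse PIO encoding of the narrower accessors, with 0x18 as the quadword size code and the hose-select bits kept in place above the 16-bit port number. A standalone sketch of the address math (assumption: user-space C; the hose value is arbitrary, chosen only so __mcpcia_is_mmio() would see a PIO address):

#include <stdio.h>
#include <stdint.h>

/* Sparse quadword PIO address, as in the mcpcia_ioread64() hunk: the
 * 16-bit port number is shifted into the sparse window while the
 * hose-select bits above it stay put; 0x18 marks a quadword. */
static uint64_t mcpcia_sparse_q(uint64_t addr)
{
	return ((addr & 0xffff) << 5) + (addr & ~0xffffULL) + 0x18;
}

int main(void)
{
	uint64_t hose = 0x80000000ULL;	/* illustrative hose base (assumption) */
	uint64_t port = 0x0170;

	printf("addr 0x%llx -> sparse 0x%llx\n",
	       (unsigned long long)(hose | port),
	       (unsigned long long)mcpcia_sparse_q(hose | port));
	return 0;
}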
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index e0b33d09e93a..ca9b091d9c5f 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -25,16 +25,8 @@
#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
-/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
#define _GAMMA_BIAS 0x8000000000UL
-
-#if defined(CONFIG_ALPHA_GENERIC)
-#define GAMMA_BIAS alpha_mv.sys.t2.gamma_bias
-#elif defined(CONFIG_ALPHA_GAMMA)
#define GAMMA_BIAS _GAMMA_BIAS
-#else
-#define GAMMA_BIAS 0
-#endif
/*
* Memory spaces:
@@ -360,6 +352,7 @@ struct el_t2_frame_corrected {
#define vip volatile int *
#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
extern inline u8 t2_inb(unsigned long addr)
{
@@ -402,6 +395,17 @@ extern inline void t2_outl(u32 b, unsigned long addr)
mb();
}
+extern inline u64 t2_inq(unsigned long addr)
+{
+ return *(vulp) ((addr << 5) + T2_IO + 0x18);
+}
+
+extern inline void t2_outq(u64 b, unsigned long addr)
+{
+ *(vulp) ((addr << 5) + T2_IO + 0x18) = b;
+ mb();
+}
+
/*
* Memory functions.
@@ -572,7 +576,7 @@ __EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr)
it doesn't make sense to merge the pio and mmio routines. */
#define IOPORT(OS, NS) \
-__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \
+__EXTERN_INLINE u##NS t2_ioread##NS(const void __iomem *xaddr) \
{ \
if (t2_is_mmio(xaddr)) \
return t2_read##OS(xaddr); \
@@ -590,11 +594,13 @@ __EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr) \
IOPORT(b, 8)
IOPORT(w, 16)
IOPORT(l, 32)
+IOPORT(q, 64)
#undef IOPORT
#undef vip
#undef vuip
+#undef vulp
#undef __IO_PREFIX
#define __IO_PREFIX t2
diff --git a/arch/alpha/include/asm/div64.h b/arch/alpha/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/alpha/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 0ee6a5c99b16..ad5a59b035cb 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -4,13 +4,9 @@
extern const struct dma_map_ops alpha_pci_ops;
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+static inline const struct dma_map_ops *get_arch_dma_ops(void)
{
-#ifdef CONFIG_ALPHA_JENSEN
- return NULL;
-#else
return &alpha_pci_ops;
-#endif
}
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/include/asm/dma.h b/arch/alpha/include/asm/dma.h
index 28610ea7786d..3a88812b7165 100644
--- a/arch/alpha/include/asm/dma.h
+++ b/arch/alpha/include/asm/dma.h
@@ -82,11 +82,6 @@
just a wiring limit.
*/
-/* The maximum address for ISA DMA transfer on Alpha XL, due to a
- hardware SIO limitation, is 64MB.
-*/
-#define ALPHA_XL_MAX_ISA_DMA_ADDRESS 0x04000000UL
-
/* The maximum address for ISA DMA transfer on RUFFIAN,
 due to a hardware SIO limitation, is 16MB.
*/
@@ -107,9 +102,7 @@
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ISA_DMA_ADDRESS (alpha_mv.max_isa_dma_address)
#else
-# if defined(CONFIG_ALPHA_XL)
-# define MAX_ISA_DMA_ADDRESS ALPHA_XL_MAX_ISA_DMA_ADDRESS
-# elif defined(CONFIG_ALPHA_RUFFIAN)
+# if defined(CONFIG_ALPHA_RUFFIAN)
# define MAX_ISA_DMA_ADDRESS ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS
# elif defined(CONFIG_ALPHA_SABLE)
# define MAX_ISA_DMA_ADDRESS ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
@@ -365,13 +358,4 @@ extern void free_dma(unsigned int dmanr); /* release it again */
#define KERNEL_HAVE_CHECK_DMA
extern int check_dma(unsigned int dmanr);
-/* From PCI */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
-
#endif /* _ASM_DMA_H */
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 8049997fa372..50c82187e60e 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
+#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT))
/*
* These are used to set parameters in the core dumps.
@@ -120,12 +120,6 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
#define ELF_CORE_COPY_TASK_REGS(TASK, DEST) \
dump_elf_task(*(DEST), TASK)
-/* Similar, but for the FP registers. */
-
-extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task);
-#define ELF_CORE_COPY_FPREGS(TASK, DEST) \
- dump_elf_task_fp(*(DEST), TASK)
-
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This is trivial on Alpha,
but not so on other machines. */
@@ -139,16 +133,10 @@ extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task);
#define ELF_PLATFORM \
({ \
enum implver_enum i_ = implver(); \
- ( i_ == IMPLVER_EV4 ? "ev4" \
- : i_ == IMPLVER_EV5 \
- ? (amask(AMASK_BWX) ? "ev5" : "ev56") \
+ ( i_ == IMPLVER_EV5 ? "ev56" \
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
})
-#define SET_PERSONALITY(EX) \
- set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
- ? PER_LINUX_32BIT : PER_LINUX)
-
extern int alpha_l1i_cacheshape;
extern int alpha_l1d_cacheshape;
extern int alpha_l2_cacheshape;
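With SET_PERSONALITY() gone, the tightened elf_check_arch() is now what keeps EF_ALPHA_32BIT binaries out: they are rejected at load time rather than mapped to PER_LINUX_32BIT. A standalone restatement of the check (assumption: EM_ALPHA and EF_ALPHA_32BIT values as in the usual ELF headers; the struct is trimmed to the two fields tested):

#include <stdio.h>
#include <stdint.h>

#define EM_ALPHA	0x9026	/* Linux's historic Alpha machine value */
#define EF_ALPHA_32BIT	1	/* 32-bit address-space flag (assumption) */

struct elfhdr {
	uint16_t e_machine;
	uint32_t e_flags;
};

/* Mirror of the new elf_check_arch(): accept Alpha binaries only if
 * the 32-bit address-space flag is clear. */
static int elf_check_arch(const struct elfhdr *x)
{
	return x->e_machine == EM_ALPHA && !(x->e_flags & EF_ALPHA_32BIT);
}

int main(void)
{
	struct elfhdr plain = { EM_ALPHA, 0 };
	struct elfhdr flagged = { EM_ALPHA, EF_ALPHA_32BIT };

	printf("plain Alpha binary accepted: %d\n", elf_check_arch(&plain));
	printf("EF_ALPHA_32BIT binary accepted: %d\n", elf_check_arch(&flagged));
	return 0;
}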
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index 942924756cf2..64b42d9591fc 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -11,8 +11,8 @@
#define __ASM_ALPHA_FLOPPY_H
-#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
+#define fd_inb(base, reg) inb_p((base) + (reg))
+#define fd_outb(value, base, reg) outb_p(value, (base) + (reg))
#define fd_enable_dma() enable_dma(FLOPPY_DMA)
#define fd_disable_dma() disable_dma(FLOPPY_DMA)
@@ -20,7 +20,7 @@
#define fd_free_dma() free_dma(FLOPPY_DMA)
#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
-#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,virt_to_bus(addr))
+#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr))
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
@@ -43,17 +43,18 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
static int prev_dir;
int dir;
- dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+ dir = (mode != DMA_MODE_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (bus_addr
&& (addr != prev_addr || size != prev_size || dir != prev_dir)) {
/* different from last time -- unmap prev */
- pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir);
+ dma_unmap_single(&isa_bridge->dev, bus_addr, prev_size,
+ prev_dir);
bus_addr = 0;
}
if (!bus_addr) /* need to map it */
- bus_addr = pci_map_single(isa_bridge, addr, size, dir);
+ bus_addr = dma_map_single(&isa_bridge->dev, addr, size, dir);
/* remember this one as prev */
prev_addr = addr;
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h
index b9691405e56b..30b24135dd7a 100644
--- a/arch/alpha/include/asm/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
@@ -15,21 +15,27 @@ rdfpcr(void)
{
unsigned long tmp, ret;
+ preempt_disable();
+ if (current_thread_info()->status & TS_SAVED_FP) {
+ ret = current_thread_info()->fp[31];
+ } else {
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
- __asm__ __volatile__ (
- "ftoit $f0,%0\n\t"
- "mf_fpcr $f0\n\t"
- "ftoit $f0,%1\n\t"
- "itoft %0,$f0"
- : "=r"(tmp), "=r"(ret));
+ __asm__ __volatile__ (
+ "ftoit $f0,%0\n\t"
+ "mf_fpcr $f0\n\t"
+ "ftoit $f0,%1\n\t"
+ "itoft %0,$f0"
+ : "=r"(tmp), "=r"(ret));
#else
- __asm__ __volatile__ (
- "stt $f0,%0\n\t"
- "mf_fpcr $f0\n\t"
- "stt $f0,%1\n\t"
- "ldt $f0,%0"
- : "=m"(tmp), "=m"(ret));
+ __asm__ __volatile__ (
+ "stt $f0,%0\n\t"
+ "mf_fpcr $f0\n\t"
+ "stt $f0,%1\n\t"
+ "ldt $f0,%0"
+ : "=m"(tmp), "=m"(ret));
#endif
+ }
+ preempt_enable();
return ret;
}
@@ -39,21 +45,28 @@ wrfpcr(unsigned long val)
{
unsigned long tmp;
+ preempt_disable();
+ if (current_thread_info()->status & TS_SAVED_FP) {
+ current_thread_info()->status |= TS_RESTORE_FP;
+ current_thread_info()->fp[31] = val;
+ } else {
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
- __asm__ __volatile__ (
- "ftoit $f0,%0\n\t"
- "itoft %1,$f0\n\t"
- "mt_fpcr $f0\n\t"
- "itoft %0,$f0"
- : "=&r"(tmp) : "r"(val));
+ __asm__ __volatile__ (
+ "ftoit $f0,%0\n\t"
+ "itoft %1,$f0\n\t"
+ "mt_fpcr $f0\n\t"
+ "itoft %0,$f0"
+ : "=&r"(tmp) : "r"(val));
#else
- __asm__ __volatile__ (
- "stt $f0,%0\n\t"
- "ldt $f0,%1\n\t"
- "mt_fpcr $f0\n\t"
- "ldt $f0,%0"
- : "=m"(tmp) : "m"(val));
+ __asm__ __volatile__ (
+ "stt $f0,%0\n\t"
+ "ldt $f0,%1\n\t"
+ "mt_fpcr $f0\n\t"
+ "ldt $f0,%0"
+ : "=m"(tmp) : "m"(val));
#endif
+ }
+ preempt_enable();
}
static inline unsigned long
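The reworked rdfpcr()/wrfpcr() consult the thread's software FP state first: if TS_SAVED_FP is set, the saved fp[31] slot is read or written (and TS_RESTORE_FP raised on write) instead of touching the live FPCR. A minimal user-space model of that pattern (assumption: the bools and hw_fpcr stand in for the thread flags and the mf_fpcr/mt_fpcr instructions; names are illustrative):

#include <stdio.h>
#include <stdbool.h>

struct thread {
	bool saved_fp;		/* TS_SAVED_FP */
	bool restore_fp;	/* TS_RESTORE_FP */
	unsigned long fp31;	/* fp[31]: saved fpcr image */
};

static unsigned long hw_fpcr = 0xdead;	/* stands in for mf_fpcr/mt_fpcr */

static unsigned long rdfpcr(struct thread *t)
{
	return t->saved_fp ? t->fp31 : hw_fpcr;
}

static void wrfpcr(struct thread *t, unsigned long val)
{
	if (t->saved_fp) {
		t->restore_fp = true;	/* reload on return to user */
		t->fp31 = val;
	} else {
		hw_fpcr = val;
	}
}

int main(void)
{
	struct thread t = { .saved_fp = true, .fp31 = 0x1000 };

	wrfpcr(&t, 0x2000);
	printf("saved path: fpcr=%#lx, restore=%d\n", rdfpcr(&t), t.restore_fp);
	t.saved_fp = false;
	printf("live path: fpcr=%#lx\n", rdfpcr(&t));
	return 0;
}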
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index bfd3c01038f8..da67afd578fd 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -31,7 +31,8 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;
- pagefault_disable();
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
switch (op) {
case FUTEX_OP_SET:
@@ -53,8 +54,6 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
ret = -ENOSYS;
}
- pagefault_enable();
-
if (!ret)
*oval = oldval;
diff --git a/arch/alpha/include/asm/hwrpb.h b/arch/alpha/include/asm/hwrpb.h
index d8180e527a1e..db831cf8de10 100644
--- a/arch/alpha/include/asm/hwrpb.h
+++ b/arch/alpha/include/asm/hwrpb.h
@@ -135,7 +135,7 @@ struct crb_struct {
/* virtual->physical map */
unsigned long map_entries;
unsigned long map_pages;
- struct vf_map_struct map[1];
+ struct vf_map_struct map[];
};
struct memclust_struct {
@@ -152,7 +152,7 @@ struct memdesc_struct {
unsigned long chksum;
unsigned long optional_pa;
unsigned long numclusters;
- struct memclust_struct cluster[0];
+ struct memclust_struct cluster[];
};
struct dsr_struct {
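The two hwrpb.h hunks convert a one-element and a zero-length trailing array to C99 flexible array members, so sizeof no longer counts a phantom element and bounds checkers can see the real extent. A minimal illustration of the allocation pattern (assumption: field names trimmed to the essentials):

#include <stdio.h>
#include <stdlib.h>

struct memclust { unsigned long start_pfn, numpages; };

struct memdesc {
	unsigned long numclusters;
	struct memclust cluster[];	/* flexible array member */
};

int main(void)
{
	unsigned long n = 4;
	struct memdesc *md = malloc(sizeof(*md) + n * sizeof(md->cluster[0]));

	if (!md)
		return 1;
	md->numclusters = n;
	printf("header %zu bytes + %lu clusters of %zu bytes each\n",
	       sizeof(*md), n, sizeof(md->cluster[0]));
	free(md);
	return 0;
}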
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 1989b946a28d..fa3e4c246cda 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -7,18 +7,9 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
-#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
-/* The generic header contains only prototypes. Including it ensures that
- the implementation we have here matches that interface. */
-#include <asm-generic/iomap.h>
-
-/* We don't use IO slowdowns on the Alpha, but.. */
-#define __SLOW_DOWN_IO do { } while (0)
-#define SLOW_DOWN_IO do { } while (0)
-
/*
* Virtual -> physical identity mapping starts at this offset
*/
@@ -61,7 +52,7 @@ extern inline void set_hae(unsigned long new_hae)
* Change virtual addresses to physical addresses and vv.
*/
#ifdef USE_48_BIT_KSEG
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
{
return (unsigned long)address - IDENT_ADDR;
}
@@ -71,7 +62,7 @@ static inline void * phys_to_virt(unsigned long address)
return (void *) (address + IDENT_ADDR);
}
#else
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
{
unsigned long phys = (unsigned long)address;
@@ -91,7 +82,8 @@ static inline void * phys_to_virt(unsigned long address)
}
#endif
-#define page_to_phys(page) page_to_pa(page)
+#define virt_to_phys virt_to_phys
+#define phys_to_virt phys_to_virt
/* Maximum PIO space address supported? */
#define IO_SPACE_LIMIT 0xffff
@@ -107,15 +99,15 @@ static inline void * phys_to_virt(unsigned long address)
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;
-static inline unsigned long __deprecated virt_to_bus(void *address)
+static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
{
unsigned long phys = virt_to_phys(address);
unsigned long bus = phys + __direct_map_base;
return phys <= __direct_map_size ? bus : 0;
}
-#define isa_virt_to_bus virt_to_bus
+#define isa_virt_to_bus isa_virt_to_bus
-static inline void * __deprecated bus_to_virt(unsigned long address)
+static inline void * __deprecated isa_bus_to_virt(unsigned long address)
{
void *virt;
@@ -126,7 +118,7 @@ static inline void * __deprecated bus_to_virt(unsigned long address)
virt = phys_to_virt(address);
return (long)address <= 0 ? NULL : virt;
}
-#define isa_bus_to_virt bus_to_virt
+#define isa_bus_to_virt isa_bus_to_virt
/*
* There are different chipsets to interface the Alpha CPUs to the world.
@@ -151,9 +143,10 @@ static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
alpha_mv.mv_##NAME(b, addr); \
}
-REMAP1(unsigned int, ioread8, /**/)
-REMAP1(unsigned int, ioread16, /**/)
-REMAP1(unsigned int, ioread32, /**/)
+REMAP1(unsigned int, ioread8, const)
+REMAP1(unsigned int, ioread16, const)
+REMAP1(unsigned int, ioread32, const)
+REMAP1(u64, ioread64, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
@@ -162,6 +155,7 @@ REMAP1(u64, readq, const volatile)
REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
+REMAP2(u64, iowrite64, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
@@ -204,16 +198,10 @@ static inline int generic_is_mmio(const volatile void __iomem *a)
#else
-#if defined(CONFIG_ALPHA_APECS)
-# include <asm/core_apecs.h>
-#elif defined(CONFIG_ALPHA_CIA)
+#if defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
-#elif defined(CONFIG_ALPHA_JENSEN)
-# include <asm/jensen.h>
-#elif defined(CONFIG_ALPHA_LCA)
-# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
@@ -243,6 +231,12 @@ extern u32 inl(unsigned long port);
extern void outb(u8 b, unsigned long port);
extern void outw(u16 b, unsigned long port);
extern void outl(u32 b, unsigned long port);
+#define inb inb
+#define inw inw
+#define inl inl
+#define outb outb
+#define outw outw
+#define outl outl
extern u8 readb(const volatile void __iomem *addr);
extern u16 readw(const volatile void __iomem *addr);
@@ -252,6 +246,14 @@ extern void writeb(u8 b, volatile void __iomem *addr);
extern void writew(u16 b, volatile void __iomem *addr);
extern void writel(u32 b, volatile void __iomem *addr);
extern void writeq(u64 b, volatile void __iomem *addr);
+#define readb readb
+#define readw readw
+#define readl readl
+#define readq readq
+#define writeb writeb
+#define writew writew
+#define writel writel
+#define writeq writeq
extern u8 __raw_readb(const volatile void __iomem *addr);
extern u16 __raw_readw(const volatile void __iomem *addr);
@@ -261,14 +263,33 @@ extern void __raw_writeb(u8 b, volatile void __iomem *addr);
extern void __raw_writew(u16 b, volatile void __iomem *addr);
extern void __raw_writel(u32 b, volatile void __iomem *addr);
extern void __raw_writeq(u64 b, volatile void __iomem *addr);
+#define __raw_readb __raw_readb
+#define __raw_readw __raw_readw
+#define __raw_readl __raw_readl
+#define __raw_readq __raw_readq
+#define __raw_writeb __raw_writeb
+#define __raw_writew __raw_writew
+#define __raw_writel __raw_writel
+#define __raw_writeq __raw_writeq
+
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern u64 ioread64(const void __iomem *);
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite64(u64, void __iomem *);
+
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
+
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
-/*
- * Mapping from port numbers to __iomem space is pretty easy.
- */
-
-/* These two have to be extern inline because of the extern prototype from
- <asm-generic/iomap.h>. It is not legal to mix "extern" and "static" for
- the same declaration. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
@@ -278,19 +299,15 @@ extern inline void ioport_unmap(void __iomem *addr)
{
}
+#define ioport_map ioport_map
+#define ioport_unmap ioport_unmap
+
static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}
-static inline void __iomem * ioremap_nocache(unsigned long offset,
- unsigned long size)
-{
- return ioremap(offset, size);
-}
-
-#define ioremap_wc ioremap_nocache
-#define ioremap_uc ioremap_nocache
+#define ioremap_wc ioremap
static inline void iounmap(volatile void __iomem *addr)
{
@@ -314,16 +331,20 @@ static inline int __is_mmio(const volatile void __iomem *addr)
*/
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
-extern inline unsigned int ioread8(void __iomem *addr)
+extern inline unsigned int ioread8(const void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
-extern inline unsigned int ioread16(void __iomem *addr)
+extern inline unsigned int ioread16(const void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
@@ -361,10 +382,26 @@ extern inline void outw(u16 b, unsigned long port)
}
#endif
+#define ioread8 ioread8
+#define ioread16 ioread16
+#define iowrite8 iowrite8
+#define iowrite16 iowrite16
+
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
-extern inline unsigned int ioread32(void __iomem *addr)
+extern inline unsigned int ioread32(const void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ mb();
+ return ret;
+}
+
+extern inline u64 ioread64(const void __iomem *addr)
+{
+ u64 ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr);
mb();
return ret;
}
@@ -375,6 +412,12 @@ extern inline void iowrite32(u32 b, void __iomem *addr)
IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}
+extern inline void iowrite64(u64 b, void __iomem *addr)
+{
+ mb();
+ IO_CONCAT(__IO_PREFIX, iowrite64)(b, addr);
+}
+
extern inline u32 inl(unsigned long port)
{
return ioread32(ioport_map(port, 4));
@@ -386,6 +429,11 @@ extern inline void outl(u32 b, unsigned long port)
}
#endif
+#define ioread32 ioread32
+#define ioread64 ioread64
+#define iowrite32 iowrite32
+#define iowrite64 iowrite64
+
#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
@@ -409,14 +457,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
extern inline u8 readb(const volatile void __iomem *addr)
{
- u8 ret = __raw_readb(addr);
+ u8 ret;
+ mb();
+ ret = __raw_readb(addr);
mb();
return ret;
}
extern inline u16 readw(const volatile void __iomem *addr)
{
- u16 ret = __raw_readw(addr);
+ u16 ret;
+ mb();
+ ret = __raw_readw(addr);
mb();
return ret;
}
@@ -457,14 +509,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
extern inline u32 readl(const volatile void __iomem *addr)
{
- u32 ret = __raw_readl(addr);
+ u32 ret;
+ mb();
+ ret = __raw_readl(addr);
mb();
return ret;
}
extern inline u64 readq(const volatile void __iomem *addr)
{
- u64 ret = __raw_readq(addr);
+ u64 ret;
+ mb();
+ ret = __raw_readq(addr);
mb();
return ret;
}
@@ -482,10 +538,12 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
}
#endif
-#define ioread16be(p) be16_to_cpu(ioread16(p))
-#define ioread32be(p) be32_to_cpu(ioread32(p))
-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
+#define ioread16be(p) swab16(ioread16(p))
+#define ioread32be(p) swab32(ioread32(p))
+#define ioread64be(p) swab64(ioread64(p))
+#define iowrite16be(v,p) iowrite16(swab16(v), (p))
+#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+#define iowrite64be(v,p) iowrite64(swab64(v), (p))
#define inb_p inb
#define inw_p inw
@@ -493,14 +551,48 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define outb_p outb
#define outw_p outw
#define outl_p outl
-#define readb_relaxed(addr) __raw_readb(addr)
-#define readw_relaxed(addr) __raw_readw(addr)
-#define readl_relaxed(addr) __raw_readl(addr)
-#define readq_relaxed(addr) __raw_readq(addr)
-#define writeb_relaxed(b, addr) __raw_writeb(b, addr)
-#define writew_relaxed(b, addr) __raw_writew(b, addr)
-#define writel_relaxed(b, addr) __raw_writel(b, addr)
-#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
+
+extern u8 readb_relaxed(const volatile void __iomem *addr);
+extern u16 readw_relaxed(const volatile void __iomem *addr);
+extern u32 readl_relaxed(const volatile void __iomem *addr);
+extern u64 readq_relaxed(const volatile void __iomem *addr);
+#define readb_relaxed readb_relaxed
+#define readw_relaxed readw_relaxed
+#define readl_relaxed readl_relaxed
+#define readq_relaxed readq_relaxed
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
+extern inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readb(addr);
+}
+
+extern inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readw(addr);
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
+extern inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readl(addr);
+}
+
+extern inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readq(addr);
+}
+#endif
+
+#define writeb_relaxed writeb
+#define writew_relaxed writew
+#define writel_relaxed writel
+#define writeq_relaxed writeq
/*
* String version of IO memory access ops:
@@ -520,6 +612,10 @@ static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
_memset_c_io(addr, 0x0001000100010001UL * c, len);
}
+#define memset_io memset_io
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
+
/*
* String versions of in/out ops:
*/
@@ -530,45 +626,27 @@ extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
-/*
- * The Alpha Jensen hardware for some rather strange reason puts
- * the RTC clock at 0x170 instead of 0x70. Probably due to some
- * misguided idea about using 0x70 for NMI stuff.
- *
- * These defines will override the defaults when doing RTC queries
- */
+#define insb insb
+#define insw insw
+#define insl insl
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
-#ifdef CONFIG_ALPHA_GENERIC
-# define RTC_PORT(x) ((x) + alpha_mv.rtc_port)
-#else
-# ifdef CONFIG_ALPHA_JENSEN
-# define RTC_PORT(x) (0x170+(x))
-# else
-# define RTC_PORT(x) (0x70 + (x))
-# endif
-#endif
+#define RTC_PORT(x) (0x70 + (x))
#define RTC_ALWAYS_BCD 0
-/*
- * Some mucking forons use if[n]def writeq to check if platform has it.
- * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
- * to play with; for now just use cpp anti-recursion logics and make sure
- * that damn thing is defined and expands to itself.
- */
-
-#define writeq writeq
-#define readq readq
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p) __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
+#define ioread64 ioread64
+#define iowrite64 iowrite64
+#define ioread8_rep ioread8_rep
+#define ioread16_rep ioread16_rep
+#define ioread32_rep ioread32_rep
+#define iowrite8_rep iowrite8_rep
+#define iowrite16_rep iowrite16_rep
+#define iowrite32_rep iowrite32_rep
+#define pci_iounmap pci_iounmap
+
+#include <asm-generic/io.h>
#endif /* __KERNEL__ */
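Alpha is little-endian, so be16_to_cpu()/be32_to_cpu() reduce to plain byte swaps; the hunk above spells that out with swab16/swab32 and adds the 64-bit variants alongside the new ioread64/iowrite64. A host-side check of the swap (assumption: user-space reimplementations of swab16/swab32, not the kernel's):

#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	printf("swab16(0x1234) = 0x%04x\n", swab16(0x1234));
	printf("swab32(0x12345678) = 0x%08x\n", swab32(0x12345678));
	return 0;
}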
diff --git a/arch/alpha/include/asm/io_trivial.h b/arch/alpha/include/asm/io_trivial.h
index ba3d8f0cfe0c..00032093bcfc 100644
--- a/arch/alpha/include/asm/io_trivial.h
+++ b/arch/alpha/include/asm/io_trivial.h
@@ -6,16 +6,16 @@
/* This file may be included multiple times. */
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
-__EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a)
+__EXTERN_INLINE u8
+IO_CONCAT(__IO_PREFIX,ioread8)(const void __iomem *a)
{
- return __kernel_ldbu(*(volatile u8 __force *)a);
+ return __kernel_ldbu(*(const volatile u8 __force *)a);
}
-__EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a)
+__EXTERN_INLINE u16
+IO_CONCAT(__IO_PREFIX,ioread16)(const void __iomem *a)
{
- return __kernel_ldwu(*(volatile u16 __force *)a);
+ return __kernel_ldwu(*(const volatile u16 __force *)a);
}
__EXTERN_INLINE void
@@ -32,10 +32,10 @@ IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a)
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
-__EXTERN_INLINE unsigned int
-IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a)
+__EXTERN_INLINE u32
+IO_CONCAT(__IO_PREFIX,ioread32)(const void __iomem *a)
{
- return *(volatile u32 __force *)a;
+ return *(const volatile u32 __force *)a;
}
__EXTERN_INLINE void
@@ -43,6 +43,18 @@ IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a)
{
*(volatile u32 __force *)a = b;
}
+
+__EXTERN_INLINE u64
+IO_CONCAT(__IO_PREFIX,ioread64)(const void __iomem *a)
+{
+ return *(const volatile u64 __force *)a;
+}
+
+__EXTERN_INLINE void
+IO_CONCAT(__IO_PREFIX,iowrite64)(u64 b, void __iomem *a)
+{
+ *(volatile u64 __force *)a = b;
+}
#endif
#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
@@ -73,14 +85,14 @@ IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
__EXTERN_INLINE u8
IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
{
- void __iomem *addr = (void __iomem *)a;
+ const void __iomem *addr = (const void __iomem *)a;
return IO_CONCAT(__IO_PREFIX,ioread8)(addr);
}
__EXTERN_INLINE u16
IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
{
- void __iomem *addr = (void __iomem *)a;
+ const void __iomem *addr = (const void __iomem *)a;
return IO_CONCAT(__IO_PREFIX,ioread16)(addr);
}
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 432402c8e47f..d83b26b6660f 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -31,16 +31,11 @@
# define NR_IRQS (32768 + 16) /* marvel - 32 pids */
# endif
-#elif defined(CONFIG_ALPHA_CABRIOLET) || \
- defined(CONFIG_ALPHA_EB66P) || \
- defined(CONFIG_ALPHA_EB164) || \
- defined(CONFIG_ALPHA_PC164) || \
+#elif defined(CONFIG_ALPHA_PC164) || \
defined(CONFIG_ALPHA_LX164)
# define NR_IRQS 35
-#elif defined(CONFIG_ALPHA_EB66) || \
- defined(CONFIG_ALPHA_EB64P) || \
- defined(CONFIG_ALPHA_MIKASA)
+#elif defined(CONFIG_ALPHA_MIKASA)
# define NR_IRQS 32
#elif defined(CONFIG_ALPHA_ALCOR) || \
@@ -55,7 +50,6 @@
# define NR_IRQS 40
#elif defined(CONFIG_ALPHA_DP264) || \
- defined(CONFIG_ALPHA_LYNX) || \
defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
diff --git a/arch/alpha/include/asm/irq_regs.h b/arch/alpha/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/alpha/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h
deleted file mode 100644
index 436dc905b6ad..000000000000
--- a/arch/alpha/include/asm/jensen.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ALPHA_JENSEN_H
-#define __ALPHA_JENSEN_H
-
-#include <asm/compiler.h>
-
-/*
- * Defines for the AlphaPC EISA IO and memory address space.
- */
-
-/*
- * NOTE! The memory operations do not set any memory barriers, as it's
- * not needed for cases like a frame buffer that is essentially memory-like.
- * You need to do them by hand if the operations depend on ordering.
- *
- * Similarly, the port IO operations do a "mb" only after a write operation:
- * if an mb is needed before (as in the case of doing memory mapped IO
- * first, and then a port IO operation to the same device), it needs to be
- * done by hand.
- *
- * After the above has bitten me 100 times, I'll give up and just do the
- * mb all the time, but right now I'm hoping this will work out. Avoiding
- * mb's may potentially be a noticeable speed improvement, but I can't
- * honestly say I've tested it.
- *
- * Handling interrupts that need to do mb's to synchronize to non-interrupts
- * is another fun race area. Don't do it (because if you do, I'll have to
- * do *everything* with interrupts disabled, ugh).
- */
-
-/*
- * EISA Interrupt Acknowledge address
- */
-#define EISA_INTA (IDENT_ADDR + 0x100000000UL)
-
-/*
- * FEPROM addresses
- */
-#define EISA_FEPROM0 (IDENT_ADDR + 0x180000000UL)
-#define EISA_FEPROM1 (IDENT_ADDR + 0x1A0000000UL)
-
-/*
- * VL82C106 base address
- */
-#define EISA_VL82C106 (IDENT_ADDR + 0x1C0000000UL)
-
-/*
- * EISA "Host Address Extension" address (bits 25-31 of the EISA address)
- */
-#define EISA_HAE (IDENT_ADDR + 0x1D0000000UL)
-
-/*
- * "SYSCTL" register address
- */
-#define EISA_SYSCTL (IDENT_ADDR + 0x1E0000000UL)
-
-/*
- * "spare" register address
- */
-#define EISA_SPARE (IDENT_ADDR + 0x1F0000000UL)
-
-/*
- * EISA memory address offset
- */
-#define EISA_MEM (IDENT_ADDR + 0x200000000UL)
-
-/*
- * EISA IO address offset
- */
-#define EISA_IO (IDENT_ADDR + 0x300000000UL)
-
-
-#ifdef __KERNEL__
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __IO_EXTERN_INLINE
-#endif
-
-/*
- * Handle the "host address register". This needs to be set
- * to the high 7 bits of the EISA address. This is also needed
- * for EISA IO addresses, which are only 16 bits wide (the
- * hae needs to be set to 0).
- *
- * HAE isn't needed for the local IO operations, though.
- */
-
-#define JENSEN_HAE_ADDRESS EISA_HAE
-#define JENSEN_HAE_MASK 0x1ffffff
-
-__EXTERN_INLINE void jensen_set_hae(unsigned long addr)
-{
- /* hae on the Jensen is bits 31:25 shifted right */
- addr >>= 25;
- if (addr != alpha_mv.hae_cache)
- set_hae(addr);
-}
-
-#define vuip volatile unsigned int *
-
-/*
- * IO functions
- *
- * The "local" functions are those that don't go out to the EISA bus,
- * but instead act on the VL82C106 chip directly.. This is mainly the
- * keyboard, RTC, printer and first two serial lines..
- *
- * The local stuff makes for some complications, but it seems to be
- * gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H
- * convinced that I need one of the newer machines.
- */
-
-static inline unsigned int jensen_local_inb(unsigned long addr)
-{
- return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
-}
-
-static inline void jensen_local_outb(u8 b, unsigned long addr)
-{
- *(vuip)((addr << 9) + EISA_VL82C106) = b;
- mb();
-}
-
-static inline unsigned int jensen_bus_inb(unsigned long addr)
-{
- long result;
-
- jensen_set_hae(0);
- result = *(volatile int *)((addr << 7) + EISA_IO + 0x00);
- return __kernel_extbl(result, addr & 3);
-}
-
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
- mb();
-}
-
-/*
- * It seems gcc is not very good at optimizing away logical
- * operations that result in operations across inline functions.
- * Which is why this is a macro.
- */
-
-#define jensen_is_local(addr) ( \
-/* keyboard */ (addr == 0x60 || addr == 0x64) || \
-/* RTC */ (addr == 0x170 || addr == 0x171) || \
-/* mb COM2 */ (addr >= 0x2f8 && addr <= 0x2ff) || \
-/* mb LPT1 */ (addr >= 0x3bc && addr <= 0x3be) || \
-/* mb COM1 */ (addr >= 0x3f8 && addr <= 0x3ff))
-
-__EXTERN_INLINE u8 jensen_inb(unsigned long addr)
-{
- if (jensen_is_local(addr))
- return jensen_local_inb(addr);
- else
- return jensen_bus_inb(addr);
-}
-
-__EXTERN_INLINE void jensen_outb(u8 b, unsigned long addr)
-{
- if (jensen_is_local(addr))
- jensen_local_outb(b, addr);
- else
- jensen_bus_outb(b, addr);
-}
-
-__EXTERN_INLINE u16 jensen_inw(unsigned long addr)
-{
- long result;
-
- jensen_set_hae(0);
- result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
-}
-
-__EXTERN_INLINE u32 jensen_inl(unsigned long addr)
-{
- jensen_set_hae(0);
- return *(vuip) ((addr << 7) + EISA_IO + 0x60);
-}
-
-__EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001;
- mb();
-}
-
-__EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr)
-{
- jensen_set_hae(0);
- *(vuip) ((addr << 7) + EISA_IO + 0x60) = b;
- mb();
-}
-
-/*
- * Memory functions.
- */
-
-__EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- long result;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00);
- result >>= (addr & 3) * 8;
- return 0xffUL & result;
-}
-
-__EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- long result;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
-}
-
-__EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- return *(vuip) ((addr << 7) + EISA_MEM + 0x60);
-}
-
-__EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- unsigned long r0, r1;
-
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- addr = (addr << 7) + EISA_MEM + 0x60;
- r0 = *(vuip) (addr);
- r1 = *(vuip) (addr + (4 << 7));
- return r1 << 32 | r0;
-}
-
-__EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101;
-}
-
-__EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001;
-}
-
-__EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- *(vuip) ((addr << 7) + EISA_MEM + 0x60) = b;
-}
-
-__EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr)
-{
- unsigned long addr = (unsigned long) xaddr;
- jensen_set_hae(addr);
- addr &= JENSEN_HAE_MASK;
- addr = (addr << 7) + EISA_MEM + 0x60;
- *(vuip) (addr) = b;
- *(vuip) (addr + (4 << 7)) = b >> 32;
-}
-
-__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr)
-{
- return (void __iomem *)addr;
-}
-
-__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr,
- unsigned long size)
-{
- return (void __iomem *)(addr + 0x100000000ul);
-}
-
-__EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
-{
- return (long)addr >= 0;
-}
-
-__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
-{
- return (unsigned long)addr >= 0x100000000ul;
-}
-
-/* New-style ioread interface. All the routines are so ugly for Jensen
- that it doesn't make sense to merge them. */
-
-#define IOPORT(OS, NS) \
-__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \
-{ \
- if (jensen_is_mmio(xaddr)) \
- return jensen_read##OS(xaddr - 0x100000000ul); \
- else \
- return jensen_in##OS((unsigned long)xaddr); \
-} \
-__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \
-{ \
- if (jensen_is_mmio(xaddr)) \
- jensen_write##OS(b, xaddr - 0x100000000ul); \
- else \
- jensen_out##OS(b, (unsigned long)xaddr); \
-}
-
-IOPORT(b, 8)
-IOPORT(w, 16)
-IOPORT(l, 32)
-
-#undef IOPORT
-
-#undef vuip
-
-#undef __IO_PREFIX
-#define __IO_PREFIX jensen
-#define jensen_trivial_rw_bw 0
-#define jensen_trivial_rw_lq 0
-#define jensen_trivial_io_bw 0
-#define jensen_trivial_io_lq 0
-#define jensen_trivial_iounmap 1
-#include <asm/io_trivial.h>
-
-#ifdef __IO_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __IO_EXTERN_INLINE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __ALPHA_JENSEN_H */
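The removed Jensen routines split every EISA memory address around the Host Address Extension register: bits 31:25 go into the HAE (jensen_set_hae() shifts right by 25) and the low 25 bits are encoded into the sparse window with a 7-bit shift. A user-space sketch of that split (assumption: the EISA_MEM base is omitted and the address is illustrative):

#include <stdio.h>
#include <stdint.h>

#define JENSEN_HAE_MASK 0x1ffffffUL	/* low 25 bits, from the removed header */

int main(void)
{
	uint64_t addr = 0x03fffff0;		/* example EISA memory address */
	uint64_t hae = addr >> 25;		/* programmed into the HAE */
	uint64_t low = addr & JENSEN_HAE_MASK;	/* offset inside the window */
	uint64_t sparse = (low << 7) + 0x00;	/* byte lane; EISA_MEM base omitted */

	printf("addr 0x%llx -> HAE %llu, offset 0x%llx, sparse 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)hae,
	       (unsigned long long)low, (unsigned long long)sparse);
	return 0;
}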
diff --git a/arch/alpha/include/asm/kdebug.h b/arch/alpha/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/alpha/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h
deleted file mode 100644
index 651714b45729..000000000000
--- a/arch/alpha/include/asm/kmap_types.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_KMAP_TYPES_H
-#define _ASM_KMAP_TYPES_H
-
-/* Dummy header just to define km_type. */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index fab26a1c93d5..88eb398947a5 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -52,33 +52,40 @@ static __inline__ long local_sub_return(long i, local_t * l)
return result;
}
-#define local_cmpxchg(l, o, n) \
- (cmpxchg_local(&((l)->a.counter), (o), (n)))
+static __inline__ long local_cmpxchg(local_t *l, long old, long new)
+{
+ return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static __inline__ bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+ return try_cmpxchg_local(&l->a.counter, (s64 *)old, new);
+}
+
#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
/**
- * local_add_unless - add unless the number is a given value
+ * local_add_unless - add unless the number is already a given value
* @l: pointer of type local_t
* @a: the amount to add to l...
* @u: ...unless l is equal to u.
*
- * Atomically adds @a to @l, so long as it was not @u.
- * Returns non-zero if @l was not @u, and zero otherwise.
+ * Atomically adds @a to @l, if @l was not already @u.
+ * Returns true if the addition was done.
*/
-#define local_add_unless(l, a, u) \
-({ \
- long c, old; \
- c = local_read(l); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = local_cmpxchg((l), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+static __inline__ bool
+local_add_unless(local_t *l, long a, long u)
+{
+ long c = local_read(l);
+
+ do {
+ if (unlikely(c == u))
+ return false;
+ } while (!local_try_cmpxchg(l, &c, c + a));
+
+ return true;
+}
+
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
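local_add_unless() is now an inline try_cmpxchg loop: reread the counter on each failed exchange and bail out as soon as the forbidden value shows up. A user-space analogue (assumption: C11 atomics stand in for the kernel's local_try_cmpxchg(); the retry-loop semantics are the same):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add a to *l unless *l equals u; true if the addition happened. */
static bool add_unless(atomic_long *l, long a, long u)
{
	long c = atomic_load(l);

	do {
		if (c == u)
			return false;
	} while (!atomic_compare_exchange_weak(l, &c, c + a));

	return true;
}

int main(void)
{
	atomic_long v = 0;

	printf("add_unless(v, 1, 0) on v==0: %d (refused)\n",
	       add_unless(&v, 1, 0));
	atomic_store(&v, 5);
	printf("add_unless(v, 1, 0) on v==5: %d, v now %ld\n",
	       add_unless(&v, 1, 0), atomic_load(&v));
	return 0;
}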
diff --git a/arch/alpha/include/asm/local64.h b/arch/alpha/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/alpha/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index a6b73c6d10ee..490fc880bb3f 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -46,13 +46,15 @@ struct alpha_machine_vector
void (*mv_pci_tbi)(struct pci_controller *hose,
dma_addr_t start, dma_addr_t end);
- unsigned int (*mv_ioread8)(void __iomem *);
- unsigned int (*mv_ioread16)(void __iomem *);
- unsigned int (*mv_ioread32)(void __iomem *);
+ u8 (*mv_ioread8)(const void __iomem *);
+ u16 (*mv_ioread16)(const void __iomem *);
+ u32 (*mv_ioread32)(const void __iomem *);
+ u64 (*mv_ioread64)(const void __iomem *);
void (*mv_iowrite8)(u8, void __iomem *);
void (*mv_iowrite16)(u16, void __iomem *);
void (*mv_iowrite32)(u32, void __iomem *);
+ void (*mv_iowrite64)(u64, void __iomem *);
u8 (*mv_readb)(const volatile void __iomem *);
u16 (*mv_readw)(const volatile void __iomem *);
@@ -70,15 +72,6 @@ struct alpha_machine_vector
int (*mv_is_ioaddr)(unsigned long);
int (*mv_is_mmio)(const volatile void __iomem *);
- void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
- struct task_struct *);
- void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
-
- void (*mv_flush_tlb_current)(struct mm_struct *);
- void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr);
-
void (*update_irq_hw)(unsigned long, unsigned long, int);
void (*ack_irq)(unsigned long);
void (*device_interrupt)(unsigned long vector);
@@ -99,12 +92,6 @@ struct alpha_machine_vector
const char *vector_name;
- /* NUMA information */
- int (*pa_to_nid)(unsigned long);
- int (*cpuid_to_nid)(int);
- unsigned long (*node_mem_start)(int);
- unsigned long (*node_mem_size)(int);
-
/* System specific parameters. */
union {
struct {
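The machvec hunk adds mv_ioread64/mv_iowrite64 slots because CONFIG_ALPHA_GENERIC kernels dispatch every accessor through this per-platform table of function pointers. A toy model of that dispatch (assumption: two slots only, backed by plain memory rather than device registers):

#include <stdio.h>
#include <stdint.h>

struct machvec {
	uint64_t (*ioread64)(const void *);
	void (*iowrite64)(uint64_t, void *);
};

static uint64_t toy_ioread64(const void *a) { return *(const uint64_t *)a; }
static void toy_iowrite64(uint64_t b, void *a) { *(uint64_t *)a = b; }

int main(void)
{
	struct machvec mv = { toy_ioread64, toy_iowrite64 };
	uint64_t reg = 0;

	mv.iowrite64(0x1122334455667788ULL, &reg);	/* indirect call */
	printf("read back %#llx\n", (unsigned long long)mv.ioread64(&reg));
	return 0;
}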
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 6d7d9bc1b4b8..eee8fe836a59 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -71,9 +71,7 @@ __reload_thread(struct pcb_struct *pcb)
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN (alpha_mv.max_asn)
#else
-# ifdef CONFIG_ALPHA_EV4
-# define MAX_ASN EV4_MAX_ASN
-# elif defined(CONFIG_ALPHA_EV5)
+# if defined(CONFIG_ALPHA_EV56)
# define MAX_ASN EV5_MAX_ASN
# else
# define MAX_ASN EV6_MAX_ASN
@@ -162,27 +160,9 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
-__EXTERN_INLINE void
-ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next)
-{
- /* As described, ASN's are broken for TLB usage. But we can
- optimize for switching between threads -- if the mm is
- unchanged from current we needn't flush. */
- /* ??? May not be needed because EV4 PALcode recognizes that
- ASN's are broken and does a tbiap itself on swpctx, under
- the "Must set ASN or flush" rule. At least this is true
- for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
- I'm going to leave this here anyway, just to Be Sure. -- r~ */
- if (prev_mm != next_mm)
- tbiap();
-
- /* Do continue to allocate ASNs, because we can still use them
- to avoid flushing the icache. */
- ev5_switch_mm(prev_mm, next_mm, next);
-}
-
extern void __load_new_mm_context(struct mm_struct *);
+asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr,
+ long cause, struct pt_regs *regs);
#ifdef CONFIG_SMP
#define check_mmu_context() \
@@ -207,28 +187,10 @@ ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
__load_new_mm_context(next_mm);
}
-__EXTERN_INLINE void
-ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
-{
- __load_new_mm_context(next_mm);
- tbiap();
-}
-
-#define deactivate_mm(tsk,mm) do { } while (0)
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
-# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c))
-# define activate_mm(x,y) ev4_activate_mm((x),(y))
-# else
-# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
-# define activate_mm(x,y) ev5_activate_mm((x),(y))
-# endif
-#endif
+#define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
+#define activate_mm(x,y) ev5_activate_mm((x),(y))
+#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@@ -242,12 +204,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
-extern inline void
-destroy_context(struct mm_struct *mm)
-{
- /* Nothing to do. */
-}
-
+#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -255,6 +212,8 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
+#include <asm-generic/mmu_context.h>
+
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
deleted file mode 100644
index 7ee144f484f1..000000000000
--- a/arch/alpha/include/asm/mmzone.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
- * Adapted for the alpha wildfire architecture Jan 2001.
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#include <asm/smp.h>
-
-struct bootmem_data_t; /* stupid forward decl. */
-
-/*
- * Following are macros that are specific to this numa platform.
- */
-
-extern pg_data_t node_data[];
-
-#define alpha_pa_to_nid(pa) \
- (alpha_mv.pa_to_nid \
- ? alpha_mv.pa_to_nid(pa) \
- : (0))
-#define node_mem_start(nid) \
- (alpha_mv.node_mem_start \
- ? alpha_mv.node_mem_start(nid) \
- : (0UL))
-#define node_mem_size(nid) \
- (alpha_mv.node_mem_size \
- ? alpha_mv.node_mem_size(nid) \
- : ((nid) ? (0UL) : (~0UL)))
-
-#define pa_to_nid(pa) alpha_pa_to_nid(pa)
-#define NODE_DATA(nid) (&node_data[(nid)])
-
-#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-
-#if 1
-#define PLAT_NODE_DATA_LOCALNR(p, n) \
- (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
-#else
-static inline unsigned long
-PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
-{
- unsigned long temp;
- temp = p >> PAGE_SHIFT;
- return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
-}
-#endif
-
-#ifdef CONFIG_DISCONTIGMEM
-
-/*
- * Following are macros that each numa implementation must define.
- */
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
-
-/*
- * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
- * and returns the kaddr corresponding to first physical page in the
- * node's mem_map.
- */
-#define LOCAL_BASE_ADDR(kaddr) \
- ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
- << PAGE_SHIFT))
-
-/* XXX: FIXME -- nyc */
-#define kern_addr_valid(kaddr) (0)
-
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
-#define pte_pfn(pte) (pte_val(pte) >> 32)
-
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- unsigned long pfn; \
- \
- pfn = page_to_pfn(page) << 32; \
- pte_val(pte) = pfn | pgprot_val(pgprot); \
- \
- pte; \
-})
-
-#define pte_page(x) \
-({ \
- unsigned long kvirt; \
- struct page * __xx; \
- \
- kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
- __xx = virt_to_page(kvirt); \
- \
- __xx; \
-})
-
-#define page_to_pa(page) \
- (page_to_pfn(page) << PAGE_SHIFT)
-
-#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
-#define pfn_valid(pfn) \
- (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \
- node_spanned_pages(pfn_to_nid(pfn))) \
-
-#define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index f3fb2848470a..5ec4c77e432e 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -4,11 +4,7 @@
#include <linux/const.h>
#include <asm/pal.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 13
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#include <vdso/page.h>
#ifndef __ASSEMBLY__
@@ -17,9 +13,8 @@
extern void clear_page(void *page);
#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
+ vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
extern void copy_page(void * _to, void * _from);
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
@@ -83,15 +78,9 @@ typedef struct page *pgtable_t;
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
-#ifndef CONFIG_DISCONTIGMEM
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#endif /* CONFIG_DISCONTIGMEM */
-
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index cf6bc1e64d66..6c04fcbdc8ed 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -56,12 +56,6 @@ struct pci_controller {
/* IOMMU controls. */
-/* TODO: integrate with include/asm-generic/pci.h ? */
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? 15 : 14;
-}
-
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
static inline int pci_proc_domain(struct pci_bus *bus)
@@ -94,7 +88,4 @@ extern void pci_adjust_legacy_attr(struct pci_bus *bus,
enum pci_mmap_state mmap_type);
#define HAVE_PCI_LEGACY 1
-extern int pci_create_resource_files(struct pci_dev *dev);
-extern void pci_remove_resource_files(struct pci_dev *dev);
-
#endif /* __ALPHA_PCI_H */
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index a1a29f60934c..68be7adbfe58 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -5,7 +5,7 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
-#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+#include <asm-generic/pgalloc.h>
/*
* Allocate and free page tables. The xxx_kernel() versions are
@@ -18,7 +18,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
@@ -34,23 +33,4 @@ pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-static inline void
-pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- free_page((unsigned long)pgd);
-}
-
-static inline pmd_t *
-pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- pmd_t *ret = (pmd_t *)__get_free_page(GFP_PGTABLE_USER);
- return ret;
-}
-
-static inline void
-pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
- free_page((unsigned long)pmd);
-}
-
#endif /* _ALPHA_PGALLOC_H */
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 299791ce14b6..02e8817a8921 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -26,7 +26,6 @@ struct vm_area_struct;
* hook is made available.
*/
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
@@ -46,7 +45,6 @@ struct vm_area_struct;
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
@@ -75,6 +73,9 @@ struct vm_area_struct;
#define _PAGE_DIRTY 0x20000
#define _PAGE_ACCESSED 0x40000
+/* We borrow bit 39 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE 0x8000000000UL
+
/*
* NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
* by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
@@ -117,23 +118,6 @@ struct vm_area_struct;
* arch/alpha/mm/fault.c)
*/
/* xwr */
-#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW)
-#define __P010 _PAGE_P(_PAGE_FOE)
-#define __P011 _PAGE_P(_PAGE_FOE)
-#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR)
-#define __P101 _PAGE_P(_PAGE_FOW)
-#define __P110 _PAGE_P(0)
-#define __P111 _PAGE_P(0)
-
-#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW)
-#define __S010 _PAGE_S(_PAGE_FOE)
-#define __S011 _PAGE_S(_PAGE_FOE)
-#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR)
-#define __S101 _PAGE_S(_PAGE_FOW)
-#define __S110 _PAGE_S(0)
-#define __S111 _PAGE_S(0)
/*
* pgprot_noncached() is only for infiniband pci support, and a real
@@ -203,10 +187,10 @@ extern unsigned long __zero_page(void);
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-#ifndef CONFIG_DISCONTIGMEM
-#define page_to_pa(page) (((page) - mem_map) << PAGE_SHIFT)
+#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define PFN_PTE_SHIFT 32
+#define pte_pfn(pte) (pte_val(pte) >> PFN_PTE_SHIFT)
-#define pte_pfn(pte) (pte_val(pte) >> 32)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot) \
({ \
@@ -215,7 +199,6 @@ extern unsigned long __zero_page(void);
pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
pte; \
})
-#endif
extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
@@ -236,13 +219,14 @@ pmd_page_vaddr(pmd_t pmd)
return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}
-#ifndef CONFIG_DISCONTIGMEM
-#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
-#define pud_page(pud) (mem_map + ((pud_val(pud) & _PFN_MASK) >> 32))
-#endif
+#define pmd_pfn(pmd) (pmd_val(pmd) >> 32)
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
+#define pud_page(pud) (pfn_to_page(pud_val(pud) >> 32))
-extern inline unsigned long pud_page_vaddr(pud_t pgd)
-{ return PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
+extern inline pmd_t *pud_pgtable(pud_t pgd)
+{
+ return (pmd_t *)(PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)));
+}
extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
@@ -268,29 +252,18 @@ extern inline void pud_clear(pud_t * pudp) { pud_val(*pudp) = 0; }
extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-extern inline int pte_special(pte_t pte) { return 0; }
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
+extern inline pte_t pte_mkwrite_novma(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
-extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
-
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/*
- * The smp_read_barrier_depends() in the following functions are required to
- * order the load of *dir (the pointer in the top level page table) with any
- * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ * The smp_rmb() in the following functions are required to order the load of
+ * *dir (the pointer in the top level page table) with any subsequent load of
+ * the returned pmd_t *ret (ret is data dependent on *dir).
*
* If this ordering is not enforced, the CPU might load an older value of
* *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
@@ -303,22 +276,21 @@ extern inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
- pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
- smp_read_barrier_depends(); /* see above */
+ pmd_t *ret = pud_pgtable(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
+ smp_rmb(); /* see above */
return ret;
}
+#define pmd_offset pmd_offset
/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
- smp_read_barrier_depends(); /* see above */
+ smp_rmb(); /* see above */
return ret;
}
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte) do { } while (0)
+#define pte_offset_kernel pte_offset_kernel
extern pgd_t swapper_pg_dir[1024];
@@ -331,22 +303,53 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
{
}
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+}
+
/*
- * Non-present pages: high 24 bits are offset, next 8 bits type,
- * low 32 bits zero.
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
+ * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
+ * <------------------- offset ------------------> E <--- type -->
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <--------------------------- zeroes -------------------------->
+ *
+ * E is the exclusive marker that is not stored in swap entries.
*/
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
-{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
+{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 32) | (offset << 40); return pte; }
-#define __swp_type(x) (((x).val >> 32) & 0xff)
+#define __swp_type(x) (((x).val >> 32) & 0x7f)
#define __swp_offset(x) ((x).val >> 40)
#define __swp_entry(type, off) ((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifndef CONFIG_DISCONTIGMEM
-#define kern_addr_valid(addr) (1)
-#endif
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
+ return pte;
+}
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -357,9 +360,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern void paging_init(void);
-#include <asm-generic/pgtable.h>
-
-/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
+/* We have our own get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* _ALPHA_PGTABLE_H */
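
A minimal userspace model of the swap-PTE layout introduced above, assuming
only the bit positions from the format comment (bit 39 for the exclusive
marker E, bits 38..32 for the type, bits 63..40 for the offset); this is an
illustrative sketch, not kernel code:

	#include <assert.h>
	#include <stdint.h>

	#define SWP_EXCLUSIVE (1UL << 39)	/* models _PAGE_SWP_EXCLUSIVE */

	static uint64_t mk_swap(uint64_t type, uint64_t offset)
	{
		/* mirrors mk_swap_pte(): type masked to 7 bits, offset above bit 39 */
		return ((type & 0x7f) << 32) | (offset << 40);
	}

	int main(void)
	{
		uint64_t pte = mk_swap(0x15, 0x1234) | SWP_EXCLUSIVE;

		/* the 0x7f mask keeps the type from aliasing the E bit */
		assert(((pte >> 32) & 0x7f) == 0x15);	/* __swp_type() */
		assert((pte >> 40) == 0x1234);		/* __swp_offset(); E ignored */
		assert(pte & SWP_EXCLUSIVE);		/* pte_swp_exclusive() */
		return 0;
	}
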
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index 6100431da07a..5dce5518a211 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -8,27 +8,19 @@
#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H
-#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
-
/*
* We have a 42-bit user address space: 4TB user VM...
*/
#define TASK_SIZE (0x40000000000UL)
-#define STACK_TOP \
- (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
+#define STACK_TOP (0x00120000000UL)
#define STACK_TOP_MAX 0x00120000000UL
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_BASE \
- ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
/* This is dead. Everything has been moved to thread_info. */
struct thread_struct { };
@@ -40,9 +32,7 @@ extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
/* Free all resources held by a thread. */
struct task_struct;
-extern void release_thread(struct task_struct *);
-
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
@@ -53,12 +43,6 @@ unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
-
-#ifndef CONFIG_SMP
-/* Nothing to prefetch. */
-#define spin_lock_prefetch(lock) do { } while (0)
-#endif
extern inline void prefetch(const void *ptr)
{
@@ -70,11 +54,4 @@ extern inline void prefetchw(const void *ptr)
__builtin_prefetch(ptr, 1, 3);
}
-#ifdef CONFIG_SMP
-extern inline void spin_lock_prefetch(const void *ptr)
-{
- __builtin_prefetch(ptr, 1, 3);
-}
-#endif
-
#endif /* __ASM_ALPHA_PROCESSOR_H */
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h
index df5f317ab3fc..3557ce64ed21 100644
--- a/arch/alpha/include/asm/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
@@ -16,7 +16,6 @@
#define current_pt_regs() \
((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1)
-#define signal_pt_regs current_pt_regs
#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
diff --git a/arch/alpha/include/asm/rwonce.h b/arch/alpha/include/asm/rwonce.h
new file mode 100644
index 000000000000..35542bcf92b3
--- /dev/null
+++ b/arch/alpha/include/asm/rwonce.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Google LLC.
+ */
+#ifndef __ASM_RWONCE_H
+#define __ASM_RWONCE_H
+
+#ifdef CONFIG_SMP
+
+#include <asm/barrier.h>
+
+/*
+ * Alpha is apparently daft enough to reorder address-dependent loads
+ * on some CPU implementations. Knock some common sense into it with
+ * a memory barrier in READ_ONCE().
+ *
+ * For the curious, more information about this unusual reordering is
+ * available in chapter 15 of the "perfbook":
+ *
+ * https://kernel.org/pub/linux/kernel/people/paulmck/perfbook/perfbook.html
+ *
+ */
+#define __READ_ONCE(x) \
+({ \
+ __unqual_scalar_typeof(x) __x = \
+ (*(volatile typeof(__x) *)(&(x))); \
+ mb(); \
+ (typeof(x))__x; \
+})
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/rwonce.h>
+
+#endif /* __ASM_RWONCE_H */
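
To see what that mb() buys, here is a hedged sketch of the classic
pointer-publication pattern it protects; struct foo and the two functions
are illustrative and not part of the patch:

	struct foo { int data; };
	struct foo *global_ptr;

	/* writer: initialize, then publish */
	void publish(struct foo *p)
	{
		p->data = 42;
		smp_wmb();			/* order the init before the store */
		WRITE_ONCE(global_ptr, p);
	}

	/* reader: without the mb() in __READ_ONCE(), Alpha may satisfy the
	 * dependent load q->data from a stale cache line even though the
	 * pointer load already saw the newly published value. */
	int consume(void)
	{
		struct foo *q = READ_ONCE(global_ptr);

		return q ? q->data : -1;	/* sees 42 once q is non-NULL */
	}
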
diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h
new file mode 100644
index 000000000000..262aab99e391
--- /dev/null
+++ b/arch/alpha/include/asm/setup.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ALPHA_SETUP_H
+#define __ALPHA_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+/*
+ * We leave one page for the initial stack, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB 0x20000000
+#define BOOT_ADDR 0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE (16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
+#else
+#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD KERNEL_START
+#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is setup by the secondary bootstrap loader. Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM ZERO_PGE
+#define COMMAND_LINE ((char *)(absolute_pointer(PARAM + 0x0000)))
+#define INITRD_START (*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))
+
+#endif
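
As a worked example of the layout above (a sketch assuming Alpha's
PAGE_OFFSET of 0xfffffc0000000000 and the non-legacy start address; the
offsets are the ones used in the defines):

	#include <assert.h>

	#define PAGE_OFFSET	  0xfffffc0000000000UL	/* Alpha identity-map base */
	#define KERNEL_START_PHYS 0x1000000UL		/* non-legacy value */

	int main(void)
	{
		/* the defines above resolve to fixed virtual addresses */
		assert(PAGE_OFFSET + KERNEL_START_PHYS           == 0xfffffc0001000000UL); /* KERNEL_START */
		assert(PAGE_OFFSET + KERNEL_START_PHYS + 0x02000 == 0xfffffc0001002000UL); /* INIT_STACK */
		assert(PAGE_OFFSET + KERNEL_START_PHYS + 0x0A000 == 0xfffffc000100a000UL); /* ZERO_PGE */
		assert(PAGE_OFFSET + KERNEL_START_PHYS + 0x10000 == 0xfffffc0001010000UL); /* START_ADDR */
		return 0;
	}
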
diff --git a/arch/alpha/include/asm/sparsemem.h b/arch/alpha/include/asm/sparsemem.h
new file mode 100644
index 000000000000..a0820fd2d4b1
--- /dev/null
+++ b/arch/alpha/include/asm/sparsemem.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ALPHA_SPARSEMEM_H
+#define _ASM_ALPHA_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+
+#define SECTION_SIZE_BITS 27
+
+/*
+ * According to the "Alpha Architecture Reference Manual", physical
+ * addresses are at most 48 bits.
+ * https://download.majix.org/dec/alpha_arch_ref.pdf
+ */
+#define MAX_PHYSMEM_BITS 48
+
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_ALPHA_SPARSEMEM_H */
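
What the two constants imply, as a small standalone check (no assumptions
beyond the values above):

	#include <stdio.h>

	#define SECTION_SIZE_BITS 27
	#define MAX_PHYSMEM_BITS  48

	int main(void)
	{
		unsigned long section_bytes = 1UL << SECTION_SIZE_BITS;
		unsigned long nr_sections   = 1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS);

		/* prints: section size: 128 MiB, max sections: 2097152 */
		printf("section size: %lu MiB, max sections: %lu\n",
		       section_bytes >> 20, nr_sections);
		return 0;
	}
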
diff --git a/arch/alpha/include/asm/special_insns.h b/arch/alpha/include/asm/special_insns.h
index ca2c5c30b22e..798d0bdb11f9 100644
--- a/arch/alpha/include/asm/special_insns.h
+++ b/arch/alpha/include/asm/special_insns.h
@@ -15,10 +15,7 @@ enum implver_enum {
(enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code. */
-#ifdef CONFIG_ALPHA_EV4
-#define implver() IMPLVER_EV4
-#endif
-#ifdef CONFIG_ALPHA_EV5
+#ifdef CONFIG_ALPHA_EV56
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 1d5716bc060b..05a444d77c53 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -2,8 +2,8 @@
#ifndef _ALPHA_SPINLOCK_TYPES_H
#define _ALPHA_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+# error "Please do not include this file directly."
#endif
typedef struct {
diff --git a/arch/alpha/include/asm/syscall.h b/arch/alpha/include/asm/syscall.h
index 11c688c1d7ec..f21babaeed85 100644
--- a/arch/alpha/include/asm/syscall.h
+++ b/arch/alpha/include/asm/syscall.h
@@ -9,4 +9,10 @@ static inline int syscall_get_arch(struct task_struct *task)
return AUDIT_ARCH_ALPHA;
}
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
#endif /* _ASM_ALPHA_SYSCALL_H */
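
A hedged sketch of how arch-independent code can consume the new helper;
trace_exit() is hypothetical, and only syscall_get_return_value() comes
from this patch:

	/* hypothetical caller, e.g. tracing or audit glue */
	static void trace_exit(struct task_struct *task, struct pt_regs *regs)
	{
		long ret = syscall_get_return_value(task, regs);	/* regs->r0 */

		if (ret < 0)
			pr_debug("syscall returned error %ld\n", ret);
	}
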
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
deleted file mode 100644
index b7c77bb1bfd2..000000000000
--- a/arch/alpha/include/asm/termios.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_TERMIOS_H
-#define _ALPHA_TERMIOS_H
-
-#include <uapi/asm/termios.h>
-
-/* eof=^D eol=\0 eol2=\0 erase=del
- werase=^W kill=^U reprint=^R sxtc=\0
- intr=^C quit=^\ susp=^Z <OSF/1 VDSUSP>
- start=^Q stop=^S lnext=^V discard=^U
- vmin=\1 vtime=\0
-*/
-#define INIT_C_CC "\004\000\000\177\027\025\022\000\003\034\032\000\021\023\026\025\001\000"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-
-#define user_termio_to_kernel_termios(a_termios, u_termio) \
-({ \
- struct ktermios *k_termios = (a_termios); \
- struct termio k_termio; \
- int canon, ret; \
- \
- ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio)); \
- if (!ret) { \
- /* Overwrite only the low bits. */ \
- *(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag; \
- *(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag; \
- *(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag; \
- *(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag; \
- canon = k_termio.c_lflag & ICANON; \
- \
- k_termios->c_cc[VINTR] = k_termio.c_cc[_VINTR]; \
- k_termios->c_cc[VQUIT] = k_termio.c_cc[_VQUIT]; \
- k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE]; \
- k_termios->c_cc[VKILL] = k_termio.c_cc[_VKILL]; \
- k_termios->c_cc[VEOL2] = k_termio.c_cc[_VEOL2]; \
- k_termios->c_cc[VSWTC] = k_termio.c_cc[_VSWTC]; \
- k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF]; \
- k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL]; \
- } \
- ret; \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- *
- * Note the "fun" _VMIN overloading.
- */
-#define kernel_termios_to_user_termio(u_termio, a_termios) \
-({ \
- struct ktermios *k_termios = (a_termios); \
- struct termio k_termio; \
- int canon; \
- \
- k_termio.c_iflag = k_termios->c_iflag; \
- k_termio.c_oflag = k_termios->c_oflag; \
- k_termio.c_cflag = k_termios->c_cflag; \
- canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON; \
- \
- k_termio.c_line = k_termios->c_line; \
- k_termio.c_cc[_VINTR] = k_termios->c_cc[VINTR]; \
- k_termio.c_cc[_VQUIT] = k_termios->c_cc[VQUIT]; \
- k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE]; \
- k_termio.c_cc[_VKILL] = k_termios->c_cc[VKILL]; \
- k_termio.c_cc[_VEOF] = k_termios->c_cc[canon ? VEOF : VMIN]; \
- k_termio.c_cc[_VEOL] = k_termios->c_cc[canon ? VEOL : VTIME]; \
- k_termio.c_cc[_VEOL2] = k_termios->c_cc[VEOL2]; \
- k_termio.c_cc[_VSWTC] = k_termios->c_cc[VSWTC]; \
- \
- copy_to_user(u_termio, &k_termio, sizeof(k_termio)); \
-})
-
-#define user_termios_to_kernel_termios(k, u) \
- copy_from_user(k, u, sizeof(struct termios2))
-
-#define kernel_termios_to_user_termios(u, k) \
- copy_to_user(u, k, sizeof(struct termios2))
-
-#define user_termios_to_kernel_termios_1(k, u) \
- copy_from_user(k, u, sizeof(struct termios))
-
-#define kernel_termios_to_user_termios_1(u, k) \
- copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* _ALPHA_TERMIOS_H */
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 807d7b9a1860..4a4d00b37986 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -19,7 +19,6 @@ struct thread_info {
unsigned int flags; /* low level flags */
unsigned int ieee_state; /* see fpu.h */
- mm_segment_t addr_limit; /* thread address space */
unsigned cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
unsigned int status; /* thread-synchronous flags */
@@ -27,6 +26,7 @@ struct thread_info {
int bpt_nsaved;
unsigned long bpt_addr[2]; /* breakpoint handling */
unsigned int bpt_insn[2];
+ unsigned long fp[32];
};
/*
@@ -35,7 +35,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
@@ -43,6 +42,8 @@ struct thread_info {
register struct thread_info *__current_thread_info __asm__("$8");
#define current_thread_info() __current_thread_info
+register unsigned long *current_stack_pointer __asm__ ("$30");
+
#endif /* __ASSEMBLY__ */
/* Thread information allocation. */
@@ -62,6 +63,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SYSCALL_AUDIT 4 /* syscall audit active */
+#define TIF_NOTIFY_SIGNAL 5 /* signal notifications exist */
#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 14 /* idle is polling for TIF_NEED_RESCHED */
@@ -71,20 +73,20 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
/* Work to do on interrupt/exception return. */
#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME)
-
-/* Work to do on any return to userspace. */
-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
- | _TIF_SYSCALL_TRACE)
+ _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL)
#define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */
#define TS_UAC_NOFIX 0x0002 /* ! flags as they match */
#define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'osf_sysinfo' */
+#define TS_SAVED_FP 0x0008
+#define TS_RESTORE_FP 0x0010
+
#define SET_UNALIGN_CTL(task,value) ({ \
__u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
if (value & PR_UNALIGN_NOPRINT) \
@@ -108,5 +110,17 @@ register struct thread_info *__current_thread_info __asm__("$8");
put_user(res, (int __user *)(value)); \
})
+#ifndef __ASSEMBLY__
+extern void __save_fpu(void);
+
+static inline void save_fpu(void)
+{
+ if (!(current_thread_info()->status & TS_SAVED_FP)) {
+ current_thread_info()->status |= TS_SAVED_FP;
+ __save_fpu();
+ }
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ALPHA_THREAD_INFO_H */
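
The lazy-save contract implied by TS_SAVED_FP, sketched as a call sequence
(assuming the flag is cleared again at context-switch time, which this hunk
does not show):

	save_fpu();	/* TS_SAVED_FP clear: sets it and calls __save_fpu() */
	save_fpu();	/* TS_SAVED_FP already set: __save_fpu() is skipped */
	/* on the next context switch the flag is presumably cleared, and
	   TS_RESTORE_FP tells the return path that fp[] must be reloaded */
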
diff --git a/arch/alpha/include/asm/timex.h b/arch/alpha/include/asm/timex.h
index b565cc6f408e..f89798da8a14 100644
--- a/arch/alpha/include/asm/timex.h
+++ b/arch/alpha/include/asm/timex.h
@@ -28,5 +28,6 @@ static inline cycles_t get_cycles (void)
__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
return ret;
}
+#define get_cycles get_cycles
#endif
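
A short usage sketch for the now-official get_cycles(); workload() is
hypothetical, and since the Alpha cycle counter read by rpcc is only
32 bits wide, only short deltas are meaningful:

	cycles_t start = get_cycles();		/* rpcc */
	workload();				/* hypothetical code under test */
	unsigned int delta = (unsigned int)(get_cycles() - start);
	/* valid as long as workload() runs for fewer than 2^32 cycles */
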
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index f8b492408f51..ba4b359d6c39 100644
--- a/arch/alpha/include/asm/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
@@ -5,7 +5,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/compiler.h>
-#include <asm/pgalloc.h>
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
@@ -15,16 +14,6 @@
extern void __load_new_mm_context(struct mm_struct *);
-/* Use a few helper functions to hide the ugly broken ASN
- numbers on early Alphas (ev4 and ev45). */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current(struct mm_struct *mm)
-{
- __load_new_mm_context(mm);
- tbiap();
-}
-
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
@@ -36,19 +25,6 @@ ev5_flush_tlb_current(struct mm_struct *mm)
specific icache page. */
__EXTERN_INLINE void
-ev4_flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- int tbi_flag = 2;
- if (vma->vm_flags & VM_EXEC) {
- __load_new_mm_context(mm);
- tbi_flag = 3;
- }
- tbi(tbi_flag, addr);
-}
-
-__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
@@ -60,18 +36,8 @@ ev5_flush_tlb_current_page(struct mm_struct * mm,
}
-#ifdef CONFIG_ALPHA_GENERIC
-# define flush_tlb_current alpha_mv.mv_flush_tlb_current
-# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define flush_tlb_current ev4_flush_tlb_current
-# define flush_tlb_current_page ev4_flush_tlb_current_page
-# else
-# define flush_tlb_current ev5_flush_tlb_current
-# define flush_tlb_current_page ev5_flush_tlb_current_page
-# endif
-#endif
+#define flush_tlb_current ev5_flush_tlb_current
+#define flush_tlb_current_page ev5_flush_tlb_current_page
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index 5a77a40567fa..7d393036aa8f 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -7,45 +7,6 @@
#include <linux/numa.h>
#include <asm/machvec.h>
-#ifdef CONFIG_NUMA
-static inline int cpu_to_node(int cpu)
-{
- int node;
-
- if (!alpha_mv.cpuid_to_nid)
- return 0;
-
- node = alpha_mv.cpuid_to_nid(cpu);
-
-#ifdef DEBUG_NUMA
- BUG_ON(node < 0);
-#endif
-
- return node;
-}
-
-extern struct cpumask node_to_cpumask_map[];
-/* FIXME: This is dumb, recalculating every time. But simple. */
-static const struct cpumask *cpumask_of_node(int node)
-{
- int cpu;
-
- if (node == NUMA_NO_NODE)
- return cpu_all_mask;
-
- cpumask_clear(&node_to_cpumask_map[node]);
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) == node)
- cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
- }
-
- return &node_to_cpumask_map[node];
-}
-
-#define cpumask_of_pcibus(bus) (cpu_online_mask)
-
-#endif /* !CONFIG_NUMA */
# include <asm-generic/topology.h>
#endif /* _ASM_ALPHA_TOPOLOGY_H */
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 1fe2b56cb861..ef295cbb797c 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -2,47 +2,7 @@
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * Or at least it did once upon a time. Nowadays it is a mask that
- * defines which bits of the address space are off limits. This is a
- * wee bit faster than the above.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define KERNEL_DS ((mm_segment_t) { 0UL })
-#define USER_DS ((mm_segment_t) { -0x40000000000UL })
-
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
-/*
- * Is a address valid? This does a straightforward calculation rather
- * than tests.
- *
- * Address valid if:
- * - "addr" doesn't have any high-bits set
- * - AND "size" doesn't have any high-bits set
- * - AND "addr+size-(size != 0)" doesn't have any high-bits set
- * - OR we are in kernel mode.
- */
-#define __access_ok(addr, size) ({ \
- unsigned long __ao_a = (addr), __ao_b = (size); \
- unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
- (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })
-
-#define access_ok(addr, size) \
-({ \
- __chk_user_ptr(addr); \
- __access_ok(((unsigned long)(addr)), (size)); \
-})
-
+#include <asm-generic/access_ok.h>
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
@@ -105,7 +65,7 @@ extern void __get_user_unknown(void);
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (__access_ok((unsigned long)__gu_addr, size)) { \
+ if (__access_ok(__gu_addr, size)) { \
__gu_err = 0; \
switch (size) { \
case 1: __get_user_8(__gu_addr); break; \
@@ -136,9 +96,6 @@ struct __large_struct { unsigned long buf[100]; };
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
-
#define __get_user_16(addr) \
__asm__("1: ldwu %0,%2\n" \
"2:\n" \
@@ -152,33 +109,6 @@ struct __large_struct { unsigned long buf[100]; };
EXC(1b,2b,%0,%1) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
- load, so we have to do a general unaligned operation. */
-
-#define __get_user_16(addr) \
-{ \
- long __gu_tmp; \
- __asm__("1: ldq_u %0,0(%3)\n" \
- "2: ldq_u %1,1(%3)\n" \
- " extwl %0,%3,%0\n" \
- " extwh %1,%3,%1\n" \
- " or %0,%1,%0\n" \
- "3:\n" \
- EXC(1b,3b,%0,%2) \
- EXC(2b,3b,%0,%2) \
- : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
- : "r"(addr), "2"(__gu_err)); \
-}
-
-#define __get_user_8(addr) \
- __asm__("1: ldq_u %0,0(%2)\n" \
- " extbl %0,%2,%0\n" \
- "2:\n" \
- EXC(1b,2b,%0,%1) \
- : "=&r"(__gu_val), "=r"(__gu_err) \
- : "r"(addr), "1"(__gu_err))
-#endif
extern void __put_user_unknown(void);
@@ -200,7 +130,7 @@ extern void __put_user_unknown(void);
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- if (__access_ok((unsigned long)__pu_addr, size)) { \
+ if (__access_ok(__pu_addr, size)) { \
__pu_err = 0; \
switch (size) { \
case 1: __put_user_8(x, __pu_addr); break; \
@@ -232,9 +162,6 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
-
#define __put_user_16(x, addr) \
__asm__ __volatile__("1: stw %r2,%1\n" \
"2:\n" \
@@ -248,53 +175,6 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
EXC(1b,2b,$31,%0) \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
- write, so we have to do a general unaligned operation. */
-
-#define __put_user_16(x, addr) \
-{ \
- long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
- __asm__ __volatile__( \
- "1: ldq_u %2,1(%5)\n" \
- "2: ldq_u %1,0(%5)\n" \
- " inswh %6,%5,%4\n" \
- " inswl %6,%5,%3\n" \
- " mskwh %2,%5,%2\n" \
- " mskwl %1,%5,%1\n" \
- " or %2,%4,%2\n" \
- " or %1,%3,%1\n" \
- "3: stq_u %2,1(%5)\n" \
- "4: stq_u %1,0(%5)\n" \
- "5:\n" \
- EXC(1b,5b,$31,%0) \
- EXC(2b,5b,$31,%0) \
- EXC(3b,5b,$31,%0) \
- EXC(4b,5b,$31,%0) \
- : "=r"(__pu_err), "=&r"(__pu_tmp1), \
- "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
- "=&r"(__pu_tmp4) \
- : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
-}
-
-#define __put_user_8(x, addr) \
-{ \
- long __pu_tmp1, __pu_tmp2; \
- __asm__ __volatile__( \
- "1: ldq_u %1,0(%4)\n" \
- " insbl %3,%4,%2\n" \
- " mskbl %1,%4,%1\n" \
- " or %1,%2,%1\n" \
- "2: stq_u %1,0(%4)\n" \
- "3:\n" \
- EXC(1b,3b,$31,%0) \
- EXC(2b,3b,$31,%0) \
- : "=r"(__pu_err), \
- "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
- : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
-}
-#endif
-
/*
* Complex access routines
@@ -316,17 +196,14 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long len)
extern long __clear_user(void __user *to, long len);
-extern inline long
+static inline long
clear_user(void __user *to, long len)
{
- if (__access_ok((unsigned long)to, len))
+ if (__access_ok(to, len))
len = __clear_user(to, len);
return len;
}
-#define user_addr_max() \
- (uaccess_kernel() ? ~0UL : TASK_SIZE)
-
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
diff --git a/arch/alpha/include/asm/unaligned.h b/arch/alpha/include/asm/unaligned.h
deleted file mode 100644
index 863c807b66f8..000000000000
--- a/arch/alpha/include/asm/unaligned.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ALPHA_UNALIGNED_H
-#define _ASM_ALPHA_UNALIGNED_H
-
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_ALPHA_UNALIGNED_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 986f5da9b7d8..caabd92ea709 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_SYSCALLS __NR_syscalls
+#define NR_syscalls __NR_syscalls
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/include/asm/user.h b/arch/alpha/include/asm/user.h
index 3df37492c7b7..c9f525a6aab8 100644
--- a/arch/alpha/include/asm/user.h
+++ b/arch/alpha/include/asm/user.h
@@ -45,10 +45,4 @@ struct user {
char u_comm[32]; /* user command name */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_DATA_START_ADDR (u.start_data)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
#endif /* _ALPHA_USER_H */
diff --git a/arch/alpha/include/asm/vga.h b/arch/alpha/include/asm/vga.h
index 4c347a8454c7..919931cb5b63 100644
--- a/arch/alpha/include/asm/vga.h
+++ b/arch/alpha/include/asm/vga.h
@@ -13,6 +13,7 @@
#define VT_BUF_HAVE_RW
#define VT_BUF_HAVE_MEMSETW
#define VT_BUF_HAVE_MEMCPYW
+#define VT_BUF_HAVE_MEMMOVEW
static inline void scr_writew(u16 val, volatile u16 *addr)
{
@@ -40,6 +41,7 @@ static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
/* Do not trust that the usage will be correct; analyze the arguments. */
extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);
+extern void scr_memmovew(u16 *d, const u16 *s, unsigned int count);
/* ??? These are currently only used for downloading character sets. As
such, they don't need memory barriers. Is this all they are intended
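
One plausible shape for the new scr_memmovew(), sketched in terms of the
accessors already declared in this header; the real implementation lives in
arch code and may differ:

	void scr_memmovew(u16 *d, const u16 *s, unsigned int count)
	{
		if (d < s) {
			scr_memcpyw(d, s, count);	/* forward copy is overlap-safe */
		} else {
			unsigned int i = count / 2;	/* count is in bytes */

			while (i--)			/* copy backwards, word-wise */
				scr_writew(scr_readw(s + i), d + i);
		}
	}
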
diff --git a/arch/alpha/include/asm/vmalloc.h b/arch/alpha/include/asm/vmalloc.h
new file mode 100644
index 000000000000..0a9a366a4d34
--- /dev/null
+++ b/arch/alpha/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ALPHA_VMALLOC_H
+#define _ASM_ALPHA_VMALLOC_H
+
+#endif /* _ASM_ALPHA_VMALLOC_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
deleted file mode 100644
index 7adb80c6746a..000000000000
--- a/arch/alpha/include/asm/xchg.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_CMPXCHG_H
-#error Do not include xchg.h directly!
-#else
-/*
- * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
- * except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/cmpxchg.h.
- */
-
-/*
- * Atomic exchange.
- * Since it can be used to implement critical sections
- * it must clobber "memory" (also for interrupts in UP).
- */
-
-static inline unsigned long
-____xchg(_u8, volatile char *m, unsigned long val)
-{
- unsigned long ret, tmp, addr64;
-
- __asm__ __volatile__(
- " andnot %4,7,%3\n"
- " insbl %1,%4,%1\n"
- "1: ldq_l %2,0(%3)\n"
- " extbl %2,%4,%0\n"
- " mskbl %2,%4,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%3)\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
- : "r" ((long)m), "1" (val) : "memory");
-
- return ret;
-}
-
-static inline unsigned long
-____xchg(_u16, volatile short *m, unsigned long val)
-{
- unsigned long ret, tmp, addr64;
-
- __asm__ __volatile__(
- " andnot %4,7,%3\n"
- " inswl %1,%4,%1\n"
- "1: ldq_l %2,0(%3)\n"
- " extwl %2,%4,%0\n"
- " mskwl %2,%4,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%3)\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
- : "r" ((long)m), "1" (val) : "memory");
-
- return ret;
-}
-
-static inline unsigned long
-____xchg(_u32, volatile int *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%4\n"
- " bis $31,%3,%1\n"
- " stl_c %1,%2\n"
- " beq %1,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m) : "memory");
-
- return val;
-}
-
-static inline unsigned long
-____xchg(_u64, volatile long *m, unsigned long val)
-{
- unsigned long dummy;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%4\n"
- " bis $31,%3,%1\n"
- " stq_c %1,%2\n"
- " beq %1,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- : "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m) : "memory");
-
- return val;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid xchg(). */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____xchg(, volatile void *ptr, unsigned long x, int size)
-{
- switch (size) {
- case 1:
- return ____xchg(_u8, ptr, x);
- case 2:
- return ____xchg(_u16, ptr, x);
- case 4:
- return ____xchg(_u32, ptr, x);
- case 8:
- return ____xchg(_u64, ptr, x);
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-static inline unsigned long
-____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
-{
- unsigned long prev, tmp, cmp, addr64;
-
- __asm__ __volatile__(
- " andnot %5,7,%4\n"
- " insbl %1,%5,%1\n"
- "1: ldq_l %2,0(%4)\n"
- " extbl %2,%5,%0\n"
- " cmpeq %0,%6,%3\n"
- " beq %3,2f\n"
- " mskbl %2,%5,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%4)\n"
- " beq %2,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
- : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
-{
- unsigned long prev, tmp, cmp, addr64;
-
- __asm__ __volatile__(
- " andnot %5,7,%4\n"
- " inswl %1,%5,%1\n"
- "1: ldq_l %2,0(%4)\n"
- " extwl %2,%5,%0\n"
- " cmpeq %0,%6,%3\n"
- " beq %3,2f\n"
- " mskwl %2,%5,%2\n"
- " or %1,%2,%2\n"
- " stq_c %2,0(%4)\n"
- " beq %2,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
- : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u32, volatile int *m, int old, int new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stl_c %1,%2\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m) : "memory");
-
- return prev;
-}
-
-static inline unsigned long
-____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stq_c %1,%2\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m) : "memory");
-
- return prev;
-}
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- switch (size) {
- case 1:
- return ____cmpxchg(_u8, ptr, old, new);
- case 2:
- return ____cmpxchg(_u16, ptr, old, new);
- case 4:
- return ____cmpxchg(_u32, ptr, old, new);
- case 8:
- return ____cmpxchg(_u64, ptr, old, new);
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#endif
diff --git a/arch/alpha/include/asm/xor.h b/arch/alpha/include/asm/xor.h
index 5aeb4fb3cb7c..e0de0c233ab9 100644
--- a/arch/alpha/include/asm/xor.h
+++ b/arch/alpha/include/asm/xor.h
@@ -5,24 +5,43 @@
* Optimized RAID-5 checksumming functions for alpha EV5 and EV6
*/
-extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+extern void
+xor_alpha_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+extern void
+xor_alpha_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+extern void
+xor_alpha_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+extern void
+xor_alpha_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
-extern void xor_alpha_prefetch_2(unsigned long, unsigned long *,
- unsigned long *);
-extern void xor_alpha_prefetch_3(unsigned long, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_alpha_prefetch_4(unsigned long, unsigned long *,
- unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_alpha_prefetch_5(unsigned long, unsigned long *,
- unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
+extern void
+xor_alpha_prefetch_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+extern void
+xor_alpha_prefetch_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+extern void
+xor_alpha_prefetch_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+extern void
+xor_alpha_prefetch_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
asm(" \n\
.text \n\