Diffstat (limited to 'include/linux/percpu-defs.h')
-rw-r--r--  include/linux/percpu-defs.h  |  56
1 file changed, 33 insertions, 23 deletions
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index ec3573119923..43c854a273c3 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -26,13 +26,11 @@
 #define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
 #define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #endif
-#define PER_CPU_FIRST_SECTION "..first"
 
 #else
 
 #define PER_CPU_SHARED_ALIGNED_SECTION ""
 #define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_FIRST_SECTION ""
 
 #endif
 
@@ -54,7 +52,7 @@
 	__section(".discard") __attribute__((unused))
 
 /*
- * s390 and alpha modules require percpu variables to be defined as
+ * alpha modules require percpu variables to be defined as
  * weak to force the compiler to generate GOT based external
  * references for them. This is necessary because percpu sections
  * will be located outside of the usually addressable area.
@@ -65,14 +63,15 @@
  * 1. The symbol must be globally unique, even the static ones.
  * 2. Static percpu variables cannot be defined inside a function.
  *
- * Archs which need weak percpu definitions should define
- * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ * Archs which need weak percpu definitions should set
+ * CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU when necessary.
  *
  * To ensure that the generic code observes the above two
  * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
  * definition is used for all cases.
  */
-#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+#if (defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)) || \
+	defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
 /*
  * __pcpu_scope_* dummy variable is used to enforce scope. It
  * receives the static modifier when it's used in front of
@@ -115,14 +114,17 @@
 	DEFINE_PER_CPU_SECTION(type, name, "")
 
 /*
- * Declaration/definition used for per-CPU variables that must come first in
- * the set of variables.
+ * Declaration/definition used for per-CPU variables that are frequently
+ * accessed and should be in a single cacheline.
+ *
+ * For use only by architecture and core code. Only use scalar or pointer
+ * types to maximize density.
  */
-#define DECLARE_PER_CPU_FIRST(type, name) \
-	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+#define DECLARE_PER_CPU_CACHE_HOT(type, name) \
+	DECLARE_PER_CPU_SECTION(type, name, "..hot.." #name)
 
-#define DEFINE_PER_CPU_FIRST(type, name) \
-	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+#define DEFINE_PER_CPU_CACHE_HOT(type, name) \
+	DEFINE_PER_CPU_SECTION(type, name, "..hot.." #name)
 
 /*
  * Declaration/definition used for per-CPU variables that must be cacheline
@@ -220,15 +222,17 @@ do { \
 	(void)__vpp_verify; \
 } while (0)
 
+#define PERCPU_PTR(__p) \
+	(TYPEOF_UNQUAL(*(__p)) __force __kernel *)((__force unsigned long)(__p))
+
 #ifdef CONFIG_SMP
 
 /*
- * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
- * to prevent the compiler from making incorrect assumptions about the
- * pointer value. The weird cast keeps both GCC and sparse happy.
+ * Add an offset to a pointer. Use RELOC_HIDE() to prevent the compiler
+ * from making incorrect assumptions about the pointer value.
  */
 #define SHIFT_PERCPU_PTR(__p, __offset) \
-	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
+	RELOC_HIDE(PERCPU_PTR(__p), (__offset))
 
 #define per_cpu_ptr(ptr, cpu) \
 ({ \
@@ -254,13 +258,13 @@ do { \
 
 #else	/* CONFIG_SMP */
 
-#define VERIFY_PERCPU_PTR(__p) \
+#define per_cpu_ptr(ptr, cpu) \
 ({ \
-	__verify_pcpu_ptr(__p); \
-	(typeof(*(__p)) __kernel __force *)(__p); \
+	(void)(cpu); \
+	__verify_pcpu_ptr(ptr); \
+	PERCPU_PTR(ptr); \
 })
 
-#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
 #define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
 #define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)
 
@@ -315,7 +319,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
 
 #define __pcpu_size_call_return(stem, variable) \
 ({ \
-	typeof(variable) pscr_ret__; \
+	TYPEOF_UNQUAL(variable) pscr_ret__; \
 	__verify_pcpu_ptr(&(variable)); \
 	switch(sizeof(variable)) { \
 	case 1: pscr_ret__ = stem##1(variable); break; \
@@ -330,7 +334,7 @@
 
 #define __pcpu_size_call_return2(stem, variable, ...) \
 ({ \
-	typeof(variable) pscr2_ret__; \
+	TYPEOF_UNQUAL(variable) pscr2_ret__; \
 	__verify_pcpu_ptr(&(variable)); \
 	switch(sizeof(variable)) { \
 	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
@@ -372,7 +376,7 @@ do { \
 } while (0)
 
 /*
- * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@gentwo.org>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
@@ -475,6 +479,12 @@ do { \
 	raw_cpu_cmpxchg(pcp, oval, nval); \
 })
 
+#define __this_cpu_try_cmpxchg(pcp, ovalp, nval) \
+({ \
+	__this_cpu_preempt_check("try_cmpxchg"); \
+	raw_cpu_try_cmpxchg(pcp, ovalp, nval); \
+})
+
 #define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
 #define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
 #define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
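Note: the DECLARE_PER_CPU_CACHE_HOT()/DEFINE_PER_CPU_CACHE_HOT() pair added above replaces the old *_PER_CPU_FIRST macros. It places each variable in a per-name "..hot.." percpu section so the linker script can pack frequently accessed per-CPU data into a single cacheline; the new comment restricts it to architecture and core code and to scalar or pointer types. A minimal sketch of how such code could use it, assuming a made-up variable name demo_hot_count that is not part of this patch:

/* in a header shared by core code: make the hot variable visible */
DECLARE_PER_CPU_CACHE_HOT(unsigned long, demo_hot_count);

/* in exactly one .c file: provide the definition */
DEFINE_PER_CPU_CACHE_HOT(unsigned long, demo_hot_count);

/* the ordinary this_cpu accessors work unchanged on the hot variable */
static inline void demo_hot_count_tick(void)
{
	this_cpu_inc(demo_hot_count);
}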
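The new __this_cpu_try_cmpxchg() follows the existing this_cpu/raw_cpu try_cmpxchg convention: it returns true on success and refreshes the old value through ovalp on failure, and the double-underscore form expects the caller to already run with preemption disabled (that is what __this_cpu_preempt_check() verifies). A hedged sketch of the usual retry loop, using a hypothetical per-CPU counter demo_seq that is not introduced by this patch:

static DEFINE_PER_CPU(unsigned int, demo_seq);

/* caller must hold preemption off, e.g. between preempt_disable()/enable() */
static unsigned int demo_seq_advance(void)
{
	unsigned int old = __this_cpu_read(demo_seq);

	/* on failure, old is updated from the per-CPU slot and we retry */
	while (!__this_cpu_try_cmpxchg(demo_seq, &old, old + 1))
		;

	return old;
}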
