Diffstat (limited to 'arch/riscv/include/asm/barrier.h')
-rw-r--r--	arch/riscv/include/asm/barrier.h	90
1 file changed, 49 insertions, 41 deletions
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index d4628e4b3a5e..700ba3f922cb 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -1,57 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/barrier.h
  *
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (C) 2013 Regents of the University of California
  * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef _ASM_RISCV_BARRIER_H
 #define _ASM_RISCV_BARRIER_H
 
-#ifndef __ASSEMBLY__
-
-#define nop()		__asm__ __volatile__ ("nop")
-
-#define RISCV_FENCE(p, s) \
-	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
+#ifndef __ASSEMBLER__
+#include <asm/cmpxchg.h>
+#include <asm/fence.h>
 
 /* These barriers need to enforce ordering on both devices or memory. */
-#define mb()		RISCV_FENCE(iorw,iorw)
-#define rmb()		RISCV_FENCE(ir,ir)
-#define wmb()		RISCV_FENCE(ow,ow)
+#define __mb()		RISCV_FENCE(iorw, iorw)
+#define __rmb()		RISCV_FENCE(ir, ir)
+#define __wmb()		RISCV_FENCE(ow, ow)
 
 /* These barriers do not need to enforce ordering on devices, just memory. */
-#define __smp_mb()	RISCV_FENCE(rw,rw)
-#define __smp_rmb()	RISCV_FENCE(r,r)
-#define __smp_wmb()	RISCV_FENCE(w,w)
-
-#define __smp_store_release(p, v)					\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	RISCV_FENCE(rw,w);						\
-	WRITE_ONCE(*p, v);						\
-} while (0)
-
-#define __smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = READ_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	RISCV_FENCE(r,rw);						\
-	___p1;								\
-})
+#define __smp_mb()	RISCV_FENCE(rw, rw)
+#define __smp_rmb()	RISCV_FENCE(r, r)
+#define __smp_wmb()	RISCV_FENCE(w, w)
 
 /*
  * This is a very specific barrier: it's currently only used in two places in
@@ -69,11 +40,48 @@ do {									\
  * The AQ/RL pair provides a RCpc critical section, but there's not really any
  * way we can take advantage of that here because the ordering is only enforced
  * on that one lock.  Thus, we're just doing a full fence.
+ *
+ * Since we allow writeX to be called from preemptive regions we need at least
+ * an "o" in the predecessor set to ensure device writes are visible before the
+ * task is marked as available for scheduling on a new hart.  While I don't see
+ * any concrete reason we need a full IO fence, it seems safer to just upgrade
+ * this in order to avoid any IO crossing a scheduling boundary.  In both
+ * instances the scheduler pairs this with an mb(), so nothing is necessary on
+ * the new hart.
  */
-#define smp_mb__after_spinlock()	RISCV_FENCE(rw,rw)
+#define smp_mb__after_spinlock()	RISCV_FENCE(iorw, iorw)
+
+#define __smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	RISCV_FENCE(rw, w);						\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	RISCV_FENCE(r, rw);						\
+	___p1;								\
+})
+
+#ifdef CONFIG_RISCV_ISA_ZAWRS
+#define smp_cond_load_relaxed(ptr, cond_expr) ({			\
+	typeof(ptr) __PTR = (ptr);					\
+	__unqual_scalar_typeof(*ptr) VAL;				\
+	for (;;) {							\
+		VAL = READ_ONCE(*__PTR);				\
+		if (cond_expr)						\
+			break;						\
+		__cmpwait_relaxed(ptr, VAL);				\
+	}								\
+	(typeof(*ptr))VAL;						\
+})
+#endif
 
 #include <asm-generic/barrier.h>
 
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 
 #endif /* _ASM_RISCV_BARRIER_H */
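
The release/acquire mappings in this diff pair a fence with a plain access: a store-release is "fence rw,w" issued before the store, and a load-acquire is "fence r,rw" issued after the load. Below is a minimal userspace sketch of the same message-passing pattern, assuming a riscv64 toolchain; it reuses the RISCV_FENCE definition removed above and is illustrative only, not part of the patch.

#include <pthread.h>
#include <stdio.h>

#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

static int payload;
static volatile int ready;

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;		/* plain data write */
	RISCV_FENCE(rw, w);	/* release: order payload before the flag,
				 * mirroring __smp_store_release() */
	ready = 1;		/* flag store */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!ready)		/* flag load */
		;
	RISCV_FENCE(r, rw);	/* acquire: order the flag load before the
				 * payload read, mirroring __smp_load_acquire() */
	printf("payload = %d\n", payload);	/* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}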

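For the Zawrs path, a hypothetical caller might look like the sketch below; wait_for_token() and the token pointer are invented for illustration. The condition expression references VAL, the macro's name for the most recently loaded value. With CONFIG_RISCV_ISA_ZAWRS enabled, __cmpwait_relaxed() lets the hart stall in wrs.nto until its reservation on *ptr is invalidated rather than busy-polling; without it, the asm-generic fallback spins with cpu_relax().

/*
 * Hypothetical caller, not part of this patch: block until another
 * hart publishes a nonzero token, with no ordering implied on success
 * (hence _relaxed).
 */
static u32 wait_for_token(u32 *token)
{
	/* cond_expr sees each freshly loaded value as VAL */
	return smp_cond_load_relaxed(token, VAL != 0);
}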