Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/kernel/smp.c                           2
-rw-r--r--  arch/arm/include/asm/spinlock.h                 2
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra20.c           2
-rw-r--r--  arch/arm/vdso/vgettimeofday.c                   2
-rw-r--r--  arch/ia64/include/asm/spinlock.h                8
-rw-r--r--  arch/mips/include/asm/vdso.h                    2
-rw-r--r--  arch/mips/kernel/pm-cps.c                       2
-rw-r--r--  arch/mn10300/kernel/mn10300-serial.c            4
-rw-r--r--  arch/parisc/include/asm/atomic.h                2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-msglog.c    2
-rw-r--r--  arch/s390/include/asm/spinlock.h                6
-rw-r--r--  arch/s390/lib/spinlock.c                       16
-rw-r--r--  arch/sparc/include/asm/atomic_32.h              2
-rw-r--r--  arch/tile/gxio/dma_queue.c                      4
-rw-r--r--  arch/tile/include/gxio/dma_queue.h              2
-rw-r--r--  arch/tile/kernel/ptrace.c                       2
-rw-r--r--  arch/x86/entry/common.c                         2
-rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c            2
-rw-r--r--  arch/x86/events/core.c                          2
-rw-r--r--  arch/x86/include/asm/vgtod.h                    2
-rw-r--r--  arch/x86/kernel/espfix_64.c                     6
-rw-r--r--  arch/x86/kernel/nmi.c                           2
-rw-r--r--  arch/x86/kvm/mmu.c                              4
-rw-r--r--  arch/x86/kvm/page_track.c                       2
-rw-r--r--  arch/x86/xen/p2m.c                              2
-rw-r--r--  arch/xtensa/platforms/xtfpga/lcd.c             14
26 files changed, 49 insertions, 49 deletions
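
This is the treewide mechanical conversion of ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() under arch/. ACCESS_ONCE() worked by casting its argument to volatile, which later GCC releases could silently drop for non-scalar types, and it left loads and stores visually identical; the replacements handle aggregate types and make the direction of each access explicit. A simplified sketch of what the replacements do (the real <linux/compiler.h> definitions add size-switched helpers for non-scalar types):

    /* simplified sketch, not the exact kernel definitions */
    #define READ_ONCE(x) \
            (*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, val) \
            (*(volatile typeof(x) *)&(x) = (val))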
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f46267153ec2..94cabe73664b 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -245,7 +245,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
* and read back old value
*/
do {
- new = old = ACCESS_ONCE(*ipi_data_ptr);
+ new = old = READ_ONCE(*ipi_data_ptr);
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
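
The arc hunk above is the canonical read-modify-write retry loop: snapshot the word once, compute the new value, and let cmpxchg() detect racing updates. A hypothetical standalone rendering, with a GCC __atomic builtin standing in for the kernel's cmpxchg():

    /* set a message bit without losing concurrent updates */
    static void set_msg_bit(unsigned long *word, unsigned int msg)
    {
            unsigned long old, new;

            do {
                    old = __atomic_load_n(word, __ATOMIC_RELAXED);
                    new = old | (1UL << msg);
                    /* on failure the builtin also refreshes "old",
                     * but reloading at the top mirrors the kernel loop */
            } while (!__atomic_compare_exchange_n(word, &old, new, false,
                                                  __ATOMIC_ACQ_REL,
                                                  __ATOMIC_RELAXED));
    }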
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index daa87212c9a1..77f50ae0aeb4 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -71,7 +71,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
while (lockval.tickets.next != lockval.tickets.owner) {
wfe();
- lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
}
smp_mb();
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 76e4c83cd5c8..3f24addd7972 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
bool entered_lp2 = false;
if (tegra_pending_sgi())
- ACCESS_ONCE(abort_flag) = true;
+ WRITE_ONCE(abort_flag, true);
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index 79214d5ff097..a9dd619c6c29 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
{
u32 seq;
repeat:
- seq = ACCESS_ONCE(vdata->seq_count);
+ seq = READ_ONCE(vdata->seq_count);
if (seq & 1) {
cpu_relax();
goto repeat;
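
The arm vDSO loop above is the entry half of a seqcount read: an odd sequence number means a writer is mid-update, so the reader spins. A minimal sketch of the full protocol, with a hypothetical payload struct:

    struct vdso_sample { u32 seq; u64 nsec; };

    static u64 read_sample(const struct vdso_sample *s)
    {
            u32 seq;
            u64 v;

            do {
                    while ((seq = READ_ONCE(s->seq)) & 1)
                            cpu_relax();    /* writer in progress */
                    smp_rmb();              /* order seq before payload */
                    v = s->nsec;
                    smp_rmb();              /* order payload before re-check */
            } while (READ_ONCE(s->seq) != seq);     /* retry if it changed */
            return v;
    }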
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 35b31884863b..e98775be112d 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -61,7 +61,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
- int tmp = ACCESS_ONCE(lock->lock);
+ int tmp = READ_ONCE(lock->lock);
if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@ -73,19 +73,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+ WRITE_ONCE(*p, (tmp + 2) & ~1);
}
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock);
+ long tmp = READ_ONCE(lock->lock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock);
+ long tmp = READ_ONCE(lock->lock);
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
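
All four ia64 helpers decode a single lock word: the "now serving" ticket lives in the low bits and the "next ticket" above TICKET_SHIFT, so the lock is free exactly when the two fields agree. A sketch of the test, assuming ia64's 17-bit shift and 15-bit ticket mask:

    #define TICKET_SHIFT    17
    #define TICKET_MASK     ((1 << 15) - 1)

    /* free when next == owner, i.e. the two fields XOR to zero */
    static int ticket_is_free(int tmp)
    {
            return !(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
    }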
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index b7cd6cf77b83..91bf0c2c265c 100644
--- a/arch/mips/include/asm/vdso.h
+++ b/arch/mips/include/asm/vdso.h
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data)
u32 seq;
while (true) {
- seq = ACCESS_ONCE(data->seq_count);
+ seq = READ_ONCE(data->seq_count);
if (likely(!(seq & 1))) {
/* Paired with smp_wmb() in vdso_data_write_*(). */
smp_rmb();
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 4655017f2377..1d2996cd58da 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
nc_core_ready_count = nc_addr;
/* Ensure ready_count is zero-initialised before the assembly runs */
- ACCESS_ONCE(*nc_core_ready_count) = 0;
+ WRITE_ONCE(*nc_core_ready_count, 0);
coupled_barrier(&per_cpu(pm_barrier, core), online);
/* Run the generated entry code */
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 7ecf69879e2d..d7ef1232a82a 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
try_again:
/* pull chars out of the hat */
- ix = ACCESS_ONCE(port->rx_outp);
+ ix = READ_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
if (push && !tport->low_latency)
tty_flip_buffer_push(tport);
@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) {
do {
/* pull chars out of the hat */
- ix = ACCESS_ONCE(port->rx_outp);
+ ix = READ_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
return NO_POLL_CHAR;
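
Both mn10300 hunks read the consumer index of a single-producer/single-consumer ring exactly once per pass; READ_ONCE() keeps the compiler from refetching it mid-computation. A hedged consumer-side sketch (struct and size illustrative; CIRC_CNT() is the real helper from <linux/circ_buf.h>):

    #define RX_SIZE 4096    /* power of two, as CIRC_CNT() expects */

    struct rx_ring {
            unsigned int inp;       /* advanced by the IRQ handler */
            unsigned int outp;      /* advanced by the consumer */
            unsigned char buf[RX_SIZE];
    };

    static int ring_pop(struct rx_ring *r)
    {
            unsigned int ix = READ_ONCE(r->outp);
            int ch;

            if (CIRC_CNT(READ_ONCE(r->inp), ix, RX_SIZE) == 0)
                    return -1;      /* empty */
            ch = r->buf[ix & (RX_SIZE - 1)];
            smp_mb();               /* read the data before freeing the slot */
            WRITE_ONCE(r->outp, (ix + 1) & (RX_SIZE - 1));
            return ch;
    }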
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 17b98a87e5e2..c57d4e8307f2 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -260,7 +260,7 @@ atomic64_set(atomic64_t *v, s64 i)
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
#define atomic64_inc(v) (atomic64_add( 1,(v)))
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index 7a9cde0cfbd1..acd3206dfae3 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
if (!opal_memcons)
return -ENODEV;
- out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos));
+ out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos));
/* Now we've read out_pos, put a barrier in before reading the new
* data it points to in conbuf. */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 9fa855f91e55..66f4160010ef 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -117,14 +117,14 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
- int old = ACCESS_ONCE(rw->lock);
+ int old = READ_ONCE(rw->lock);
return likely(old >= 0 &&
__atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
- int old = ACCESS_ONCE(rw->lock);
+ int old = READ_ONCE(rw->lock);
return likely(old == 0 &&
__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
@@ -211,7 +211,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
int old;
do {
- old = ACCESS_ONCE(rw->lock);
+ old = READ_ONCE(rw->lock);
} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}
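
The s390 rwlock word is a plain int: bit 31 marks a writer and the low bits count readers, so "old >= 0" means no writer is present. A sketch of the read-trylock step, with a generic GCC builtin standing in for the arch-internal __atomic_cmpxchg_bool():

    static inline _Bool cas_bool(int *p, int old, int new)
    {
            return __atomic_compare_exchange_n(p, &old, new, false,
                                               __ATOMIC_SEQ_CST,
                                               __ATOMIC_RELAXED);
    }

    static int read_trylock_once(int *lock)
    {
            int old = READ_ONCE(*lock);

            /* no writer (sign bit clear): try to bump the reader count */
            return old >= 0 && cas_bool(lock, old, old + 1);
    }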
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index b12663d653d8..34e30b9ea234 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -162,8 +162,8 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
smp_yield_cpu(~owner);
count = spin_retry;
}
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
+ old = READ_ONCE(rw->lock);
+ owner = READ_ONCE(rw->owner);
if (old < 0)
continue;
if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -178,7 +178,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
int old;
while (count-- > 0) {
- old = ACCESS_ONCE(rw->lock);
+ old = READ_ONCE(rw->lock);
if (old < 0)
continue;
if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -202,8 +202,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
smp_yield_cpu(~owner);
count = spin_retry;
}
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
+ old = READ_ONCE(rw->lock);
+ owner = READ_ONCE(rw->owner);
smp_mb();
if (old >= 0) {
prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
@@ -230,8 +230,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
smp_yield_cpu(~owner);
count = spin_retry;
}
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
+ old = READ_ONCE(rw->lock);
+ owner = READ_ONCE(rw->owner);
if (old >= 0 &&
__atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
prev = old;
@@ -251,7 +251,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
int old;
while (count-- > 0) {
- old = ACCESS_ONCE(rw->lock);
+ old = READ_ONCE(rw->lock);
if (old)
continue;
if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7643e979e333..e2f398e9456c 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -31,7 +31,7 @@ void atomic_set(atomic_t *, int);
#define atomic_set_release(v, i) atomic_set((v), (i))
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c
index baa60357f8ba..b7ba577d82ca 100644
--- a/arch/tile/gxio/dma_queue.c
+++ b/arch/tile/gxio/dma_queue.c
@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
int64_t completion_slot, int update)
{
if (update) {
- if (ACCESS_ONCE(dma_queue->hw_complete_count) >
+ if (READ_ONCE(dma_queue->hw_complete_count) >
completion_slot)
return 1;
__gxio_dma_queue_update_credits(dma_queue);
}
- return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
+ return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h
index b9e45e37649e..c8fd47edba30 100644
--- a/arch/tile/include/gxio/dma_queue.h
+++ b/arch/tile/include/gxio/dma_queue.h
@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
* if the result is LESS than "hw_complete_count".
*/
uint64_t complete;
- complete = ACCESS_ONCE(dma_queue->hw_complete_count);
+ complete = READ_ONCE(dma_queue->hw_complete_count);
slot |= (complete & 0xffffffffff000000);
if (slot < complete)
slot += 0x1000000;
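
The tile reserve path splices a 24-bit hardware slot number onto the 64-bit software completion count, then corrects for wrap when the spliced value lands behind the count. A worked sketch:

    static uint64_t widen_slot(uint64_t complete, uint64_t slot24)
    {
            uint64_t slot = slot24 | (complete & 0xffffffffff000000ULL);

            if (slot < complete)
                    slot += 0x1000000ULL;   /* low 24 bits wrapped */
            return slot;
    }
    /* e.g. complete = 0x1fffff0, slot24 = 0x10:
     * spliced 0x1000010 < 0x1fffff0, so widen to 0x2000010 */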
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index e1a078e6828e..d516d61751c2 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
int do_syscall_trace_enter(struct pt_regs *regs)
{
- u32 work = ACCESS_ONCE(current_thread_info()->flags);
+ u32 work = READ_ONCE(current_thread_info()->flags);
if ((work & _TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)) {
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 03505ffbe1b6..eaa0ba66cf96 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
BUG_ON(regs != task_pt_regs(current));
- work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
+ work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
if (unlikely(work & _TIF_SYSCALL_EMU))
emulated = true;
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index fa8dbfcf7ed3..11b13c4b43d5 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *)
notrace time_t __vdso_time(time_t *t)
{
/* This is atomic on x86 so we don't need any locks. */
- time_t result = ACCESS_ONCE(gtod->wall_time_sec);
+ time_t result = READ_ONCE(gtod->wall_time_sec);
if (t)
*t = result;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 589af1eec7c1..140d33288e78 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event)
event->destroy(event);
}
- if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+ if (READ_ONCE(x86_pmu.attr_rdpmc))
event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
return err;
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 022e59714562..53dd162576a8 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -48,7 +48,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
unsigned ret;
repeat:
- ret = ACCESS_ONCE(s->seq);
+ ret = READ_ONCE(s->seq);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 9c4e7ba6870c..7d7715dde901 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -155,14 +155,14 @@ void init_espfix_ap(int cpu)
page = cpu/ESPFIX_STACKS_PER_PAGE;
/* Did another CPU already set this up? */
- stack_page = ACCESS_ONCE(espfix_pages[page]);
+ stack_page = READ_ONCE(espfix_pages[page]);
if (likely(stack_page))
goto done;
mutex_lock(&espfix_init_mutex);
/* Did we race on the lock? */
- stack_page = ACCESS_ONCE(espfix_pages[page]);
+ stack_page = READ_ONCE(espfix_pages[page]);
if (stack_page)
goto unlock_done;
@@ -200,7 +200,7 @@ void init_espfix_ap(int cpu)
set_pte(&pte_p[n*PTE_STRIDE], pte);
/* Job is done for this CPU and any CPU which shares this page */
- ACCESS_ONCE(espfix_pages[page]) = stack_page;
+ WRITE_ONCE(espfix_pages[page], stack_page);
unlock_done:
mutex_unlock(&espfix_init_mutex);
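
The espfix hunks are textbook double-checked initialisation: a lock-free READ_ONCE() fast path, a second READ_ONCE() after taking the mutex to catch races, and a WRITE_ONCE() to publish the result. A minimal generic sketch (names hypothetical; a fully portable version would publish with smp_store_release(), where espfix gets its ordering from its own barriers):

    static void *obj;
    static DEFINE_MUTEX(obj_lock);

    static void *get_obj(void)
    {
            void *p = READ_ONCE(obj);

            if (p)
                    return p;               /* fast path, lock-free */
            mutex_lock(&obj_lock);
            p = READ_ONCE(obj);             /* did we race on the lock? */
            if (!p) {
                    p = kzalloc(64, GFP_KERNEL);    /* hypothetical setup */
                    WRITE_ONCE(obj, p);
            }
            mutex_unlock(&obj_lock);
            return p;
    }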
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 35aafc95e4b8..18bc9b51ac9b 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -105,7 +105,7 @@ static void nmi_max_handler(struct irq_work *w)
{
struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
int remainder_ns, decimal_msecs;
- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
+ u64 whole_msecs = READ_ONCE(a->max_duration);
remainder_ns = do_div(whole_msecs, (1000 * 1000));
decimal_msecs = remainder_ns / 1000;
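
One subtlety in the nmi hunk: do_div() divides its 64-bit first argument in place and returns the remainder, which is why max_duration is copied into whole_msecs first. A worked sketch with a hypothetical duration in nanoseconds:

    static void nmi_msecs_example(void)
    {
            u64 whole_msecs = 5034567ULL;   /* hypothetical max_duration, ns */
            u32 remainder_ns, decimal_msecs;

            remainder_ns = do_div(whole_msecs, 1000 * 1000);
            /* whole_msecs is now 5, remainder_ns is 34567 */
            decimal_msecs = remainder_ns / 1000;    /* 34 -> "5.034 msecs" */
    }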
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7a69cf053711..a119b361b8b7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -443,7 +443,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
static u64 __get_spte_lockless(u64 *sptep)
{
- return ACCESS_ONCE(*sptep);
+ return READ_ONCE(*sptep);
}
#else
union split_spte {
@@ -4819,7 +4819,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
* If we don't have indirect shadow pages, it means no page is
* write-protected, so we can exit simply.
*/
- if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+ if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
return;
remote_flush = local_flush = false;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index ea67dc876316..01c1371f39f8 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -157,7 +157,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
return false;
index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
- return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+ return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}
void kvm_page_track_cleanup(struct kvm *kvm)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 6083ba462f35..13b4f19b9131 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -547,7 +547,7 @@ int xen_alloc_p2m_entry(unsigned long pfn)
if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
topidx = p2m_top_index(pfn);
top_mfn_p = &p2m_top_mfn[topidx];
- mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
+ mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);
BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
index 4dc0c1b43f4b..2f7eb66c23ec 100644
--- a/arch/xtensa/platforms/xtfpga/lcd.c
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -34,23 +34,23 @@
static void lcd_put_byte(u8 *addr, u8 data)
{
#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
- ACCESS_ONCE(*addr) = data;
+ WRITE_ONCE(*addr, data);
#else
- ACCESS_ONCE(*addr) = data & 0xf0;
- ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
+ WRITE_ONCE(*addr, data & 0xf0);
+ WRITE_ONCE(*addr, (data << 4) & 0xf0);
#endif
}
static int __init lcd_init(void)
{
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
mdelay(5);
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
udelay(200);
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
udelay(50);
#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);