mirror of https://github.com/armbian/linux-rockchip.git
locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly; instead, please re-run the
coccinelle script shown below and apply its output.
For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.
However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:
----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()
// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch
virtual patch
@ depends on patch @
expression E1, E2;
@@
- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)
@ depends on patch @
expression E;
@@
- ACCESS_ONCE(E)
+ READ_ONCE(E)
----
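For illustration, here is a minimal sketch of what the two rules above do to
ordinary C source. This is hypothetical code, not taken from the patch;
shared_flag and the example_* functions are made up for the example.
----
/* Hypothetical example only -- not part of this series. */
#include <linux/compiler.h>

static unsigned long shared_flag;

/* Before: ACCESS_ONCE() is used for both the load and the store. */
static void example_before(void)
{
        if (ACCESS_ONCE(shared_flag))           /* read  */
                ACCESS_ONCE(shared_flag) = 0;   /* write */
}

/*
 * After: the first rule rewrites the assignment (lvalue use) to
 * WRITE_ONCE(), the second rule rewrites the remaining read to
 * READ_ONCE(). For a scalar like this the behaviour is unchanged;
 * only the accessor spelling changes.
 */
static void example_after(void)
{
        if (READ_ONCE(shared_flag))             /* read  */
                WRITE_ONCE(shared_flag, 0);     /* write */
}
----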
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
committed by Ingo Molnar
parent b03a0fe0c5
commit 6aa7de0591
@@ -245,7 +245,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
 	 * and read back old value
 	 */
 	do {
-		new = old = ACCESS_ONCE(*ipi_data_ptr);
+		new = old = READ_ONCE(*ipi_data_ptr);
 		new |= 1U << msg;
 	} while (cmpxchg(ipi_data_ptr, old, new) != old);
@@ -71,7 +71,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 
 	while (lockval.tickets.next != lockval.tickets.owner) {
 		wfe();
-		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
 	}
 
 	smp_mb();
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
 	bool entered_lp2 = false;
 
 	if (tegra_pending_sgi())
-		ACCESS_ONCE(abort_flag) = true;
+		WRITE_ONCE(abort_flag, true);
 
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
 {
 	u32 seq;
 repeat:
-	seq = ACCESS_ONCE(vdata->seq_count);
+	seq = READ_ONCE(vdata->seq_count);
 	if (seq & 1) {
 		cpu_relax();
 		goto repeat;
@@ -61,7 +61,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->lock);
+	int tmp = READ_ONCE(lock->lock);
 
 	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
 		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@ -73,19 +73,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
 
 	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+	WRITE_ONCE(*p, (tmp + 2) & ~1);
 }
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock);
+	long tmp = READ_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock);
+	long tmp = READ_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data)
 	u32 seq;
 
 	while (true) {
-		seq = ACCESS_ONCE(data->seq_count);
+		seq = READ_ONCE(data->seq_count);
 		if (likely(!(seq & 1))) {
 			/* Paired with smp_wmb() in vdso_data_write_*(). */
 			smp_rmb();
@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
 	nc_core_ready_count = nc_addr;
 
 	/* Ensure ready_count is zero-initialised before the assembly runs */
-	ACCESS_ONCE(*nc_core_ready_count) = 0;
+	WRITE_ONCE(*nc_core_ready_count, 0);
 	coupled_barrier(&per_cpu(pm_barrier, core), online);
 
 	/* Run the generated entry code */
@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
 
 try_again:
 	/* pull chars out of the hat */
-	ix = ACCESS_ONCE(port->rx_outp);
+	ix = READ_ONCE(port->rx_outp);
 	if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
 		if (push && !tport->low_latency)
 			tty_flip_buffer_push(tport);
@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
 	if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) {
 		do {
 			/* pull chars out of the hat */
-			ix = ACCESS_ONCE(port->rx_outp);
+			ix = READ_ONCE(port->rx_outp);
 			if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
 				return NO_POLL_CHAR;
@@ -260,7 +260,7 @@ atomic64_set(atomic64_t *v, s64 i)
 static __inline__ s64
 atomic64_read(const atomic64_t *v)
 {
-	return ACCESS_ONCE((v)->counter);
+	return READ_ONCE((v)->counter);
 }
 
 #define atomic64_inc(v) (atomic64_add( 1,(v)))
@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
 	if (!opal_memcons)
 		return -ENODEV;
 
-	out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos));
+	out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos));
 
 	/* Now we've read out_pos, put a barrier in before reading the new
 	 * data it points to in conbuf. */
@@ -117,14 +117,14 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
 static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 {
-	int old = ACCESS_ONCE(rw->lock);
+	int old = READ_ONCE(rw->lock);
 	return likely(old >= 0 &&
 		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
 }
 
 static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 {
-	int old = ACCESS_ONCE(rw->lock);
+	int old = READ_ONCE(rw->lock);
 	return likely(old == 0 &&
 		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
 }
@@ -211,7 +211,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	int old;
 
 	do {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
 	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
 }
@@ -162,8 +162,8 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 			smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
 		if (old < 0)
 			continue;
 		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -178,7 +178,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 	int old;
 
 	while (count-- > 0) {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
 		if (old < 0)
 			continue;
 		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -202,8 +202,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
 			smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
 		smp_mb();
 		if (old >= 0) {
 			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
@@ -230,8 +230,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 			smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
+		old = READ_ONCE(rw->lock);
+		owner = READ_ONCE(rw->owner);
 		if (old >= 0 &&
 		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
 			prev = old;
@@ -251,7 +251,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	int old;
 
 	while (count-- > 0) {
-		old = ACCESS_ONCE(rw->lock);
+		old = READ_ONCE(rw->lock);
 		if (old)
 			continue;
 		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
@@ -31,7 +31,7 @@ void atomic_set(atomic_t *, int);
 
 #define atomic_set_release(v, i) atomic_set((v), (i))
 
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
 
 #define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
 #define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
 				 int64_t completion_slot, int update)
 {
 	if (update) {
-		if (ACCESS_ONCE(dma_queue->hw_complete_count) >
+		if (READ_ONCE(dma_queue->hw_complete_count) >
 		    completion_slot)
 			return 1;
 
 		__gxio_dma_queue_update_credits(dma_queue);
 	}
 
-	return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
+	return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
 }
 
 EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
 		 * if the result is LESS than "hw_complete_count".
 		 */
 		uint64_t complete;
-		complete = ACCESS_ONCE(dma_queue->hw_complete_count);
+		complete = READ_ONCE(dma_queue->hw_complete_count);
 		slot |= (complete & 0xffffffffff000000);
 		if (slot < complete)
 			slot += 0x1000000;
@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 int do_syscall_trace_enter(struct pt_regs *regs)
 {
-	u32 work = ACCESS_ONCE(current_thread_info()->flags);
+	u32 work = READ_ONCE(current_thread_info()->flags);
 
 	if ((work & _TIF_SYSCALL_TRACE) &&
 	    tracehook_report_syscall_entry(regs)) {
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
 	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 		BUG_ON(regs != task_pt_regs(current));
 
-	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
+	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
 
 	if (unlikely(work & _TIF_SYSCALL_EMU))
 		emulated = true;
@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *)
 notrace time_t __vdso_time(time_t *t)
 {
 	/* This is atomic on x86 so we don't need any locks. */
-	time_t result = ACCESS_ONCE(gtod->wall_time_sec);
+	time_t result = READ_ONCE(gtod->wall_time_sec);
 
 	if (t)
 		*t = result;
@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event)
 			event->destroy(event);
 	}
 
-	if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+	if (READ_ONCE(x86_pmu.attr_rdpmc))
 		event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
 
 	return err;
@@ -48,7 +48,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
 	unsigned ret;
 
 repeat:
-	ret = ACCESS_ONCE(s->seq);
+	ret = READ_ONCE(s->seq);
 	if (unlikely(ret & 1)) {
 		cpu_relax();
 		goto repeat;
Some files were not shown because too many files have changed in this diff.