qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type
pointer argument. QEMU uses direct types (int, etc.) and this causes a
compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:
$ CC=clang CXX=clang++ ./configure ... && make
../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)
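For context, a minimal standalone reproducer (hypothetical; not part of the
QEMU tree) shows the underlying clash when built with clang: once
<stdatomic.h> is in scope, its atomic_fetch_*() generics require an
_Atomic-qualified object, so a call on a plain integer fails exactly as above:

    /* clash.c - hypothetical example; build with: clang -std=gnu11 -c clash.c */
    #include <stdatomic.h>

    static unsigned int counter;    /* plain (non-_Atomic) type, as QEMU uses */

    void bump(void)
    {
        /* error: address argument to atomic operation must be a
         * pointer to _Atomic type ('unsigned int *' invalid)
         */
        atomic_fetch_add(&counter, 1);
    }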
Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.
This patch was generated using:
  $ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
      sort -u >/tmp/changed_identifiers
  $ for identifier in $(</tmp/changed_identifiers); do
        sed -i "s%\<$identifier\>%q$identifier%g" \
            $(git grep -I -l "\<$identifier\>")
    done
I manually fixed line-wrap issues and misaligned rST tables.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
@@ -2379,7 +2379,7 @@ static __thread bool have_sigbus_pending;
|
||||
|
||||
static void kvm_cpu_kick(CPUState *cpu)
|
||||
{
|
||||
atomic_set(&cpu->kvm_run->immediate_exit, 1);
|
||||
qatomic_set(&cpu->kvm_run->immediate_exit, 1);
|
||||
}
|
||||
|
||||
static void kvm_cpu_kick_self(void)
|
||||
@@ -2400,7 +2400,7 @@ static void kvm_eat_signals(CPUState *cpu)
|
||||
int r;
|
||||
|
||||
if (kvm_immediate_exit) {
|
||||
atomic_set(&cpu->kvm_run->immediate_exit, 0);
|
||||
qatomic_set(&cpu->kvm_run->immediate_exit, 0);
|
||||
/* Write kvm_run->immediate_exit before the cpu->exit_request
|
||||
* write in kvm_cpu_exec.
|
||||
*/
|
||||
@@ -2434,7 +2434,7 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
DPRINTF("kvm_cpu_exec()\n");
|
||||
|
||||
if (kvm_arch_process_async_events(cpu)) {
|
||||
atomic_set(&cpu->exit_request, 0);
|
||||
qatomic_set(&cpu->exit_request, 0);
|
||||
return EXCP_HLT;
|
||||
}
|
||||
|
||||
@@ -2450,7 +2450,7 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
}
|
||||
|
||||
kvm_arch_pre_run(cpu, run);
|
||||
if (atomic_read(&cpu->exit_request)) {
|
||||
if (qatomic_read(&cpu->exit_request)) {
|
||||
DPRINTF("interrupt exit requested\n");
|
||||
/*
|
||||
* KVM requires us to reenter the kernel after IO exits to complete
|
||||
@@ -2577,7 +2577,7 @@ int kvm_cpu_exec(CPUState *cpu)
|
||||
vm_stop(RUN_STATE_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
atomic_set(&cpu->exit_request, 0);
|
||||
qatomic_set(&cpu->exit_request, 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -2994,7 +2994,7 @@ int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
|
||||
have_sigbus_pending = true;
|
||||
pending_sigbus_addr = addr;
|
||||
pending_sigbus_code = code;
|
||||
atomic_set(&cpu->exit_request, 1);
|
||||
qatomic_set(&cpu->exit_request, 1);
|
||||
return 0;
|
||||
#else
|
||||
return 1;
|
||||
|
||||
@@ -83,7 +83,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
||||
#if DATA_SIZE == 16
|
||||
ret = atomic16_cmpxchg(haddr, cmpv, newv);
|
||||
#else
|
||||
ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
#endif
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr, info);
|
||||
@@ -131,7 +131,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
|
||||
ATOMIC_MMU_IDX);
|
||||
|
||||
atomic_trace_rmw_pre(env, addr, info);
|
||||
ret = atomic_xchg__nocheck(haddr, val);
|
||||
ret = qatomic_xchg__nocheck(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr, info);
|
||||
return ret;
|
||||
@@ -147,7 +147,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
|
||||
ATOMIC_MMU_IDX); \
|
||||
atomic_trace_rmw_pre(env, addr, info); \
|
||||
ret = atomic_##X(haddr, val); \
|
||||
ret = qatomic_##X(haddr, val); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, info); \
|
||||
return ret; \
|
||||
@@ -182,10 +182,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
ATOMIC_MMU_IDX); \
|
||||
atomic_trace_rmw_pre(env, addr, info); \
|
||||
smp_mb(); \
|
||||
cmp = atomic_read__nocheck(haddr); \
|
||||
cmp = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
old = cmp; new = FN(old, val); \
|
||||
cmp = atomic_cmpxchg__nocheck(haddr, old, new); \
|
||||
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
|
||||
} while (cmp != old); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, info); \
|
||||
@@ -230,7 +230,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
|
||||
#if DATA_SIZE == 16
|
||||
ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
#else
|
||||
ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
|
||||
#endif
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr, info);
|
||||
@@ -280,7 +280,7 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
|
||||
ATOMIC_MMU_IDX);
|
||||
|
||||
atomic_trace_rmw_pre(env, addr, info);
|
||||
ret = atomic_xchg__nocheck(haddr, BSWAP(val));
|
||||
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr, info);
|
||||
return BSWAP(ret);
|
||||
@@ -296,7 +296,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
|
||||
false, ATOMIC_MMU_IDX); \
|
||||
atomic_trace_rmw_pre(env, addr, info); \
|
||||
ret = atomic_##X(haddr, BSWAP(val)); \
|
||||
ret = qatomic_##X(haddr, BSWAP(val)); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, info); \
|
||||
return BSWAP(ret); \
|
||||
@@ -329,10 +329,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
|
||||
false, ATOMIC_MMU_IDX); \
|
||||
atomic_trace_rmw_pre(env, addr, info); \
|
||||
smp_mb(); \
|
||||
ldn = atomic_read__nocheck(haddr); \
|
||||
ldn = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
|
||||
ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
|
||||
ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
|
||||
} while (ldo != ldn); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, info); \
|
||||
|
||||
@@ -367,7 +367,8 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
|
||||
goto out_unlock_next;
|
||||
}
|
||||
/* Atomically claim the jump destination slot only if it was NULL */
|
||||
old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
|
||||
old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
|
||||
(uintptr_t)tb_next);
|
||||
if (old) {
|
||||
goto out_unlock_next;
|
||||
}
|
||||
@@ -407,7 +408,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
|
||||
tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
|
||||
mmap_unlock();
|
||||
/* We add the TB in the virtual pc hash table for the fast lookup */
|
||||
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
|
||||
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
|
||||
}
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
/* We don't take care of direct jumps when address mapping changes in
|
||||
@@ -536,9 +537,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
* Ensure zeroing happens before reading cpu->exit_request or
|
||||
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
|
||||
*/
|
||||
atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
|
||||
qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
|
||||
|
||||
if (unlikely(atomic_read(&cpu->interrupt_request))) {
|
||||
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
|
||||
int interrupt_request;
|
||||
qemu_mutex_lock_iothread();
|
||||
interrupt_request = cpu->interrupt_request;
|
||||
@@ -613,10 +614,10 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
|
||||
}
|
||||
|
||||
/* Finally, check if we need to exit to the main loop. */
|
||||
if (unlikely(atomic_read(&cpu->exit_request))
|
||||
if (unlikely(qatomic_read(&cpu->exit_request))
|
||||
|| (use_icount
|
||||
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
|
||||
atomic_set(&cpu->exit_request, 0);
|
||||
qatomic_set(&cpu->exit_request, 0);
|
||||
if (cpu->exception_index == -1) {
|
||||
cpu->exception_index = EXCP_INTERRUPT;
|
||||
}
|
||||
@@ -642,7 +643,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
}
|
||||
|
||||
*last_tb = NULL;
|
||||
insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
|
||||
insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
|
||||
if (insns_left < 0) {
|
||||
/* Something asked us to stop executing chained TBs; just
|
||||
* continue round the main loop. Whatever requested the exit
|
||||
|
||||
@@ -312,9 +312,9 @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
|
||||
CPU_FOREACH(cpu) {
|
||||
CPUArchState *env = cpu->env_ptr;
|
||||
|
||||
full += atomic_read(&env_tlb(env)->c.full_flush_count);
|
||||
part += atomic_read(&env_tlb(env)->c.part_flush_count);
|
||||
elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
|
||||
full += qatomic_read(&env_tlb(env)->c.full_flush_count);
|
||||
part += qatomic_read(&env_tlb(env)->c.part_flush_count);
|
||||
elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
|
||||
}
|
||||
*pfull = full;
|
||||
*ppart = part;
|
||||
@@ -349,13 +349,13 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
|
||||
cpu_tb_jmp_cache_clear(cpu);
|
||||
|
||||
if (to_clean == ALL_MMUIDX_BITS) {
|
||||
atomic_set(&env_tlb(env)->c.full_flush_count,
|
||||
qatomic_set(&env_tlb(env)->c.full_flush_count,
|
||||
env_tlb(env)->c.full_flush_count + 1);
|
||||
} else {
|
||||
atomic_set(&env_tlb(env)->c.part_flush_count,
|
||||
qatomic_set(&env_tlb(env)->c.part_flush_count,
|
||||
env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
|
||||
if (to_clean != asked) {
|
||||
atomic_set(&env_tlb(env)->c.elide_flush_count,
|
||||
qatomic_set(&env_tlb(env)->c.elide_flush_count,
|
||||
env_tlb(env)->c.elide_flush_count +
|
||||
ctpop16(asked & ~to_clean));
|
||||
}
|
||||
@@ -693,7 +693,7 @@ void tlb_unprotect_code(ram_addr_t ram_addr)
|
||||
* generated code.
|
||||
*
|
||||
* Other vCPUs might be reading their TLBs during guest execution, so we update
|
||||
* te->addr_write with atomic_set. We don't need to worry about this for
|
||||
* te->addr_write with qatomic_set. We don't need to worry about this for
|
||||
* oversized guests as MTTCG is disabled for them.
|
||||
*
|
||||
* Called with tlb_c.lock held.
|
||||
@@ -711,7 +711,7 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
|
||||
#if TCG_OVERSIZED_GUEST
|
||||
tlb_entry->addr_write |= TLB_NOTDIRTY;
|
||||
#else
|
||||
atomic_set(&tlb_entry->addr_write,
|
||||
qatomic_set(&tlb_entry->addr_write,
|
||||
tlb_entry->addr_write | TLB_NOTDIRTY);
|
||||
#endif
|
||||
}
|
||||
@@ -1138,8 +1138,8 @@ static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
|
||||
#if TCG_OVERSIZED_GUEST
|
||||
return *(target_ulong *)((uintptr_t)entry + ofs);
|
||||
#else
|
||||
/* ofs might correspond to .addr_write, so use atomic_read */
|
||||
return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
|
||||
/* ofs might correspond to .addr_write, so use qatomic_read */
|
||||
return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -1155,11 +1155,11 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
|
||||
CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
|
||||
target_ulong cmp;
|
||||
|
||||
/* elt_ofs might correspond to .addr_write, so use atomic_read */
|
||||
/* elt_ofs might correspond to .addr_write, so use qatomic_read */
|
||||
#if TCG_OVERSIZED_GUEST
|
||||
cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
|
||||
#else
|
||||
cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
|
||||
cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
|
||||
#endif
|
||||
|
||||
if (cmp == page) {
|
||||
|
||||
@@ -65,7 +65,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
|
||||
if (!qemu_cpu_is_self(cpu)) {
|
||||
qemu_cpu_kick(cpu);
|
||||
} else {
|
||||
atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
if (use_icount &&
|
||||
!cpu->can_do_io
|
||||
&& (mask & ~old_mask) != 0) {
|
||||
|
||||
@@ -377,9 +377,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
|
||||
restore_state_to_opc(env, tb, data);
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
atomic_set(&prof->restore_time,
|
||||
qatomic_set(&prof->restore_time,
|
||||
prof->restore_time + profile_getclock() - ti);
|
||||
atomic_set(&prof->restore_count, prof->restore_count + 1);
|
||||
qatomic_set(&prof->restore_count, prof->restore_count + 1);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
@@ -509,7 +509,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
|
||||
|
||||
/* Level 2..N-1. */
|
||||
for (i = v_l2_levels; i > 0; i--) {
|
||||
void **p = atomic_rcu_read(lp);
|
||||
void **p = qatomic_rcu_read(lp);
|
||||
|
||||
if (p == NULL) {
|
||||
void *existing;
|
||||
@@ -518,7 +518,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
|
||||
return NULL;
|
||||
}
|
||||
p = g_new0(void *, V_L2_SIZE);
|
||||
existing = atomic_cmpxchg(lp, NULL, p);
|
||||
existing = qatomic_cmpxchg(lp, NULL, p);
|
||||
if (unlikely(existing)) {
|
||||
g_free(p);
|
||||
p = existing;
|
||||
@@ -528,7 +528,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
|
||||
lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
|
||||
}
|
||||
|
||||
pd = atomic_rcu_read(lp);
|
||||
pd = qatomic_rcu_read(lp);
|
||||
if (pd == NULL) {
|
||||
void *existing;
|
||||
|
||||
@@ -545,7 +545,7 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
|
||||
}
|
||||
}
|
||||
#endif
|
||||
existing = atomic_cmpxchg(lp, NULL, pd);
|
||||
existing = qatomic_cmpxchg(lp, NULL, pd);
|
||||
if (unlikely(existing)) {
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
{
|
||||
@@ -1253,7 +1253,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
|
||||
tcg_region_reset_all();
|
||||
/* XXX: flush processor icache at this point if cache flush is
|
||||
expensive */
|
||||
atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
|
||||
qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
|
||||
|
||||
done:
|
||||
mmap_unlock();
|
||||
@@ -1265,7 +1265,7 @@ done:
|
||||
void tb_flush(CPUState *cpu)
|
||||
{
|
||||
if (tcg_enabled()) {
|
||||
unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
|
||||
unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
|
||||
|
||||
if (cpu_in_exclusive_context(cpu)) {
|
||||
do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
|
||||
@@ -1358,7 +1358,7 @@ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
|
||||
int n;
|
||||
|
||||
/* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
|
||||
ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
|
||||
ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
|
||||
dest = (TranslationBlock *)(ptr & ~1);
|
||||
if (dest == NULL) {
|
||||
return;
|
||||
@@ -1369,7 +1369,7 @@ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
|
||||
* While acquiring the lock, the jump might have been removed if the
|
||||
* destination TB was invalidated; check again.
|
||||
*/
|
||||
ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
|
||||
ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
|
||||
if (ptr_locked != ptr) {
|
||||
qemu_spin_unlock(&dest->jmp_lock);
|
||||
/*
|
||||
@@ -1415,7 +1415,7 @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
|
||||
|
||||
TB_FOR_EACH_JMP(dest, tb, n) {
|
||||
tb_reset_jump(tb, n);
|
||||
atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
|
||||
qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
|
||||
/* No need to clear the list entry; setting the dest ptr is enough */
|
||||
}
|
||||
dest->jmp_list_head = (uintptr_t)NULL;
|
||||
@@ -1439,7 +1439,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
|
||||
|
||||
/* make sure no further incoming jumps will be chained to this TB */
|
||||
qemu_spin_lock(&tb->jmp_lock);
|
||||
atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
|
||||
qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
|
||||
qemu_spin_unlock(&tb->jmp_lock);
|
||||
|
||||
/* remove the TB from the hash list */
|
||||
@@ -1466,8 +1466,8 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
|
||||
/* remove the TB from the hash list */
|
||||
h = tb_jmp_cache_hash_func(tb->pc);
|
||||
CPU_FOREACH(cpu) {
|
||||
if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
|
||||
atomic_set(&cpu->tb_jmp_cache[h], NULL);
|
||||
if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
|
||||
qatomic_set(&cpu->tb_jmp_cache[h], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1478,7 +1478,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
|
||||
/* suppress any remaining jumps to this TB */
|
||||
tb_jmp_unlink(tb);
|
||||
|
||||
atomic_set(&tcg_ctx->tb_phys_invalidate_count,
|
||||
qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
|
||||
tcg_ctx->tb_phys_invalidate_count + 1);
|
||||
}
|
||||
|
||||
@@ -1733,7 +1733,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
/* includes aborted translations because of exceptions */
|
||||
atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
|
||||
qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
|
||||
ti = profile_getclock();
|
||||
#endif
|
||||
|
||||
@@ -1758,8 +1758,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
atomic_set(&prof->tb_count, prof->tb_count + 1);
|
||||
atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
|
||||
qatomic_set(&prof->tb_count, prof->tb_count + 1);
|
||||
qatomic_set(&prof->interm_time,
|
||||
prof->interm_time + profile_getclock() - ti);
|
||||
ti = profile_getclock();
|
||||
#endif
|
||||
|
||||
@@ -1804,10 +1805,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
tb->tc.size = gen_code_size;
|
||||
|
||||
#ifdef CONFIG_PROFILER
|
||||
atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
|
||||
atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
|
||||
atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
|
||||
atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
|
||||
qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
|
||||
qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
|
||||
qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
|
||||
qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
|
||||
#endif
|
||||
|
||||
#ifdef DEBUG_DISAS
|
||||
@@ -1869,7 +1870,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
}
|
||||
#endif
|
||||
|
||||
atomic_set(&tcg_ctx->code_gen_ptr, (void *)
|
||||
qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
|
||||
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
|
||||
CODE_GEN_ALIGN));
|
||||
|
||||
@@ -1905,7 +1906,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
||||
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
|
||||
|
||||
orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
|
||||
atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
|
||||
qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
|
||||
tb_destroy(tb);
|
||||
return existing_tb;
|
||||
}
|
||||
@@ -2273,7 +2274,7 @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
|
||||
unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
|
||||
|
||||
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
|
||||
atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
|
||||
qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2393,7 +2394,7 @@ void dump_exec_info(void)
|
||||
|
||||
qemu_printf("\nStatistics:\n");
|
||||
qemu_printf("TB flush count %u\n",
|
||||
atomic_read(&tb_ctx.tb_flush_count));
|
||||
qatomic_read(&tb_ctx.tb_flush_count));
|
||||
qemu_printf("TB invalidate count %zu\n",
|
||||
tcg_tb_phys_invalidate_count());
|
||||
|
||||
@@ -2415,7 +2416,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
|
||||
{
|
||||
g_assert(qemu_mutex_iothread_locked());
|
||||
cpu->interrupt_request |= mask;
|
||||
atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -104,7 +104,7 @@ static void qjack_buffer_create(QJackBuffer *buffer, int channels, int frames)
|
||||
static void qjack_buffer_clear(QJackBuffer *buffer)
|
||||
{
|
||||
assert(buffer->data);
|
||||
atomic_store_release(&buffer->used, 0);
|
||||
qatomic_store_release(&buffer->used, 0);
|
||||
buffer->rptr = 0;
|
||||
buffer->wptr = 0;
|
||||
}
|
||||
@@ -129,7 +129,7 @@ static int qjack_buffer_write(QJackBuffer *buffer, float *data, int size)
|
||||
assert(buffer->data);
|
||||
const int samples = size / sizeof(float);
|
||||
int frames = samples / buffer->channels;
|
||||
const int avail = buffer->frames - atomic_load_acquire(&buffer->used);
|
||||
const int avail = buffer->frames - qatomic_load_acquire(&buffer->used);
|
||||
|
||||
if (frames > avail) {
|
||||
frames = avail;
|
||||
@@ -153,7 +153,7 @@ static int qjack_buffer_write(QJackBuffer *buffer, float *data, int size)
|
||||
|
||||
buffer->wptr = wptr;
|
||||
|
||||
atomic_add(&buffer->used, frames);
|
||||
qatomic_add(&buffer->used, frames);
|
||||
return frames * buffer->channels * sizeof(float);
|
||||
};
|
||||
|
||||
@@ -161,7 +161,7 @@ static int qjack_buffer_write(QJackBuffer *buffer, float *data, int size)
|
||||
static int qjack_buffer_write_l(QJackBuffer *buffer, float **dest, int frames)
|
||||
{
|
||||
assert(buffer->data);
|
||||
const int avail = buffer->frames - atomic_load_acquire(&buffer->used);
|
||||
const int avail = buffer->frames - qatomic_load_acquire(&buffer->used);
|
||||
int wptr = buffer->wptr;
|
||||
|
||||
if (frames > avail) {
|
||||
@@ -185,7 +185,7 @@ static int qjack_buffer_write_l(QJackBuffer *buffer, float **dest, int frames)
|
||||
}
|
||||
buffer->wptr = wptr;
|
||||
|
||||
atomic_add(&buffer->used, frames);
|
||||
qatomic_add(&buffer->used, frames);
|
||||
return frames;
|
||||
}
|
||||
|
||||
@@ -195,7 +195,7 @@ static int qjack_buffer_read(QJackBuffer *buffer, float *dest, int size)
|
||||
assert(buffer->data);
|
||||
const int samples = size / sizeof(float);
|
||||
int frames = samples / buffer->channels;
|
||||
const int avail = atomic_load_acquire(&buffer->used);
|
||||
const int avail = qatomic_load_acquire(&buffer->used);
|
||||
|
||||
if (frames > avail) {
|
||||
frames = avail;
|
||||
@@ -219,7 +219,7 @@ static int qjack_buffer_read(QJackBuffer *buffer, float *dest, int size)
|
||||
|
||||
buffer->rptr = rptr;
|
||||
|
||||
atomic_sub(&buffer->used, frames);
|
||||
qatomic_sub(&buffer->used, frames);
|
||||
return frames * buffer->channels * sizeof(float);
|
||||
}
|
||||
|
||||
@@ -228,7 +228,7 @@ static int qjack_buffer_read_l(QJackBuffer *buffer, float **dest, int frames)
|
||||
{
|
||||
assert(buffer->data);
|
||||
int copy = frames;
|
||||
const int used = atomic_load_acquire(&buffer->used);
|
||||
const int used = qatomic_load_acquire(&buffer->used);
|
||||
int rptr = buffer->rptr;
|
||||
|
||||
if (copy > used) {
|
||||
@@ -252,7 +252,7 @@ static int qjack_buffer_read_l(QJackBuffer *buffer, float **dest, int frames)
|
||||
}
|
||||
buffer->rptr = rptr;
|
||||
|
||||
atomic_sub(&buffer->used, copy);
|
||||
qatomic_sub(&buffer->used, copy);
|
||||
return copy;
|
||||
}
|
||||
|
||||
|
||||
block.c (4 changes)
@@ -1694,7 +1694,7 @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file,
|
||||
}
|
||||
|
||||
/* bdrv_new() and bdrv_close() make it so */
|
||||
assert(atomic_read(&bs->copy_on_read) == 0);
|
||||
assert(qatomic_read(&bs->copy_on_read) == 0);
|
||||
|
||||
if (bs->open_flags & BDRV_O_COPY_ON_READ) {
|
||||
if (!bs->read_only) {
|
||||
@@ -4436,7 +4436,7 @@ static void bdrv_close(BlockDriverState *bs)
|
||||
bs->file = NULL;
|
||||
g_free(bs->opaque);
|
||||
bs->opaque = NULL;
|
||||
atomic_set(&bs->copy_on_read, 0);
|
||||
qatomic_set(&bs->copy_on_read, 0);
|
||||
bs->backing_file[0] = '\0';
|
||||
bs->backing_format[0] = '\0';
|
||||
bs->total_sectors = 0;
|
||||
|
||||
@@ -1353,12 +1353,12 @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
|
||||
|
||||
void blk_inc_in_flight(BlockBackend *blk)
|
||||
{
|
||||
atomic_inc(&blk->in_flight);
|
||||
qatomic_inc(&blk->in_flight);
|
||||
}
|
||||
|
||||
void blk_dec_in_flight(BlockBackend *blk)
|
||||
{
|
||||
atomic_dec(&blk->in_flight);
|
||||
qatomic_dec(&blk->in_flight);
|
||||
aio_wait_kick();
|
||||
}
|
||||
|
||||
@@ -1720,7 +1720,7 @@ void blk_drain(BlockBackend *blk)
|
||||
|
||||
/* We may have -ENOMEDIUM completions in flight */
|
||||
AIO_WAIT_WHILE(blk_get_aio_context(blk),
|
||||
atomic_mb_read(&blk->in_flight) > 0);
|
||||
qatomic_mb_read(&blk->in_flight) > 0);
|
||||
|
||||
if (bs) {
|
||||
bdrv_drained_end(bs);
|
||||
@@ -1739,7 +1739,7 @@ void blk_drain_all(void)
|
||||
aio_context_acquire(ctx);
|
||||
|
||||
/* We may have -ENOMEDIUM completions in flight */
|
||||
AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);
|
||||
AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0);
|
||||
|
||||
aio_context_release(ctx);
|
||||
}
|
||||
@@ -2346,6 +2346,7 @@ void blk_io_limits_update_group(BlockBackend *blk, const char *group)
|
||||
static void blk_root_drained_begin(BdrvChild *child)
|
||||
{
|
||||
BlockBackend *blk = child->opaque;
|
||||
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
|
||||
|
||||
if (++blk->quiesce_counter == 1) {
|
||||
if (blk->dev_ops && blk->dev_ops->drained_begin) {
|
||||
@@ -2356,8 +2357,8 @@ static void blk_root_drained_begin(BdrvChild *child)
|
||||
/* Note that blk->root may not be accessible here yet if we are just
|
||||
* attaching to a BlockDriverState that is drained. Use child instead. */
|
||||
|
||||
if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
|
||||
throttle_group_restart_tgm(&blk->public.throttle_group_member);
|
||||
if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
|
||||
throttle_group_restart_tgm(tgm);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2374,7 +2375,7 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
|
||||
assert(blk->quiesce_counter);
|
||||
|
||||
assert(blk->public.throttle_group_member.io_limits_disabled);
|
||||
atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
|
||||
qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
|
||||
|
||||
if (--blk->quiesce_counter == 0) {
|
||||
if (blk->dev_ops && blk->dev_ops->drained_end) {
|
||||
|
||||
block/io.c (48 changes)
@@ -69,7 +69,7 @@ void bdrv_parent_drained_end_single(BdrvChild *c)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
|
||||
BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0);
|
||||
BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
|
||||
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
|
||||
@@ -186,12 +186,12 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
|
||||
*/
|
||||
void bdrv_enable_copy_on_read(BlockDriverState *bs)
|
||||
{
|
||||
atomic_inc(&bs->copy_on_read);
|
||||
qatomic_inc(&bs->copy_on_read);
|
||||
}
|
||||
|
||||
void bdrv_disable_copy_on_read(BlockDriverState *bs)
|
||||
{
|
||||
int old = atomic_fetch_dec(&bs->copy_on_read);
|
||||
int old = qatomic_fetch_dec(&bs->copy_on_read);
|
||||
assert(old >= 1);
|
||||
}
|
||||
|
||||
@@ -219,9 +219,9 @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
|
||||
}
|
||||
|
||||
/* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
|
||||
atomic_mb_set(&data->done, true);
|
||||
qatomic_mb_set(&data->done, true);
|
||||
if (!data->begin) {
|
||||
atomic_dec(data->drained_end_counter);
|
||||
qatomic_dec(data->drained_end_counter);
|
||||
}
|
||||
bdrv_dec_in_flight(bs);
|
||||
|
||||
@@ -248,7 +248,7 @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
|
||||
};
|
||||
|
||||
if (!begin) {
|
||||
atomic_inc(drained_end_counter);
|
||||
qatomic_inc(drained_end_counter);
|
||||
}
|
||||
|
||||
/* Make sure the driver callback completes during the polling phase for
|
||||
@@ -268,7 +268,7 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
|
||||
return true;
|
||||
}
|
||||
|
||||
if (atomic_read(&bs->in_flight)) {
|
||||
if (qatomic_read(&bs->in_flight)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -382,7 +382,7 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
|
||||
assert(!qemu_in_coroutine());
|
||||
|
||||
/* Stop things in parent-to-child order */
|
||||
if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
|
||||
if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
|
||||
aio_disable_external(bdrv_get_aio_context(bs));
|
||||
}
|
||||
|
||||
@@ -473,7 +473,7 @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
|
||||
bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
|
||||
drained_end_counter);
|
||||
|
||||
old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
|
||||
old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
|
||||
if (old_quiesce_counter == 1) {
|
||||
aio_enable_external(bdrv_get_aio_context(bs));
|
||||
}
|
||||
@@ -492,7 +492,7 @@ void bdrv_drained_end(BlockDriverState *bs)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
|
||||
BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
|
||||
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
|
||||
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
|
||||
@@ -504,7 +504,7 @@ void bdrv_subtree_drained_end(BlockDriverState *bs)
|
||||
{
|
||||
int drained_end_counter = 0;
|
||||
bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
|
||||
BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
|
||||
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
|
||||
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
|
||||
@@ -526,7 +526,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
|
||||
&drained_end_counter);
|
||||
}
|
||||
|
||||
BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0);
|
||||
BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -553,7 +553,7 @@ static void bdrv_drain_assert_idle(BlockDriverState *bs)
|
||||
{
|
||||
BdrvChild *child, *next;
|
||||
|
||||
assert(atomic_read(&bs->in_flight) == 0);
|
||||
assert(qatomic_read(&bs->in_flight) == 0);
|
||||
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
|
||||
bdrv_drain_assert_idle(child->bs);
|
||||
}
|
||||
@@ -655,7 +655,7 @@ void bdrv_drain_all_end(void)
|
||||
}
|
||||
|
||||
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
|
||||
AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0);
|
||||
AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);
|
||||
|
||||
assert(bdrv_drain_all_count > 0);
|
||||
bdrv_drain_all_count--;
|
||||
@@ -675,7 +675,7 @@ void bdrv_drain_all(void)
|
||||
static void tracked_request_end(BdrvTrackedRequest *req)
|
||||
{
|
||||
if (req->serialising) {
|
||||
atomic_dec(&req->bs->serialising_in_flight);
|
||||
qatomic_dec(&req->bs->serialising_in_flight);
|
||||
}
|
||||
|
||||
qemu_co_mutex_lock(&req->bs->reqs_lock);
|
||||
@@ -777,7 +777,7 @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
|
||||
|
||||
qemu_co_mutex_lock(&bs->reqs_lock);
|
||||
if (!req->serialising) {
|
||||
atomic_inc(&req->bs->serialising_in_flight);
|
||||
qatomic_inc(&req->bs->serialising_in_flight);
|
||||
req->serialising = true;
|
||||
}
|
||||
|
||||
@@ -841,7 +841,7 @@ static int bdrv_get_cluster_size(BlockDriverState *bs)
|
||||
|
||||
void bdrv_inc_in_flight(BlockDriverState *bs)
|
||||
{
|
||||
atomic_inc(&bs->in_flight);
|
||||
qatomic_inc(&bs->in_flight);
|
||||
}
|
||||
|
||||
void bdrv_wakeup(BlockDriverState *bs)
|
||||
@@ -851,7 +851,7 @@ void bdrv_wakeup(BlockDriverState *bs)
|
||||
|
||||
void bdrv_dec_in_flight(BlockDriverState *bs)
|
||||
{
|
||||
atomic_dec(&bs->in_flight);
|
||||
qatomic_dec(&bs->in_flight);
|
||||
bdrv_wakeup(bs);
|
||||
}
|
||||
|
||||
@@ -860,7 +860,7 @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self
|
||||
BlockDriverState *bs = self->bs;
|
||||
bool waited = false;
|
||||
|
||||
if (!atomic_read(&bs->serialising_in_flight)) {
|
||||
if (!qatomic_read(&bs->serialising_in_flight)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1747,7 +1747,7 @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
|
||||
bdrv_inc_in_flight(bs);
|
||||
|
||||
/* Don't do copy-on-read if we read data before write operation */
|
||||
if (atomic_read(&bs->copy_on_read)) {
|
||||
if (qatomic_read(&bs->copy_on_read)) {
|
||||
flags |= BDRV_REQ_COPY_ON_READ;
|
||||
}
|
||||
|
||||
@@ -1935,7 +1935,7 @@ bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
|
||||
int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
|
||||
BlockDriverState *bs = child->bs;
|
||||
|
||||
atomic_inc(&bs->write_gen);
|
||||
qatomic_inc(&bs->write_gen);
|
||||
|
||||
/*
|
||||
* Discard cannot extend the image, but in error handling cases, such as
|
||||
@@ -2768,7 +2768,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
|
||||
}
|
||||
|
||||
qemu_co_mutex_lock(&bs->reqs_lock);
|
||||
current_gen = atomic_read(&bs->write_gen);
|
||||
current_gen = qatomic_read(&bs->write_gen);
|
||||
|
||||
/* Wait until any previous flushes are completed */
|
||||
while (bs->active_flush_req) {
|
||||
@@ -3116,7 +3116,7 @@ void bdrv_io_plug(BlockDriverState *bs)
|
||||
bdrv_io_plug(child->bs);
|
||||
}
|
||||
|
||||
if (atomic_fetch_inc(&bs->io_plugged) == 0) {
|
||||
if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
|
||||
BlockDriver *drv = bs->drv;
|
||||
if (drv && drv->bdrv_io_plug) {
|
||||
drv->bdrv_io_plug(bs);
|
||||
@@ -3129,7 +3129,7 @@ void bdrv_io_unplug(BlockDriverState *bs)
|
||||
BdrvChild *child;
|
||||
|
||||
assert(bs->io_plugged);
|
||||
if (atomic_fetch_dec(&bs->io_plugged) == 1) {
|
||||
if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
|
||||
BlockDriver *drv = bs->drv;
|
||||
if (drv && drv->bdrv_io_unplug) {
|
||||
drv->bdrv_io_unplug(bs);
|
||||
|
||||
@@ -721,7 +721,7 @@ nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
|
||||
}
|
||||
|
||||
/* Set task->complete before reading bs->wakeup. */
|
||||
atomic_mb_set(&task->complete, 1);
|
||||
qatomic_mb_set(&task->complete, 1);
|
||||
bdrv_wakeup(task->bs);
|
||||
}
|
||||
|
||||
|
||||
@@ -665,7 +665,7 @@ out:
|
||||
srco->co = NULL;
|
||||
srco->ret = ret;
|
||||
/* Set srco->finished before reading bs->wakeup. */
|
||||
atomic_mb_set(&srco->finished, true);
|
||||
qatomic_mb_set(&srco->finished, true);
|
||||
if (srco->bs) {
|
||||
bdrv_wakeup(srco->bs);
|
||||
}
|
||||
|
||||
@@ -228,7 +228,7 @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
|
||||
* immediately if it has pending requests. Otherwise we could be
|
||||
* forcing it to wait for other member's throttled requests. */
|
||||
if (tgm_has_pending_reqs(tgm, is_write) &&
|
||||
atomic_read(&tgm->io_limits_disabled)) {
|
||||
qatomic_read(&tgm->io_limits_disabled)) {
|
||||
return tgm;
|
||||
}
|
||||
|
||||
@@ -272,7 +272,7 @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
|
||||
ThrottleTimers *tt = &tgm->throttle_timers;
|
||||
bool must_wait;
|
||||
|
||||
if (atomic_read(&tgm->io_limits_disabled)) {
|
||||
if (qatomic_read(&tgm->io_limits_disabled)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -417,7 +417,7 @@ static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
|
||||
|
||||
g_free(data);
|
||||
|
||||
atomic_dec(&tgm->restart_pending);
|
||||
qatomic_dec(&tgm->restart_pending);
|
||||
aio_wait_kick();
|
||||
}
|
||||
|
||||
@@ -434,7 +434,7 @@ static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write
|
||||
* be no timer pending on this tgm at this point */
|
||||
assert(!timer_pending(tgm->throttle_timers.timers[is_write]));
|
||||
|
||||
atomic_inc(&tgm->restart_pending);
|
||||
qatomic_inc(&tgm->restart_pending);
|
||||
|
||||
co = qemu_coroutine_create(throttle_group_restart_queue_entry, rd);
|
||||
aio_co_enter(tgm->aio_context, co);
|
||||
@@ -544,7 +544,7 @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
|
||||
|
||||
tgm->throttle_state = ts;
|
||||
tgm->aio_context = ctx;
|
||||
atomic_set(&tgm->restart_pending, 0);
|
||||
qatomic_set(&tgm->restart_pending, 0);
|
||||
|
||||
qemu_mutex_lock(&tg->lock);
|
||||
/* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
|
||||
@@ -592,7 +592,7 @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
|
||||
}
|
||||
|
||||
/* Wait for throttle_group_restart_queue_entry() coroutines to finish */
|
||||
AIO_WAIT_WHILE(tgm->aio_context, atomic_read(&tgm->restart_pending) > 0);
|
||||
AIO_WAIT_WHILE(tgm->aio_context, qatomic_read(&tgm->restart_pending) > 0);
|
||||
|
||||
qemu_mutex_lock(&tg->lock);
|
||||
for (i = 0; i < 2; i++) {
|
||||
|
||||
@@ -217,7 +217,7 @@ static void throttle_reopen_abort(BDRVReopenState *reopen_state)
|
||||
static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs)
|
||||
{
|
||||
ThrottleGroupMember *tgm = bs->opaque;
|
||||
if (atomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
|
||||
if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
|
||||
throttle_group_restart_tgm(tgm);
|
||||
}
|
||||
}
|
||||
@@ -226,7 +226,7 @@ static void coroutine_fn throttle_co_drain_end(BlockDriverState *bs)
|
||||
{
|
||||
ThrottleGroupMember *tgm = bs->opaque;
|
||||
assert(tgm->io_limits_disabled);
|
||||
atomic_dec(&tgm->io_limits_disabled);
|
||||
qatomic_dec(&tgm->io_limits_disabled);
|
||||
}
|
||||
|
||||
static const char *const throttle_strong_runtime_opts[] = {
|
||||
|
||||
@@ -1604,7 +1604,7 @@ static void external_snapshot_commit(BlkActionState *common)
|
||||
/* We don't need (or want) to use the transactional
|
||||
* bdrv_reopen_multiple() across all the entries at once, because we
|
||||
* don't want to abort all of them if one of them fails the reopen */
|
||||
if (!atomic_read(&state->old_bs->copy_on_read)) {
|
||||
if (!qatomic_read(&state->old_bs->copy_on_read)) {
|
||||
bdrv_reopen_set_read_only(state->old_bs, true, NULL);
|
||||
}
|
||||
|
||||
|
||||
@@ -298,7 +298,7 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
|
||||
info = g_new0(BlockJobInfo, 1);
|
||||
info->type = g_strdup(job_type_str(&job->job));
|
||||
info->device = g_strdup(job->job.id);
|
||||
info->busy = atomic_read(&job->job.busy);
|
||||
info->busy = qatomic_read(&job->job.busy);
|
||||
info->paused = job->job.pause_count > 0;
|
||||
info->offset = job->job.progress.current;
|
||||
info->len = job->job.progress.total;
|
||||
|
||||
@@ -449,7 +449,7 @@ static void
|
||||
vu_log_page(uint8_t *log_table, uint64_t page)
|
||||
{
|
||||
DPRINT("Logged dirty guest page: %"PRId64"\n", page);
|
||||
atomic_or(&log_table[page / 8], 1 << (page % 8));
|
||||
qatomic_or(&log_table[page / 8], 1 << (page % 8));
|
||||
}
|
||||
|
||||
static void
|
||||
|
||||
@@ -148,7 +148,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
|
||||
wi.exclusive = false;
|
||||
|
||||
queue_work_on_cpu(cpu, &wi);
|
||||
while (!atomic_mb_read(&wi.done)) {
|
||||
while (!qatomic_mb_read(&wi.done)) {
|
||||
CPUState *self_cpu = current_cpu;
|
||||
|
||||
qemu_cond_wait(&qemu_work_cond, mutex);
|
||||
@@ -188,20 +188,20 @@ void start_exclusive(void)
|
||||
exclusive_idle();
|
||||
|
||||
/* Make all other cpus stop executing. */
|
||||
atomic_set(&pending_cpus, 1);
|
||||
qatomic_set(&pending_cpus, 1);
|
||||
|
||||
/* Write pending_cpus before reading other_cpu->running. */
|
||||
smp_mb();
|
||||
running_cpus = 0;
|
||||
CPU_FOREACH(other_cpu) {
|
||||
if (atomic_read(&other_cpu->running)) {
|
||||
if (qatomic_read(&other_cpu->running)) {
|
||||
other_cpu->has_waiter = true;
|
||||
running_cpus++;
|
||||
qemu_cpu_kick(other_cpu);
|
||||
}
|
||||
}
|
||||
|
||||
atomic_set(&pending_cpus, running_cpus + 1);
|
||||
qatomic_set(&pending_cpus, running_cpus + 1);
|
||||
while (pending_cpus > 1) {
|
||||
qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
|
||||
}
|
||||
@@ -220,7 +220,7 @@ void end_exclusive(void)
|
||||
current_cpu->in_exclusive_context = false;
|
||||
|
||||
qemu_mutex_lock(&qemu_cpu_list_lock);
|
||||
atomic_set(&pending_cpus, 0);
|
||||
qatomic_set(&pending_cpus, 0);
|
||||
qemu_cond_broadcast(&exclusive_resume);
|
||||
qemu_mutex_unlock(&qemu_cpu_list_lock);
|
||||
}
|
||||
@@ -228,7 +228,7 @@ void end_exclusive(void)
|
||||
/* Wait for exclusive ops to finish, and begin cpu execution. */
|
||||
void cpu_exec_start(CPUState *cpu)
|
||||
{
|
||||
atomic_set(&cpu->running, true);
|
||||
qatomic_set(&cpu->running, true);
|
||||
|
||||
/* Write cpu->running before reading pending_cpus. */
|
||||
smp_mb();
|
||||
@@ -246,17 +246,17 @@ void cpu_exec_start(CPUState *cpu)
|
||||
* 3. pending_cpus == 0. Then start_exclusive is definitely going to
|
||||
* see cpu->running == true, and it will kick the CPU.
|
||||
*/
|
||||
if (unlikely(atomic_read(&pending_cpus))) {
|
||||
if (unlikely(qatomic_read(&pending_cpus))) {
|
||||
QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
|
||||
if (!cpu->has_waiter) {
|
||||
/* Not counted in pending_cpus, let the exclusive item
|
||||
* run. Since we have the lock, just set cpu->running to true
|
||||
* while holding it; no need to check pending_cpus again.
|
||||
*/
|
||||
atomic_set(&cpu->running, false);
|
||||
qatomic_set(&cpu->running, false);
|
||||
exclusive_idle();
|
||||
/* Now pending_cpus is zero. */
|
||||
atomic_set(&cpu->running, true);
|
||||
qatomic_set(&cpu->running, true);
|
||||
} else {
|
||||
/* Counted in pending_cpus, go ahead and release the
|
||||
* waiter at cpu_exec_end.
|
||||
@@ -268,7 +268,7 @@ void cpu_exec_start(CPUState *cpu)
|
||||
/* Mark cpu as not executing, and release pending exclusive ops. */
|
||||
void cpu_exec_end(CPUState *cpu)
|
||||
{
|
||||
atomic_set(&cpu->running, false);
|
||||
qatomic_set(&cpu->running, false);
|
||||
|
||||
/* Write cpu->running before reading pending_cpus. */
|
||||
smp_mb();
|
||||
@@ -288,11 +288,11 @@ void cpu_exec_end(CPUState *cpu)
|
||||
* see cpu->running == false, and it can ignore this CPU until the
|
||||
* next cpu_exec_start.
|
||||
*/
|
||||
if (unlikely(atomic_read(&pending_cpus))) {
|
||||
if (unlikely(qatomic_read(&pending_cpus))) {
|
||||
QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
|
||||
if (cpu->has_waiter) {
|
||||
cpu->has_waiter = false;
|
||||
atomic_set(&pending_cpus, pending_cpus - 1);
|
||||
qatomic_set(&pending_cpus, pending_cpus - 1);
|
||||
if (pending_cpus == 1) {
|
||||
qemu_cond_signal(&exclusive_cond);
|
||||
}
|
||||
@@ -346,7 +346,7 @@ void process_queued_cpu_work(CPUState *cpu)
|
||||
if (wi->free) {
|
||||
g_free(wi);
|
||||
} else {
|
||||
atomic_mb_set(&wi->done, true);
|
||||
qatomic_mb_set(&wi->done, true);
|
||||
}
|
||||
}
|
||||
qemu_mutex_unlock(&cpu->work_mutex);
|
||||
|
||||
@@ -23,9 +23,9 @@ provides macros that fall in three camps:
|
||||
|
||||
- compiler barriers: ``barrier()``;
|
||||
|
||||
- weak atomic access and manual memory barriers: ``atomic_read()``,
|
||||
``atomic_set()``, ``smp_rmb()``, ``smp_wmb()``, ``smp_mb()``, ``smp_mb_acquire()``,
|
||||
``smp_mb_release()``, ``smp_read_barrier_depends()``;
|
||||
- weak atomic access and manual memory barriers: ``qatomic_read()``,
|
||||
``qatomic_set()``, ``smp_rmb()``, ``smp_wmb()``, ``smp_mb()``,
|
||||
``smp_mb_acquire()``, ``smp_mb_release()``, ``smp_read_barrier_depends()``;
|
||||
|
||||
- sequentially consistent atomic access: everything else.
|
||||
|
||||
@@ -67,23 +67,23 @@ in the order specified by its program".
|
||||
``qemu/atomic.h`` provides the following set of atomic read-modify-write
|
||||
operations::
|
||||
|
||||
void atomic_inc(ptr)
|
||||
void atomic_dec(ptr)
|
||||
void atomic_add(ptr, val)
|
||||
void atomic_sub(ptr, val)
|
||||
void atomic_and(ptr, val)
|
||||
void atomic_or(ptr, val)
|
||||
void qatomic_inc(ptr)
|
||||
void qatomic_dec(ptr)
|
||||
void qatomic_add(ptr, val)
|
||||
void qatomic_sub(ptr, val)
|
||||
void qatomic_and(ptr, val)
|
||||
void qatomic_or(ptr, val)
|
||||
|
||||
typeof(*ptr) atomic_fetch_inc(ptr)
|
||||
typeof(*ptr) atomic_fetch_dec(ptr)
|
||||
typeof(*ptr) atomic_fetch_add(ptr, val)
|
||||
typeof(*ptr) atomic_fetch_sub(ptr, val)
|
||||
typeof(*ptr) atomic_fetch_and(ptr, val)
|
||||
typeof(*ptr) atomic_fetch_or(ptr, val)
|
||||
typeof(*ptr) atomic_fetch_xor(ptr, val)
|
||||
typeof(*ptr) atomic_fetch_inc_nonzero(ptr)
|
||||
typeof(*ptr) atomic_xchg(ptr, val)
|
||||
typeof(*ptr) atomic_cmpxchg(ptr, old, new)
|
||||
typeof(*ptr) qatomic_fetch_inc(ptr)
|
||||
typeof(*ptr) qatomic_fetch_dec(ptr)
|
||||
typeof(*ptr) qatomic_fetch_add(ptr, val)
|
||||
typeof(*ptr) qatomic_fetch_sub(ptr, val)
|
||||
typeof(*ptr) qatomic_fetch_and(ptr, val)
|
||||
typeof(*ptr) qatomic_fetch_or(ptr, val)
|
||||
typeof(*ptr) qatomic_fetch_xor(ptr, val)
|
||||
typeof(*ptr) qatomic_fetch_inc_nonzero(ptr)
|
||||
typeof(*ptr) qatomic_xchg(ptr, val)
|
||||
typeof(*ptr) qatomic_cmpxchg(ptr, old, new)
|
||||
|
||||
all of which return the old value of ``*ptr``. These operations are
|
||||
polymorphic; they operate on any type that is as wide as a pointer or
|
||||
@@ -91,19 +91,19 @@ smaller.
|
||||
|
||||
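A rough usage sketch of the fetch-style helpers listed above (illustrative
only; the ``Obj`` type, its ``refcnt`` field and the two functions are
hypothetical, not taken from the QEMU tree)::

    #include <stdbool.h>
    #include "qemu/atomic.h"     /* qatomic_* helpers */

    typedef struct Obj { int refcnt; } Obj;

    static void obj_ref(Obj *o)
    {
        qatomic_inc(&o->refcnt);                    /* void flavour, no result */
    }

    static bool obj_unref(Obj *o)
    {
        /* qatomic_fetch_dec() returns the value *before* the decrement */
        return qatomic_fetch_dec(&o->refcnt) == 1;  /* true when the last reference is gone */
    }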
Similar operations return the new value of ``*ptr``::
|
||||
|
||||
typeof(*ptr) atomic_inc_fetch(ptr)
|
||||
typeof(*ptr) atomic_dec_fetch(ptr)
|
||||
typeof(*ptr) atomic_add_fetch(ptr, val)
|
||||
typeof(*ptr) atomic_sub_fetch(ptr, val)
|
||||
typeof(*ptr) atomic_and_fetch(ptr, val)
|
||||
typeof(*ptr) atomic_or_fetch(ptr, val)
|
||||
typeof(*ptr) atomic_xor_fetch(ptr, val)
|
||||
typeof(*ptr) qatomic_inc_fetch(ptr)
|
||||
typeof(*ptr) qatomic_dec_fetch(ptr)
|
||||
typeof(*ptr) qatomic_add_fetch(ptr, val)
|
||||
typeof(*ptr) qatomic_sub_fetch(ptr, val)
|
||||
typeof(*ptr) qatomic_and_fetch(ptr, val)
|
||||
typeof(*ptr) qatomic_or_fetch(ptr, val)
|
||||
typeof(*ptr) qatomic_xor_fetch(ptr, val)
|
||||
|
||||
``qemu/atomic.h`` also provides loads and stores that cannot be reordered
|
||||
with each other::
|
||||
|
||||
typeof(*ptr) atomic_mb_read(ptr)
|
||||
void atomic_mb_set(ptr, val)
|
||||
typeof(*ptr) qatomic_mb_read(ptr)
|
||||
void qatomic_mb_set(ptr, val)
|
||||
|
||||
However these do not provide sequential consistency and, in particular,
|
||||
they do not participate in the total ordering enforced by
|
||||
@@ -115,11 +115,11 @@ easiest to hardest):
|
||||
|
||||
- lightweight synchronization primitives such as ``QemuEvent``
|
||||
|
||||
- RCU operations (``atomic_rcu_read``, ``atomic_rcu_set``) when publishing
|
||||
- RCU operations (``qatomic_rcu_read``, ``qatomic_rcu_set``) when publishing
|
||||
or accessing a new version of a data structure
|
||||
|
||||
- other atomic accesses: ``atomic_read`` and ``atomic_load_acquire`` for
|
||||
loads, ``atomic_set`` and ``atomic_store_release`` for stores, ``smp_mb``
|
||||
- other atomic accesses: ``qatomic_read`` and ``qatomic_load_acquire`` for
|
||||
loads, ``qatomic_set`` and ``qatomic_store_release`` for stores, ``smp_mb``
|
||||
to forbid reordering subsequent loads before a store.
|
||||
|
||||
|
||||
@@ -149,22 +149,22 @@ The only guarantees that you can rely upon in this case are:
|
||||
|
||||
When using this model, variables are accessed with:
|
||||
|
||||
- ``atomic_read()`` and ``atomic_set()``; these prevent the compiler from
|
||||
- ``qatomic_read()`` and ``qatomic_set()``; these prevent the compiler from
|
||||
optimizing accesses out of existence and creating unsolicited
|
||||
accesses, but do not otherwise impose any ordering on loads and
|
||||
stores: both the compiler and the processor are free to reorder
|
||||
them.
|
||||
|
||||
- ``atomic_load_acquire()``, which guarantees the LOAD to appear to
|
||||
- ``qatomic_load_acquire()``, which guarantees the LOAD to appear to
|
||||
happen, with respect to the other components of the system,
|
||||
before all the LOAD or STORE operations specified afterwards.
|
||||
Operations coming before ``atomic_load_acquire()`` can still be
|
||||
Operations coming before ``qatomic_load_acquire()`` can still be
|
||||
reordered after it.
|
||||
|
||||
- ``atomic_store_release()``, which guarantees the STORE to appear to
|
||||
- ``qatomic_store_release()``, which guarantees the STORE to appear to
|
||||
happen, with respect to the other components of the system,
|
||||
after all the LOAD or STORE operations specified before.
|
||||
Operations coming after ``atomic_store_release()`` can still be
|
||||
Operations coming after ``qatomic_store_release()`` can still be
|
||||
reordered before it.
|
||||
|
||||
Restrictions to the ordering of accesses can also be specified
|
||||
@@ -229,7 +229,7 @@ They come in six kinds:
|
||||
dependency and a full read barrier or better is required.
|
||||
|
||||
|
||||
Memory barriers and ``atomic_load_acquire``/``atomic_store_release`` are
|
||||
Memory barriers and ``qatomic_load_acquire``/``qatomic_store_release`` are
|
||||
mostly used when a data structure has one thread that is always a writer
|
||||
and one thread that is always a reader:
|
||||
|
||||
@@ -238,8 +238,8 @@ and one thread that is always a reader:
|
||||
+==================================+==================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| atomic_store_release(&a, x); | y = atomic_load_acquire(&b); |
|
||||
| atomic_store_release(&b, y); | x = atomic_load_acquire(&a); |
|
||||
| qatomic_store_release(&a, x); | y = qatomic_load_acquire(&b); |
|
||||
| qatomic_store_release(&b, y); | x = qatomic_load_acquire(&a); |
|
||||
+----------------------------------+----------------------------------+
|
||||
|
||||
In this case, correctness is easy to check for using the "pairing"
|
||||
@@ -258,14 +258,14 @@ outside a loop. For example:
|
||||
| | |
|
||||
| n = 0; | n = 0; |
|
||||
| for (i = 0; i < 10; i++) | for (i = 0; i < 10; i++) |
|
||||
| n += atomic_load_acquire(&a[i]); | n += atomic_read(&a[i]); |
|
||||
| n += qatomic_load_acquire(&a[i]); | n += qatomic_read(&a[i]); |
|
||||
| | smp_mb_acquire(); |
|
||||
+------------------------------------------+----------------------------------+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| | smp_mb_release(); |
|
||||
| for (i = 0; i < 10; i++) | for (i = 0; i < 10; i++) |
|
||||
| atomic_store_release(&a[i], false); | atomic_set(&a[i], false); |
|
||||
| qatomic_store_release(&a[i], false); | qatomic_set(&a[i], false); |
|
||||
+------------------------------------------+----------------------------------+
|
||||
|
||||
Splitting a loop can also be useful to reduce the number of barriers:
|
||||
@@ -277,11 +277,11 @@ Splitting a loop can also be useful to reduce the number of barriers:
|
||||
| | |
|
||||
| n = 0; | smp_mb_release(); |
|
||||
| for (i = 0; i < 10; i++) { | for (i = 0; i < 10; i++) |
|
||||
| atomic_store_release(&a[i], false); | atomic_set(&a[i], false); |
|
||||
| qatomic_store_release(&a[i], false); | qatomic_set(&a[i], false); |
|
||||
| smp_mb(); | smb_mb(); |
|
||||
| n += atomic_read(&b[i]); | n = 0; |
|
||||
| n += qatomic_read(&b[i]); | n = 0; |
|
||||
| } | for (i = 0; i < 10; i++) |
|
||||
| | n += atomic_read(&b[i]); |
|
||||
| | n += qatomic_read(&b[i]); |
|
||||
+------------------------------------------+----------------------------------+
|
||||
|
||||
In this case, a ``smp_mb_release()`` is also replaced with a (possibly cheaper, and clearer
|
||||
@@ -294,10 +294,10 @@ as well) ``smp_wmb()``:
|
||||
| | |
|
||||
| | smp_mb_release(); |
|
||||
| for (i = 0; i < 10; i++) { | for (i = 0; i < 10; i++) |
|
||||
| atomic_store_release(&a[i], false); | atomic_set(&a[i], false); |
|
||||
| atomic_store_release(&b[i], false); | smb_wmb(); |
|
||||
| qatomic_store_release(&a[i], false); | qatomic_set(&a[i], false); |
|
||||
| qatomic_store_release(&b[i], false); | smb_wmb(); |
|
||||
| } | for (i = 0; i < 10; i++) |
|
||||
| | atomic_set(&b[i], false); |
|
||||
| | qatomic_set(&b[i], false); |
|
||||
+------------------------------------------+----------------------------------+
|
||||
|
||||
|
||||
@@ -306,7 +306,7 @@ as well) ``smp_wmb()``:
|
||||
Acquire/release pairing and the *synchronizes-with* relation
|
||||
------------------------------------------------------------
|
||||
|
||||
Atomic operations other than ``atomic_set()`` and ``atomic_read()`` have
|
||||
Atomic operations other than ``qatomic_set()`` and ``qatomic_read()`` have
|
||||
either *acquire* or *release* semantics [#rmw]_. This has two effects:
|
||||
|
||||
.. [#rmw] Read-modify-write operations can have both---acquire applies to the
|
||||
@@ -357,16 +357,16 @@ thread 2 is relying on the *synchronizes-with* relation between ``pthread_exit``
|
||||
|
||||
Synchronization between threads basically descends from this pairing of
|
||||
a release operation and an acquire operation. Therefore, atomic operations
|
||||
other than ``atomic_set()`` and ``atomic_read()`` will almost always be
|
||||
other than ``qatomic_set()`` and ``qatomic_read()`` will almost always be
|
||||
paired with another operation of the opposite kind: an acquire operation
|
||||
will pair with a release operation and vice versa. This rule of thumb is
|
||||
extremely useful; in the case of QEMU, however, note that the other
|
||||
operation may actually be in a driver that runs in the guest!
|
||||
|
||||
``smp_read_barrier_depends()``, ``smp_rmb()``, ``smp_mb_acquire()``,
|
||||
``atomic_load_acquire()`` and ``atomic_rcu_read()`` all count
|
||||
``qatomic_load_acquire()`` and ``qatomic_rcu_read()`` all count
|
||||
as acquire operations. ``smp_wmb()``, ``smp_mb_release()``,
|
||||
``atomic_store_release()`` and ``atomic_rcu_set()`` all count as release
|
||||
``qatomic_store_release()`` and ``qatomic_rcu_set()`` all count as release
|
||||
operations. ``smp_mb()`` counts as both acquire and release, therefore
|
||||
it can pair with any other atomic operation. Here is an example:
|
||||
|
||||
@@ -375,11 +375,11 @@ it can pair with any other atomic operation. Here is an example:
|
||||
+======================+==============================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| atomic_set(&a, 1); | |
|
||||
| qatomic_set(&a, 1);| |
|
||||
| smp_wmb(); | |
|
||||
| atomic_set(&b, 2); | x = atomic_read(&b); |
|
||||
| qatomic_set(&b, 2);| x = qatomic_read(&b); |
|
||||
| | smp_rmb(); |
|
||||
| | y = atomic_read(&a); |
|
||||
| | y = qatomic_read(&a); |
|
||||
+----------------------+------------------------------+
|
||||
|
||||
Note that a load-store pair only counts if the two operations access the
|
||||
@@ -393,9 +393,9 @@ correct synchronization:
|
||||
+================================+================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| atomic_set(&a, 1); | |
|
||||
| atomic_store_release(&b, 2); | x = atomic_load_acquire(&b); |
|
||||
| | y = atomic_read(&a); |
|
||||
| qatomic_set(&a, 1); | |
|
||||
| qatomic_store_release(&b, 2);| x = qatomic_load_acquire(&b);|
|
||||
| | y = qatomic_read(&a); |
|
||||
+--------------------------------+--------------------------------+
|
||||
|
||||
Acquire and release semantics of higher-level primitives can also be
|
||||
@@ -421,7 +421,7 @@ cannot be a data race:
|
||||
| smp_wmb(); | |
|
||||
| x->i = 2; | |
|
||||
| smp_wmb(); | |
|
||||
| atomic_set(&a, x); | x = atomic_read(&a); |
|
||||
| qatomic_set(&a, x);| x = qatomic_read(&a); |
|
||||
| | smp_read_barrier_depends(); |
|
||||
| | y = x->i; |
|
||||
| | smp_read_barrier_depends(); |
|
||||
@@ -442,7 +442,7 @@ and memory barriers, and the equivalents in QEMU:
|
||||
at all. Linux 4.1 updated them to implement volatile
|
||||
semantics via ``ACCESS_ONCE`` (or the more recent ``READ``/``WRITE_ONCE``).
|
||||
|
||||
QEMU's ``atomic_read`` and ``atomic_set`` implement C11 atomic relaxed
|
||||
QEMU's ``qatomic_read`` and ``qatomic_set`` implement C11 atomic relaxed
|
||||
semantics if the compiler supports it, and volatile semantics otherwise.
|
||||
Both semantics prevent the compiler from doing certain transformations;
|
||||
the difference is that atomic accesses are guaranteed to be atomic,
|
||||
@@ -451,8 +451,8 @@ and memory barriers, and the equivalents in QEMU:
|
||||
since we assume the variables passed are machine-word sized and
|
||||
properly aligned.
|
||||
|
||||
No barriers are implied by ``atomic_read`` and ``atomic_set`` in either Linux
|
||||
or QEMU.
|
||||
No barriers are implied by ``qatomic_read`` and ``qatomic_set`` in either
|
||||
Linux or QEMU.
|
||||
|
||||
- atomic read-modify-write operations in Linux are of three kinds:
|
||||
|
||||
@@ -469,7 +469,7 @@ and memory barriers, and the equivalents in QEMU:
|
||||
a different set of memory barriers; in QEMU, all of them enforce
|
||||
sequential consistency.
|
||||
|
||||
- in QEMU, ``atomic_read()`` and ``atomic_set()`` do not participate in
|
||||
- in QEMU, ``qatomic_read()`` and ``qatomic_set()`` do not participate in
|
||||
the total ordering enforced by sequentially-consistent operations.
|
||||
This is because QEMU uses the C11 memory model. The following example
|
||||
is correct in Linux but not in QEMU:
|
||||
@@ -479,8 +479,8 @@ and memory barriers, and the equivalents in QEMU:
|
||||
+==================================+================================+
|
||||
| :: | :: |
|
||||
| | |
|
||||
| a = atomic_fetch_add(&x, 2); | a = atomic_fetch_add(&x, 2); |
|
||||
| b = READ_ONCE(&y); | b = atomic_read(&y); |
|
||||
| a = atomic_fetch_add(&x, 2); | a = qatomic_fetch_add(&x, 2);|
|
||||
| b = READ_ONCE(&y); | b = qatomic_read(&y); |
|
||||
+----------------------------------+--------------------------------+
|
||||
|
||||
because the read of ``y`` can be moved (by either the processor or the
|
||||
@@ -495,10 +495,10 @@ and memory barriers, and the equivalents in QEMU:
|
||||
+================================+
|
||||
| :: |
|
||||
| |
|
||||
| a = atomic_read(&x); |
|
||||
| atomic_set(&x, a + 2); |
|
||||
| a = qatomic_read(&x); |
|
||||
| qatomic_set(&x, a + 2); |
|
||||
| smp_mb(); |
|
||||
| b = atomic_read(&y); |
|
||||
| b = qatomic_read(&y); |
|
||||
+--------------------------------+
|
||||
|
||||
Sources
|
||||
|
||||
@@ -95,7 +95,7 @@ not just frees, though there could be cases where this is not necessary.
|
||||
|
||||
Reads, instead, can be done without taking the mutex, as long as the
|
||||
readers and writers use the same macros that are used for RCU, for
|
||||
example atomic_rcu_read, atomic_rcu_set, QLIST_FOREACH_RCU, etc. This is
|
||||
example qatomic_rcu_read, qatomic_rcu_set, QLIST_FOREACH_RCU, etc. This is
|
||||
because the reads are done outside a lock and a set or QLIST_INSERT_HEAD
|
||||
can happen concurrently with the read. The RCU API ensures that the
|
||||
processor and the compiler see all required memory barriers.
|
||||
@@ -189,7 +189,7 @@ qemu_lockcnt_lock and qemu_lockcnt_unlock:
|
||||
if (!xyz) {
|
||||
new_xyz = g_new(XYZ, 1);
|
||||
...
|
||||
atomic_rcu_set(&xyz, new_xyz);
|
||||
qatomic_rcu_set(&xyz, new_xyz);
|
||||
}
|
||||
qemu_lockcnt_unlock(&xyz_lockcnt);
|
||||
|
||||
@@ -198,7 +198,7 @@ qemu_lockcnt_dec:
|
||||
|
||||
qemu_lockcnt_inc(&xyz_lockcnt);
|
||||
if (xyz) {
|
||||
XYZ *p = atomic_rcu_read(&xyz);
|
||||
XYZ *p = qatomic_rcu_read(&xyz);
|
||||
...
|
||||
/* Accesses can now be done through "p". */
|
||||
}
|
||||
@@ -222,7 +222,7 @@ the decrement, the locking and the check on count as follows:
|
||||
|
||||
qemu_lockcnt_inc(&xyz_lockcnt);
|
||||
if (xyz) {
|
||||
XYZ *p = atomic_rcu_read(&xyz);
|
||||
XYZ *p = qatomic_rcu_read(&xyz);
|
||||
...
|
||||
/* Accesses can now be done through "p". */
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.