Merge commit '3cf2f34' into sched/core, to fix build error
Fix this dependency on the locking tree's smp_mb*() API changes:

  kernel/sched/idle.c:247:3: error: implicit declaration of function ‘smp_mb__after_atomic’ [-Werror=implicit-function-declaration]

Signed-off-by: Ingo Molnar <mingo@kernel.org>
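For context, the locking tree replaced the per-operation barrier helpers (smp_mb__before_atomic_dec(), smp_mb__after_atomic_inc(), smp_mb__after_clear_bit(), ...) with one generic pair, smp_mb__before_atomic() and smp_mb__after_atomic(), and that rename is what every hunk below applies. A minimal sketch of the conversion pattern, using a hypothetical counter for illustration (not taken from this diff):

	static atomic_t refs = ATOMIC_INIT(1);	/* illustrative only */

	/* Old API: one barrier flavour per atomic operation (removed): */
	smp_mb__before_atomic_dec();
	atomic_dec(&refs);

	/* New API: one generic pair covering all non-value-returning
	 * atomics and bitops that do not already imply a full barrier: */
	smp_mb__before_atomic();
	atomic_dec(&refs);
	smp_mb__after_atomic();	/* order the atomic op before later accesses */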
kernel/debug/debug_core.c
@@ -534,7 +534,7 @@ return_normal:
 		kgdb_info[cpu].exception_state &=
 			~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 		kgdb_info[cpu].enter_kgdb--;
-		smp_mb__before_atomic_dec();
+		smp_mb__before_atomic();
 		atomic_dec(&slaves_in_kgdb);
 		dbg_touch_watchdogs();
 		local_irq_restore(flags);
@@ -662,7 +662,7 @@ kgdb_restore:
 	kgdb_info[cpu].exception_state &=
 		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 	kgdb_info[cpu].enter_kgdb--;
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&masters_in_kgdb);
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
kernel/futex.c +2 -2
@@ -267,7 +267,7 @@ static inline void futex_get_mm(union futex_key *key)
 	 * get_futex_key() implies a full barrier. This is relied upon
 	 * as full barrier (B), see the ordering comment above.
 	 */
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }

 /*
@@ -280,7 +280,7 @@ static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 	/*
	 * Full barrier (A), see the ordering comment above.
	 */
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 #endif
 }
kernel/kmod.c +1 -1
@@ -498,7 +498,7 @@ int __usermodehelper_disable(enum umh_disable_depth depth)
 static void helper_lock(void)
 {
 	atomic_inc(&running_helpers);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }

 static void helper_unlock(void)
kernel/locking/lockdep_internals.h
@@ -54,9 +54,9 @@ enum {
  * table (if it's not there yet), and we check it for lock order
  * conflicts and deadlocks.
  */
-#define MAX_LOCKDEP_ENTRIES	16384UL
+#define MAX_LOCKDEP_ENTRIES	32768UL

-#define MAX_LOCKDEP_CHAINS_BITS	15
+#define MAX_LOCKDEP_CHAINS_BITS	16
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

 #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
@@ -65,7 +65,7 @@ enum {
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
-#define MAX_STACK_TRACE_ENTRIES	262144UL
+#define MAX_STACK_TRACE_ENTRIES	524288UL

 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
kernel/locking/rwsem-xadd.c
@@ -11,6 +11,55 @@
 #include <linux/init.h>
 #include <linux/export.h>

+/*
+ * Guide to the rw_semaphore's count field for common values.
+ * (32-bit case illustrated, similar for 64-bit)
+ *
+ * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
+ *		    X = #active_readers + #readers attempting to lock
+ *		    (X*ACTIVE_BIAS)
+ *
+ * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
+ *		attempting to read lock or write lock.
+ *
+ * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
+ *		    X = #active readers + # readers attempting lock
+ *		    (X*ACTIVE_BIAS + WAITING_BIAS)
+ *		(2) 1 writer attempting lock, no waiters for lock
+ *		    X-1 = #active readers + #readers attempting lock
+ *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
+ *		(3) 1 writer active, no waiters for lock
+ *		    X-1 = #active readers + #readers attempting lock
+ *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
+ *
+ * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
+ *		    (WAITING_BIAS + ACTIVE_BIAS)
+ *		(2) 1 writer active or attempting lock, no waiters for lock
+ *		    (ACTIVE_WRITE_BIAS)
+ *
+ * 0xffff0000	(1) There are writers or readers queued but none active
+ *		    or in the process of attempting lock.
+ *		    (WAITING_BIAS)
+ *		Note: writer can attempt to steal lock for this count by adding
+ *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
+ *
+ * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
+ *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
+ *
+ * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
+ *	 the count becomes more than 0 for successful lock acquisition,
+ *	 i.e. the case where there are only readers or nobody has lock.
+ *	 (1st and 2nd case above).
+ *
+ *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
+ *	 checking the count becomes ACTIVE_WRITE_BIAS for successful lock
+ *	 acquisition (i.e. nobody else has lock or attempts lock). If
+ *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
+ *	 are only waiters but none active (5th case above), and attempt to
+ *	 steal the lock.
+ *
+ */
+
 /*
  * Initialize an rwsem:
  */
kernel/rcu/tree.c +11 -11
@@ -387,9 +387,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 	}
 	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-	smp_mb__before_atomic_inc();  /* See above. */
+	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
-	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
+	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);

 	/*
@@ -507,10 +507,10 @@ void rcu_irq_exit(void)
 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 			       int user)
 {
-	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
+	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic_inc();  /* See above. */
+	smp_mb__after_atomic();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
@@ -635,10 +635,10 @@ void rcu_nmi_enter(void)
 	    (atomic_read(&rdtp->dynticks) & 0x1))
 		return;
 	rdtp->dynticks_nmi_nesting++;
-	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
+	smp_mb__before_atomic();  /* Force delay from prior write. */
 	atomic_inc(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-	smp_mb__after_atomic_inc();  /* See above. */
+	smp_mb__after_atomic();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 }

@@ -657,9 +657,9 @@ void rcu_nmi_exit(void)
 	    --rdtp->dynticks_nmi_nesting != 0)
 		return;
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-	smp_mb__before_atomic_inc();  /* See above. */
+	smp_mb__before_atomic();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
-	smp_mb__after_atomic_inc();  /* Force delay to next write. */
+	smp_mb__after_atomic();  /* Force delay to next write. */
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }

@@ -2790,7 +2790,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone1);
 			return;
 		}
@@ -2808,7 +2808,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_workdone2);
 			return;
 		}
@@ -2837,7 +2837,7 @@ void synchronize_sched_expedited(void)
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
 			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic_inc(); /* ^^^ */
+			smp_mb__before_atomic(); /* ^^^ */
 			atomic_long_inc(&rsp->expedited_done_lost);
 			break;
 		}
kernel/rcu/tree_plugin.h
@@ -2523,9 +2523,9 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
 	/* Record start of fully idle period. */
 	j = jiffies;
 	ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(&rdtp->dynticks_idle);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
 }

@@ -2590,9 +2590,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
 	}

 	/* Record end of idle period. */
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(&rdtp->dynticks_idle);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));

 	/*
kernel/sched/core.c
@@ -90,6 +90,22 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>

+#ifdef smp_mb__before_atomic
+void __smp_mb__before_atomic(void)
+{
+	smp_mb__before_atomic();
+}
+EXPORT_SYMBOL(__smp_mb__before_atomic);
+#endif
+
+#ifdef smp_mb__after_atomic
+void __smp_mb__after_atomic(void)
+{
+	smp_mb__after_atomic();
+}
+EXPORT_SYMBOL(__smp_mb__after_atomic);
+#endif
+
 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
 	unsigned long delta;
kernel/sched/cpupri.c
@@ -165,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		 * do a write memory barrier, and then update the count, to
 		 * make sure the vector is visible when count is set.
 		 */
-		smp_mb__before_atomic_inc();
+		smp_mb__before_atomic();
 		atomic_inc(&(vec)->count);
 		do_mb = 1;
 	}
@@ -185,14 +185,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		 * the new priority vec.
 		 */
 		if (do_mb)
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();

 		/*
 		 * When removing from the vector, we decrement the counter first
 		 * do a memory barrier and then clear the mask.
 		 */
 		atomic_dec(&(vec)->count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		cpumask_clear_cpu(cpu, vec->mask);
 	}
kernel/sched/wait.c +1 -1
@@ -394,7 +394,7 @@ EXPORT_SYMBOL(__wake_up_bit);
  *
  * In order for this to function properly, as it uses waitqueue_active()
  * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_clear_bit(), but in some
+ * this. Typically, this will be smp_mb__after_atomic(), but in some
  * cases where bitflags are manipulated non-atomically under a lock, one
  * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
  * because spin_unlock() does not guarantee a memory barrier.