Merge branch 'rcu-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'rcu-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (21 commits)
  rcu: RCU-based detection of stalled CPUs for Classic RCU, fix
  rcu: RCU-based detection of stalled CPUs for Classic RCU
  rcu: add rcu_read_lock_sched() / rcu_read_unlock_sched()
  rcu: fix sparse shadowed variable warning
  doc/RCU: fix pseudocode in rcuref.txt
  rcuclassic: fix compiler warning
  rcu: use irq-safe locks
  rcuclassic: fix compilation NG
  rcu: fix locking cleanup fallout
  rcu: remove redundant ACCESS_ONCE definition from rcupreempt.c
  rcu: fix classic RCU locking cleanup lockdep problem
  rcu: trace fix possible mem-leak
  rcu: just rename call_rcu_bh instead of making it a macro
  rcu: remove list_for_each_rcu()
  rcu: fixes to include/linux/rcupreempt.h
  rcu: classic RCU locking and memory-barrier cleanups
  rcu: prevent console flood when one CPU sees another AWOL via RCU
  rcu, debug: detect stalled grace periods, cleanups
  rcu, debug: detect stalled grace periods
  rcu classic: new algorithm for callbacks-processing(v2)
  ...
@@ -210,7 +210,7 @@ over a rather long period of time, but improvements are always welcome!
 	number of updates per grace period.
 
 9.	All RCU list-traversal primitives, which include
-	rcu_dereference(), list_for_each_rcu(), list_for_each_entry_rcu(),
+	rcu_dereference(), list_for_each_entry_rcu(),
 	list_for_each_continue_rcu(), and list_for_each_safe_rcu(),
 	must be either within an RCU read-side critical section or
 	must be protected by appropriate update-side locks.  RCU
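
For illustration only (this code is not part of the patch), a lookup that follows rule 9 keeps the _rcu traversal inside an RCU read-side critical section; struct foo, foo_list and the key field are made-up names:

	#include <linux/rcupdate.h>
	#include <linux/rculist.h>

	struct foo {
		struct list_head list;
		int key;
	};

	static LIST_HEAD(foo_list);

	/* Reader: safe against concurrent list_add_rcu()/list_del_rcu(). */
	static bool foo_present(int key)
	{
		struct foo *p;
		bool found = false;

		rcu_read_lock();
		list_for_each_entry_rcu(p, &foo_list, list) {
			if (p->key == key) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}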
@@ -29,9 +29,9 @@ release_referenced() delete()
 }
 
 If this list/array is made lock free using RCU as in changing the
-write_lock() in add() and delete() to spin_lock and changing read_lock
-in search_and_reference to rcu_read_lock(), the atomic_get in
-search_and_reference could potentially hold reference to an element which
+write_lock() in add() and delete() to spin_lock() and changing read_lock()
+in search_and_reference() to rcu_read_lock(), the atomic_inc() in
+search_and_reference() could potentially hold reference to an element which
 has already been deleted from the list/array.  Use atomic_inc_not_zero()
 in this scenario as follows:
 
@@ -40,20 +40,20 @@ add() search_and_reference()
 {                                    {
     alloc_object                         rcu_read_lock();
     ...                                  search_for_element
-    atomic_set(&el->rc, 1);              if (atomic_inc_not_zero(&el->rc)) {
-    write_lock(&list_lock);                  rcu_read_unlock();
+    atomic_set(&el->rc, 1);              if (!atomic_inc_not_zero(&el->rc)) {
+    spin_lock(&list_lock);                   rcu_read_unlock();
                                              return FAIL;
     add_element                          }
     ...                                  ...
-    write_unlock(&list_lock);            rcu_read_unlock();
+    spin_unlock(&list_lock);             rcu_read_unlock();
 }                                    }
 3.                                   4.
 release_referenced()                 delete()
 {                                    {
-    ...                                  write_lock(&list_lock);
+    ...                                  spin_lock(&list_lock);
     if (atomic_dec_and_test(&el->rc))    ...
         call_rcu(&el->head, el_free);    delete_element
-    ...                                  write_unlock(&list_lock);
+    ...                                  spin_unlock(&list_lock);
 }                                    ...
                                          if (atomic_dec_and_test(&el->rc))
                                              call_rcu(&el->head, el_free);
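
A compilable counterpart of the corrected pseudocode might look as follows; this is a sketch, not code from the patch, and struct element with its fields is invented:

	#include <linux/rcupdate.h>
	#include <linux/rculist.h>
	#include <linux/slab.h>
	#include <asm/atomic.h>		/* <linux/atomic.h> on later kernels */

	struct element {
		struct list_head list;
		struct rcu_head head;
		atomic_t rc;
		int key;
	};

	static LIST_HEAD(elements);

	/* Reader: look the element up and take a reference only if it is live. */
	static struct element *element_find_get(int key)
	{
		struct element *el, *ret = NULL;

		rcu_read_lock();
		list_for_each_entry_rcu(el, &elements, list) {
			if (el->key == key) {
				/* Fails once delete() has dropped the last reference. */
				if (atomic_inc_not_zero(&el->rc))
					ret = el;
				break;
			}
		}
		rcu_read_unlock();
		return ret;
	}

	static void element_free(struct rcu_head *head)
	{
		kfree(container_of(head, struct element, head));
	}

	static void element_put(struct element *el)
	{
		if (atomic_dec_and_test(&el->rc))
			call_rcu(&el->head, element_free);
	}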
@@ -786,8 +786,6 @@ RCU pointer/list traversal:
 	list_for_each_entry_rcu
 	hlist_for_each_entry_rcu
 
-	list_for_each_rcu		(to be deprecated in favor of
-					 list_for_each_entry_rcu)
 	list_for_each_continue_rcu	(to be deprecated in favor of new
 					 list_for_each_entry_continue_rcu)
 
@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * ACCESS_ONCE() in different C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.
+ * merging, or refetching absolutely anything at any time.  Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
  */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
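
As a hedged illustration of the use case the new comment describes (none of this code is in the patch; the names are invented), a flag shared between process context and an interrupt handler on the same CPU could be accessed like this:

	#include <linux/compiler.h>
	#include <linux/interrupt.h>
	#include <asm/processor.h>	/* cpu_relax() */

	static int wakeup_pending;

	/* IRQ handler: runs on the same CPU as the polling loop below. */
	static irqreturn_t demo_irq(int irq, void *dev)
	{
		ACCESS_ONCE(wakeup_pending) = 1;
		return IRQ_HANDLED;
	}

	/*
	 * Process context: ACCESS_ONCE() forces a real load on every pass,
	 * but provides no CPU-level memory ordering.
	 */
	static void demo_wait(void)
	{
		while (!ACCESS_ONCE(wakeup_pending))
			cpu_relax();
	}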
@@ -40,12 +40,21 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK	( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
 /* Global control variables for rcupdate callback mechanism. */
 struct rcu_ctrlblk {
 	long	cur;		/* Current batch number. */
 	long	completed;	/* Number of the last completed batch */
-	int	next_pending;	/* Is the next batch already waiting? */
+	long	pending;	/* Number of the last pending batch */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+	unsigned long gp_start;	/* Time at which GP started in jiffies. */
+	unsigned long jiffies_stall;
+				/* Time at which to check for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 	int	signaled;
 
@@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b)
 	return (a - b) > 0;
 }
 
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
+/* Per-CPU data for Read-Copy UPdate. */
 struct rcu_data {
 	/* 1) quiescent state handling : */
 	long		quiescbatch;	/* Batch # for grace period */
@@ -78,12 +83,24 @@ struct rcu_data {
 	int		qs_pending;	/* core waits for quiesc state */
 
 	/* 2) batch handling */
-	long		batch;		/* Batch # for current RCU batch */
+	/*
+	 * if nxtlist is not NULL, then:
+	 * batch:
+	 *	The batch # for the last entry of nxtlist
+	 * [*nxttail[1], NULL = *nxttail[2]):
+	 *	Entries that batch # <= batch
+	 * [*nxttail[0], *nxttail[1]):
+	 *	Entries that batch # <= batch - 1
+	 * [nxtlist, *nxttail[0]):
+	 *	Entries that batch # <= batch - 2
+	 *	The grace period for these entries has completed, and
+	 *	the other grace-period-completed entries may be moved
+	 *	here temporarily in rcu_process_callbacks().
+	 */
+	long		batch;
 	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
+	struct rcu_head **nxttail[3];
 	long		qlen;		/* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
 	struct rcu_head *donelist;
 	struct rcu_head **donetail;
 	long		blimit;		/* Upper limit on a processed batch */
 
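
Rough sketch of how a new callback would be appended under the three-segment layout described in the new comment; this paraphrases the idea only (locking and interrupt disabling are omitted) and is not code from this patch:

	static void enqueue_callback_sketch(struct rcu_data *rdp,
					    struct rcu_head *head)
	{
		head->next = NULL;
		*rdp->nxttail[2] = head;	/* append to the newest segment */
		rdp->nxttail[2] = &head->next;
		rdp->qlen++;
	}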
@@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	at->prev = last;
 }
 
-/**
- * list_for_each_rcu	- iterate over an rcu-protected list
- * @pos:	the &struct list_head to use as a loop cursor.
- * @head:	the head for your list.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference((head)->next); \
-		prefetch(pos->next), pos != (head); \
-		pos = rcu_dereference(pos->next))
-
 #define __list_for_each_rcu(pos, head) \
 	for (pos = rcu_dereference((head)->next); \
 		pos != (head); \
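
With list_for_each_rcu() gone, existing users convert to list_for_each_entry_rcu(); a before/after sketch, reusing the made-up struct foo/foo_list from the earlier example (foo_use() stands in for whatever the caller does with each entry):

	/* Old style, using the macro removed here: */
	static void scan_old(void)
	{
		struct list_head *pos;

		rcu_read_lock();
		list_for_each_rcu(pos, &foo_list)
			foo_use(list_entry(pos, struct foo, list));
		rcu_read_unlock();
	}

	/* Equivalent with the preferred primitive: */
	static void scan_new(void)
	{
		struct foo *p;

		rcu_read_lock();
		list_for_each_entry_rcu(p, &foo_list, list)
			foo_use(p);
		rcu_read_unlock();
	}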
@@ -132,6 +132,26 @@ struct rcu_head {
  */
 #define rcu_read_unlock_bh() __rcu_read_unlock_bh()
 
+/**
+ * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+ *
+ * Should be used with either
+ * - synchronize_sched()
+ * or
+ * - call_rcu_sched() and rcu_barrier_sched()
+ *  on the write-side to insure proper synchronization.
+ */
+#define rcu_read_lock_sched() preempt_disable()
+
+/*
+ * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+ *
+ * See rcu_read_lock_sched for more information.
+ */
+#define rcu_read_unlock_sched() preempt_enable()
+
+
+
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
  * RCU read-side critical section.  This pointer may later
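
A hedged usage sketch for the new primitives (not from the patch; struct cfg and shared_cfg are invented): the reader relies on preemption being disabled, and the updater waits with synchronize_sched() before freeing:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct cfg {
		int threshold;
	};

	static struct cfg *shared_cfg;

	static int read_threshold(void)
	{
		struct cfg *c;
		int val = 0;

		rcu_read_lock_sched();
		c = rcu_dereference(shared_cfg);
		if (c)
			val = c->threshold;
		rcu_read_unlock_sched();
		return val;
	}

	/* Update side; serialization against other updaters omitted. */
	static void replace_cfg(struct cfg *newc)
	{
		struct cfg *old = shared_cfg;

		rcu_assign_pointer(shared_cfg, newc);
		synchronize_sched();	/* wait out all rcu_read_lock_sched() readers */
		kfree(old);
	}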
@@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu)
 	rdssp->sched_qs++;
 }
 #define rcu_bh_qsctr_inc(cpu)
-#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
+
+/*
+ * Someone might want to pass call_rcu_bh as a function pointer.
+ * So this needs to just be a rename and not a macro function.
+ *  (no parentheses)
+ */
+#define call_rcu_bh		call_rcu
 
 /**
  * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
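
Why the parenthesis-free rename matters, as a sketch (the ops table here is invented, loosely in the style of rcutorture): the plain alias can be stored in a function pointer exactly like call_rcu:

	struct demo_rcu_ops {
		void (*queue)(struct rcu_head *head,
			      void (*func)(struct rcu_head *head));
	};

	static struct demo_rcu_ops demo_bh_ops = {
		.queue = call_rcu_bh,	/* a function-like macro could not be taken by name */
	};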
@@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
 
 static inline void rcu_enter_nohz(void)
 {
@@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void)
 {
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+				&rs);
 }
 
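
The DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1) above allows at most one warning per 10-second interval. A standalone sketch of the same pattern (demo_check() and its argument are invented, not part of the patch):

	#include <linux/ratelimit.h>

	static void demo_check(int broken)
	{
		static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);

		WARN_ON_RATELIMIT(broken, &rs);	/* warns, but no more than once per 10 s */
	}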
+251 −86	File diff suppressed because it is too large
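
The suppressed diff contains, among other things, the consumer of the stall-detector fields added to struct rcu_ctrlblk above. As a paraphrase of the idea only (these helpers are invented, not the suppressed code): when a grace period starts, a deadline is recorded; if the grace period is still pending past that deadline, a warning is printed and the deadline is pushed out:

	static void record_gp_check_time_sketch(struct rcu_ctrlblk *rcp)
	{
		rcp->gp_start = jiffies;
		rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
	}

	static void check_gp_stall_sketch(struct rcu_ctrlblk *rcp)
	{
		if (rcu_batch_after(rcp->cur, rcp->completed) &&
		    time_after(jiffies, rcp->jiffies_stall)) {
			printk(KERN_ERR "RCU grace period stalled for %lu jiffies\n",
			       jiffies - rcp->gp_start);
			rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
		}
	}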
@@ -58,14 +58,6 @@
 #include <linux/cpumask.h>
 #include <linux/rcupreempt_trace.h>
 
-/*
- * Macro that prevents the compiler from reordering accesses, but does
- * absolutely -nothing- to prevent CPUs from reordering.  This is used
- * only to mediate communication between mainline code and hardware
- * interrupt and NMI handlers.
- */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-
 /*
  * PREEMPT_RCU data structures.
  */
 
@@ -308,11 +308,16 @@ out:
 
 static int __init rcupreempt_trace_init(void)
 {
+	int ret;
+
 	mutex_init(&rcupreempt_trace_mutex);
 	rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
 	if (!rcupreempt_trace_buf)
 		return 1;
-	return rcupreempt_debugfs_init();
+	ret = rcupreempt_debugfs_init();
+	if (ret)
+		kfree(rcupreempt_trace_buf);
+	return ret;
 }
 
 static void __exit rcupreempt_trace_cleanup(void)
@@ -597,6 +597,19 @@ config RCU_TORTURE_TEST_RUNNABLE
 	  Say N here if you want the RCU torture tests to start only
 	  after being manually enabled via /proc.
 
+config RCU_CPU_STALL_DETECTOR
+	bool "Check for stalled CPUs delaying RCU grace periods"
+	depends on CLASSIC_RCU
+	default n
+	help
+	  This option causes RCU to printk information on which
+	  CPUs are delaying the current grace period, but only when
+	  the grace period extends for excessive time periods.
+
+	  Say Y if you want RCU to perform such checks.
+
+	  Say N if you are unsure.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
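
If the check is wanted, it is enabled like any other debug option; a hypothetical configuration fragment (Classic RCU must already be selected, per the dependency above):

	CONFIG_CLASSIC_RCU=y
	CONFIG_RCU_CPU_STALL_DETECTOR=y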