Merge branches 'doc.2021.11.30c', 'exp.2021.12.07a', 'fastnohz.2021.11.30c', 'fixes.2021.11.30c', 'nocb.2021.12.09a', 'nolibc.2021.11.30c', 'tasks.2021.12.09a', 'torture.2021.12.07a' and 'torturescript.2021.11.30c' into HEAD
doc.2021.11.30c: Documentation updates.
exp.2021.12.07a: Expedited-grace-period fixes.
fastnohz.2021.11.30c: Remove CONFIG_RCU_FAST_NO_HZ.
fixes.2021.11.30c: Miscellaneous fixes.
nocb.2021.12.09a: No-CB CPU updates.
nolibc.2021.11.30c: Tiny in-kernel library updates.
tasks.2021.12.09a: RCU-tasks updates, including update-side scalability.
torture.2021.12.07a: Torture-test in-kernel module updates.
torturescript.2021.11.30c: Torture-test scripting updates.
@@ -254,17 +254,6 @@ period (in this case 2603), the grace-period sequence number (7075), and
an estimate of the total number of RCU callbacks queued across all CPUs
(625 in this case).

In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
for each CPU::

  0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 dyntick_enabled: 1

The "last_accelerate:" prints the low-order 16 bits (in hex) of the
jiffies counter when this CPU last invoked rcu_try_advance_all_cbs()
from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from
rcu_prepare_for_idle(). "dyntick_enabled: 1" indicates that dyntick-idle
processing is enabled.

If the grace period ends just as the stall warning starts printing,
there will be a spurious stall-warning message, which will include
the following::

@@ -4343,19 +4343,30 @@
Disable the Correctable Errors Collector,
see CONFIG_RAS_CEC help text.

rcu_nocbs= [KNL]
The argument is a cpu list, as described above.
rcu_nocbs[=cpu-list]
[KNL] The optional argument is a cpu list,
as described above.

In kernels built with CONFIG_RCU_NOCB_CPU=y, set
the specified list of CPUs to be no-callback CPUs.
Invocation of these CPUs' RCU callbacks will be
offloaded to "rcuox/N" kthreads created for that
purpose, where "x" is "p" for RCU-preempt, and
"s" for RCU-sched, and "N" is the CPU number.
This reduces OS jitter on the offloaded CPUs,
which can be useful for HPC and real-time
workloads. It can also improve energy efficiency
for asymmetric multiprocessors.
In kernels built with CONFIG_RCU_NOCB_CPU=y,
enable the no-callback CPU mode, which prevents
such CPUs' callbacks from being invoked in
softirq context. Invocation of such CPUs' RCU
callbacks will instead be offloaded to "rcuox/N"
kthreads created for that purpose, where "x" is
"p" for RCU-preempt, "s" for RCU-sched, and "g"
for the kthreads that mediate grace periods; and
"N" is the CPU number. This reduces OS jitter on
the offloaded CPUs, which can be useful for HPC
and real-time workloads. It can also improve
energy efficiency for asymmetric multiprocessors.

If a cpulist is passed as an argument, the specified
list of CPUs is set to no-callback mode from boot.

Otherwise, if the '=' sign and the cpulist
arguments are omitted, no CPU will be set to
no-callback mode from boot but the mode may be
toggled at runtime via cpusets.

rcu_nocb_poll [KNL]
Rather than requiring that offloaded CPUs
@@ -4489,10 +4500,6 @@
on rcutree.qhimark at boot time and to zero to
disable more aggressive help enlistment.

rcutree.rcu_idle_gp_delay= [KNL]
Set wakeup interval for idle CPUs that have
RCU callbacks (RCU_FAST_NO_HZ=y).

rcutree.rcu_kick_kthreads= [KNL]
Cause the grace-period kthread to get an extra
wake_up() if it sleeps three times longer than
@@ -4603,8 +4610,12 @@
in seconds.

rcutorture.fwd_progress= [KNL]
Enable RCU grace-period forward-progress testing
Specifies the number of kthreads to be used
for RCU grace-period forward-progress testing
for the types of RCU supporting this notion.
Defaults to 1 kthread, values less than zero or
greater than the number of CPUs cause the number
of CPUs to be used.

rcutorture.fwd_progress_div= [KNL]
Specify the fraction of a CPU-stall-warning
@@ -4805,6 +4816,29 @@
period to instead use normal non-expedited
grace-period processing.

rcupdate.rcu_task_collapse_lim= [KNL]
Set the maximum number of callbacks present
at the beginning of a grace period that allows
the RCU Tasks flavors to collapse back to using
a single callback queue. This switching only
occurs when rcupdate.rcu_task_enqueue_lim is
set to the default value of -1.

rcupdate.rcu_task_contend_lim= [KNL]
Set the minimum number of callback-queuing-time
lock-contention events per jiffy required to
cause the RCU Tasks flavors to switch to per-CPU
callback queuing. This switching only occurs
when rcupdate.rcu_task_enqueue_lim is set to
the default value of -1.

rcupdate.rcu_task_enqueue_lim= [KNL]
Set the number of callback queues to use for the
RCU Tasks family of RCU flavors. The default
of -1 allows this to be automatically (and
dynamically) adjusted. This parameter is intended
for use in testing.

rcupdate.rcu_task_ipi_delay= [KNL]
Set time in jiffies during which RCU tasks will
avoid sending IPIs, starting with the beginning
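The rcupdate.rcu_task_*_lim parameters above tune how the RCU Tasks flavors queue their callbacks. As a hedged illustration only (not part of this patch), the sketch below posts one RCU Tasks callback of the kind whose enqueueing those limits govern; struct my_obj, my_obj_free(), and my_obj_retire() are made-up names, and a CONFIG_TASKS_RCU=y kernel is assumed.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
        struct rcu_head rh;
        /* payload ... */
};

/* Runs once every task has passed through a voluntary context switch. */
static void my_obj_free(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct my_obj, rh));
}

static void my_obj_retire(struct my_obj *p)
{
        call_rcu_tasks(&p->rh, my_obj_free);    /* enqueue path governed by the limits above */
}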
@@ -184,16 +184,12 @@ There are situations in which idle CPUs cannot be permitted to
enter either dyntick-idle mode or adaptive-tick mode, the most
common being when that CPU has RCU callbacks pending.

The CONFIG_RCU_FAST_NO_HZ=y Kconfig option may be used to cause such CPUs
to enter dyntick-idle mode or adaptive-tick mode anyway. In this case,
a timer will awaken these CPUs every four jiffies in order to ensure
that the RCU callbacks are processed in a timely fashion.

Another approach is to offload RCU callback processing to "rcuo" kthreads
Avoid this by offloading RCU callback processing to "rcuo" kthreads
using the CONFIG_RCU_NOCB_CPU=y Kconfig option. The specific CPUs to
offload may be selected using The "rcu_nocbs=" kernel boot parameter,
which takes a comma-separated list of CPUs and CPU ranges, for example,
"1,3-5" selects CPUs 1, 3, 4, and 5.
"1,3-5" selects CPUs 1, 3, 4, and 5. Note that CPUs specified by
the "nohz_full" kernel boot parameter are also offloaded.

The offloaded CPUs will never queue RCU callbacks, and therefore RCU
never prevents offloaded CPUs from entering either dyntick-idle mode
@@ -69,7 +69,7 @@ struct rcu_cblist {
 *
 *
 * ----------------------------------------------------------------------------
 * | SEGCBLIST_SOFTIRQ_ONLY |
 * | SEGCBLIST_RCU_CORE |
 * | |
 * | Callbacks processed by rcu_core() from softirqs or local |
 * | rcuc kthread, without holding nocb_lock. |
@@ -77,7 +77,7 @@ struct rcu_cblist {
 * |
 * v
 * ----------------------------------------------------------------------------
 * | SEGCBLIST_OFFLOADED |
 * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
 * | |
 * | Callbacks processed by rcu_core() from softirqs or local |
 * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
@@ -89,7 +89,9 @@ struct rcu_cblist {
 * | |
 * v v
 * --------------------------------------- ----------------------------------|
 * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
 * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
 * | | | |
 * | | | |
@@ -104,9 +106,10 @@ struct rcu_cblist {
 * |
 * v
 * |--------------------------------------------------------------------------|
 * | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_CB | |
 * | SEGCBLIST_KTHREAD_GP |
 * | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_GP | |
 * | SEGCBLIST_KTHREAD_CB |
 * | |
 * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
 * | handling callbacks. Enable bypass queueing. |
@@ -120,7 +123,8 @@ struct rcu_cblist {
 *
 *
 * |--------------------------------------------------------------------------|
 * | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_CB | |
 * | SEGCBLIST_KTHREAD_GP |
 * | |
@@ -130,6 +134,22 @@ struct rcu_cblist {
 * |
 * v
 * |--------------------------------------------------------------------------|
 * | SEGCBLIST_RCU_CORE | |
 * | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_OFFLOADED | |
 * | SEGCBLIST_KTHREAD_CB | |
 * | SEGCBLIST_KTHREAD_GP |
 * | |
 * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
 * | handles callbacks concurrently. Bypass enqueue is enabled. |
 * | Invoke RCU core so we make sure not to preempt it in the middle with |
 * | leaving some urgent work unattended within a jiffy. |
 * ----------------------------------------------------------------------------
 * |
 * v
 * |--------------------------------------------------------------------------|
 * | SEGCBLIST_RCU_CORE | |
 * | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_KTHREAD_CB | |
 * | SEGCBLIST_KTHREAD_GP |
 * | |
@@ -143,7 +163,9 @@ struct rcu_cblist {
 * | |
 * v v
 * ---------------------------------------------------------------------------|
 * | |
 * | | |
 * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
 * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
 * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
 * | | |
 * | GP kthread woke up and | CB kthread woke up and |
@@ -159,7 +181,7 @@ struct rcu_cblist {
 * |
 * v
 * ----------------------------------------------------------------------------
 * | 0 |
 * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
 * | |
 * | Callbacks processed by rcu_core() from softirqs or local |
 * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
@@ -168,17 +190,18 @@ struct rcu_cblist {
 * |
 * v
 * ----------------------------------------------------------------------------
 * | SEGCBLIST_SOFTIRQ_ONLY |
 * | SEGCBLIST_RCU_CORE |
 * | |
 * | Callbacks processed by rcu_core() from softirqs or local |
 * | rcuc kthread, without holding nocb_lock. |
 * ----------------------------------------------------------------------------
 */
#define SEGCBLIST_ENABLED BIT(0)
#define SEGCBLIST_SOFTIRQ_ONLY BIT(1)
#define SEGCBLIST_KTHREAD_CB BIT(2)
#define SEGCBLIST_KTHREAD_GP BIT(3)
#define SEGCBLIST_OFFLOADED BIT(4)
#define SEGCBLIST_RCU_CORE BIT(1)
#define SEGCBLIST_LOCKING BIT(2)
#define SEGCBLIST_KTHREAD_CB BIT(3)
#define SEGCBLIST_KTHREAD_GP BIT(4)
#define SEGCBLIST_OFFLOADED BIT(5)

struct rcu_segcblist {
struct rcu_head *head;

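As a hedged aside (not part of this patch): with the renumbered flag bits above, the steady fully-offloaded state in the diagram can be tested with one mask comparison. The name my_segcblist_fully_offloaded() and the direct read of ->flags are purely illustrative; real code goes through the rcu_segcblist_*_flags() accessors in kernel/rcu/rcu_segcblist.h.

#include <linux/compiler.h>
#include <linux/rcu_segcblist.h>

/* Illustrative only: is @rsclp in the steady offloaded state shown above? */
static inline bool my_segcblist_fully_offloaded(struct rcu_segcblist *rsclp)
{
        int mask = SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
                   SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP;

        return (READ_ONCE(rsclp->flags) & mask) == mask;
}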
@@ -364,6 +364,12 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __unrcu_pointer(p, local) \
({ \
typeof(*p) *local = (typeof(*p) *__force)(p); \
rcu_check_sparse(p, __rcu); \
((typeof(*p) __force __kernel *)(local)); \
})
/**
 * unrcu_pointer - mark a pointer as not being RCU protected
 * @p: pointer needing to lose its __rcu property
@@ -371,39 +377,35 @@ static inline void rcu_preempt_sleep_check(void) { }
 * Converts @p from an __rcu pointer to a __kernel pointer.
 * This allows an __rcu pointer to be used with xchg() and friends.
 */
#define unrcu_pointer(p) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \
rcu_check_sparse(p, __rcu); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu))

#define __rcu_access_pointer(p, space) \
#define __rcu_access_pointer(p, local, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
((typeof(*p) __force __kernel *)(local)); \
})
#define __rcu_dereference_check(p, c, space) \
#define __rcu_dereference_check(p, local, c, space) \
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
((typeof(*p) __force __kernel *)(local)); \
})
#define __rcu_dereference_protected(p, c, space) \
#define __rcu_dereference_protected(p, local, c, space) \
({ \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
#define rcu_dereference_raw(p) \
#define __rcu_dereference_raw(p, local) \
({ \
/* Dependency order vs. p above. */ \
typeof(p) ________p1 = READ_ONCE(p); \
((typeof(*p) __force __kernel *)(________p1)); \
typeof(p) local = READ_ONCE(p); \
((typeof(*p) __force __kernel *)(local)); \
})
#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu))

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
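A hedged usage sketch, not from this patch, of the unrcu_pointer() rewritten above: it strips the __rcu address space so an RCU-protected pointer can be handed to xchg() and friends, exactly as its kernel-doc says. struct foo, my_gp, and publish_foo() are made-up names.

#include <linux/atomic.h>
#include <linux/rcupdate.h>

struct foo { int a; };
static struct foo __rcu *my_gp;

/* Atomically publish @newp and hand back the previously published pointer. */
static struct foo *publish_foo(struct foo *newp)
{
        return unrcu_pointer(xchg(&my_gp, RCU_INITIALIZER(newp)));
}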
@@ -490,7 +492,7 @@ do { \
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
@@ -526,7 +528,8 @@ do { \
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
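A hedged usage sketch, not from this patch, of the two-argument rcu_dereference_check() whose expansion now takes a __UNIQUE_ID() temporary: the condition lets lockdep accept either an RCU read-side critical section or a specific update-side lock. my_lock, my_head, struct foo, and peek_foo() are made-up names.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct foo { int a; };
static DEFINE_SPINLOCK(my_lock);
static struct foo __rcu *my_head;

/* Legal under rcu_read_lock() or while holding my_lock. */
static struct foo *peek_foo(void)
{
        return rcu_dereference_check(my_head, lockdep_is_held(&my_lock));
}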
@@ -541,7 +544,8 @@ do { \
 * rcu_read_lock() but also rcu_read_lock_bh() into account.
 */
#define rcu_dereference_bh_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -556,7 +560,8 @@ do { \
 * only rcu_read_lock() but also rcu_read_lock_sched() into account.
 */
#define rcu_dereference_sched_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || rcu_read_lock_sched_held(), \
__rcu)

/*
@@ -566,7 +571,8 @@ do { \
 * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
#define rcu_dereference_raw_check(p) \
__rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
@@ -585,7 +591,7 @@ do { \
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
__rcu_dereference_protected((p), (c), __rcu)
__rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu)


/**

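A hedged update-side sketch, not from this patch, pairing rcu_dereference_protected() with rcu_assign_pointer() under the lock that excludes other updaters; my_lock, my_head, struct foo, and update_foo() are made-up names.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
        int a;
        struct rcu_head rcu;
};

static DEFINE_SPINLOCK(my_lock);
static struct foo __rcu *my_head;

static void update_foo(struct foo *newp)
{
        struct foo *oldp;

        spin_lock(&my_lock);
        /* Updates are excluded by my_lock, so no READ_ONCE() is needed. */
        oldp = rcu_dereference_protected(my_head, lockdep_is_held(&my_lock));
        rcu_assign_pointer(my_head, newp);
        spin_unlock(&my_lock);
        kfree_rcu(oldp, rcu);   /* free the old version after a grace period */
}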
@@ -85,7 +85,7 @@ static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
#define rcu_is_idle_cpu(cpu) \
(is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())
(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{

@@ -117,7 +117,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, ssp, c) \
__rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
(c) || srcu_read_lock_held(ssp), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing

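A hedged read-side sketch, not from this patch, of the srcu_dereference() family built on the __rcu_dereference_check() shown above; my_srcu, my_ptr, struct foo, and read_foo() are made-up names.

#include <linux/srcu.h>

struct foo { int a; };
DEFINE_SRCU(my_srcu);
static struct foo __rcu *my_ptr;

static int read_foo(void)
{
        struct foo *p;
        int idx, val = -1;

        idx = srcu_read_lock(&my_srcu);
        p = srcu_dereference(my_ptr, &my_srcu);
        if (p)
                val = p->a;
        srcu_read_unlock(&my_srcu, idx);
        return val;
}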
@@ -38,13 +38,8 @@ do { \
pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); \
} \
} while (0)
#define VERBOSE_TOROUT_ERRSTRING(s) \
do { \
if (verbose) { \
verbose_torout_sleep(); \
pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); \
} \
} while (0)
#define TOROUT_ERRSTRING(s) \
pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s)
void verbose_torout_sleep(void);

#define torture_init_error(firsterr) \

@@ -1047,7 +1047,7 @@ static int __init lock_torture_init(void)
sizeof(writer_tasks[0]),
GFP_KERNEL);
if (writer_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
TOROUT_ERRSTRING("writer_tasks: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
@@ -1058,7 +1058,7 @@ static int __init lock_torture_init(void)
sizeof(reader_tasks[0]),
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
TOROUT_ERRSTRING("reader_tasks: Out of memory");
kfree(writer_tasks);
writer_tasks = NULL;
firsterr = -ENOMEM;

@@ -112,7 +112,7 @@ config RCU_STALL_COMMON
making these warnings mandatory for the tree variants.

config RCU_NEED_SEGCBLIST
def_bool ( TREE_RCU || TREE_SRCU )
def_bool ( TREE_RCU || TREE_SRCU || TASKS_RCU_GENERIC )

config RCU_FANOUT
int "Tree-based hierarchical RCU fanout value"
@@ -169,24 +169,6 @@ config RCU_FANOUT_LEAF

Take the default if unsure.

config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
depends on NO_HZ_COMMON && SMP && RCU_EXPERT
default n
help
This option permits CPUs to enter dynticks-idle state even if
they have RCU callbacks queued, and prevents RCU from waking
these CPUs up more than roughly once every four jiffies (by
default, you can adjust this using the rcutree.rcu_idle_gp_delay
parameter), thus improving energy efficiency. On the other
hand, this option increases the duration of RCU grace periods,
for example, slowing down synchronize_rcu().

Say Y if energy efficiency is critically important, and you
don't care about increased grace-period durations.

Say N if you are unsure.

config RCU_BOOST
bool "Enable RCU priority boosting"
depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT

@@ -261,16 +261,14 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
}

/*
 * Mark the specified rcu_segcblist structure as offloaded.
 * Mark the specified rcu_segcblist structure as offloaded (or not)
 */
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
{
if (offload) {
rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY);
rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED);
} else {
if (offload)
rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED);
else
rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
}
}

/*

@@ -80,11 +80,14 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
}

/* Is the specified rcu_segcblist offloaded, or is SEGCBLIST_SOFTIRQ_ONLY set? */
/*
 * Is the specified rcu_segcblist NOCB offloaded (or in the middle of the
 * [de]offloading process)?
 */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
!rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY))
rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING))
return true;

return false;
@@ -92,9 +95,8 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)

static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
{
int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;

if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags)
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
!rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE))
return true;

return false;

@@ -50,8 +50,8 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define VERBOSE_SCALEOUT_ERRSTRING(s) \
do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)

/*
 * The intended use cases for the nreaders and nwriters module parameters
@@ -514,11 +514,11 @@ rcu_scale_cleanup(void)
 * during the mid-boot phase, so have to wait till the end.
 */
if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
VERBOSE_SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
if (rcu_gp_is_normal() && gp_exp)
VERBOSE_SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
if (gp_exp && gp_async)
VERBOSE_SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

if (torture_cleanup_begin())
return;
@@ -845,7 +845,7 @@ rcu_scale_init(void)
reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_SCALEOUT_ERRSTRING("out of memory");
SCALEOUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}
@@ -865,7 +865,7 @@ rcu_scale_init(void)
kcalloc(nrealwriters, sizeof(*writer_n_durations),
GFP_KERNEL);
if (!writer_tasks || !writer_durations || !writer_n_durations) {
VERBOSE_SCALEOUT_ERRSTRING("out of memory");
SCALEOUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}

@@ -44,7 +44,10 @@
pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
do { if (verbose) pr_alert("%s" SCALE_FLAG s, scale_type, ## x); } while (0)
do { \
if (verbose) \
pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
} while (0)

static atomic_t verbose_batch_ctr;

@@ -54,12 +57,11 @@ do { \
(verbose_batched <= 0 || \
!(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) { \
schedule_timeout_uninterruptible(1); \
pr_alert("%s" SCALE_FLAG s, scale_type, ## x); \
pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
} \
} while (0)

#define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \
do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x); } while (0)
#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");
@@ -604,7 +606,7 @@ static u64 process_durations(int n)
char *buf;
u64 sum = 0;

buf = kmalloc(128 + nreaders * 32, GFP_KERNEL);
buf = kmalloc(800 + 64, GFP_KERNEL);
if (!buf)
return 0;
buf[0] = 0;
@@ -617,13 +619,15 @@ static u64 process_durations(int n)

if (i % 5 == 0)
strcat(buf, "\n");
if (strlen(buf) >= 800) {
pr_alert("%s", buf);
buf[0] = 0;
}
strcat(buf, buf1);

sum += rt->last_duration_ns;
}
strcat(buf, "\n");

SCALEOUT("%s\n", buf);
pr_alert("%s\n", buf);

kfree(buf);
return sum;
@@ -637,7 +641,6 @@ static u64 process_durations(int n)
// point all the timestamps are printed.
static int main_func(void *arg)
{
bool errexit = false;
int exp, r;
char buf1[64];
char *buf;
@@ -648,10 +651,10 @@ static int main_func(void *arg)

VERBOSE_SCALEOUT("main_func task started");
result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
buf = kzalloc(64 + nruns * 32, GFP_KERNEL);
buf = kzalloc(800 + 64, GFP_KERNEL);
if (!result_avg || !buf) {
VERBOSE_SCALEOUT_ERRSTRING("out of memory");
errexit = true;
SCALEOUT_ERRSTRING("out of memory");
goto oom_exit;
}
if (holdoff)
schedule_timeout_interruptible(holdoff * HZ);
@@ -663,8 +666,6 @@ static int main_func(void *arg)

// Start exp readers up per experiment
for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
if (errexit)
break;
if (torture_must_stop())
goto end;

@@ -698,26 +699,23 @@ static int main_func(void *arg)
// Print the average of all experiments
SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

if (!errexit) {
buf[0] = 0;
strcat(buf, "\n");
strcat(buf, "Runs\tTime(ns)\n");
}

pr_alert("Runs\tTime(ns)\n");
for (exp = 0; exp < nruns; exp++) {
u64 avg;
u32 rem;

if (errexit)
break;
avg = div_u64_rem(result_avg[exp], 1000, &rem);
sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
strcat(buf, buf1);
if (strlen(buf) >= 800) {
pr_alert("%s", buf);
buf[0] = 0;
}
}

if (!errexit)
SCALEOUT("%s", buf);
pr_alert("%s", buf);

oom_exit:
// This will shutdown everything including us.
if (shutdown) {
shutdown_start = 1;
@@ -841,12 +839,12 @@ ref_scale_init(void)
reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
GFP_KERNEL);
if (!reader_tasks) {
VERBOSE_SCALEOUT_ERRSTRING("out of memory");
SCALEOUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}

VERBOSE_SCALEOUT("Starting %d reader threads\n", nreaders);
VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);

for (i = 0; i < nreaders; i++) {
firsterr = torture_create_kthread(ref_scale_reader, (void *)i,

@@ -99,7 +99,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;

WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())
swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

@@ -79,7 +79,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_RCU_NOCB_CPU
.cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
.cblist.flags = SEGCBLIST_RCU_CORE,
#endif
};
static struct rcu_state rcu_state = {
@@ -624,7 +624,6 @@ static noinstr void rcu_eqs_enter(bool user)
instrumentation_begin();
trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
rcu_prepare_for_idle();
rcu_preempt_deferred_qs(current);

// instrumentation for the noinstr rcu_dynticks_eqs_enter()
@@ -768,9 +767,6 @@ noinstr void rcu_nmi_exit(void)
trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

if (!in_nmi())
rcu_prepare_for_idle();

// instrumentation for the noinstr rcu_dynticks_eqs_enter()
instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
instrumentation_end();
@@ -872,7 +868,6 @@ static void noinstr rcu_eqs_exit(bool user)
// instrumentation for the noinstr rcu_dynticks_eqs_exit()
instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

rcu_cleanup_after_idle();
trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
WRITE_ONCE(rdp->dynticks_nesting, 1);
@@ -1014,12 +1009,6 @@ noinstr void rcu_nmi_enter(void)
rcu_dynticks_eqs_exit();
// ... but is watching here.

if (!in_nmi()) {
instrumentation_begin();
rcu_cleanup_after_idle();
instrumentation_end();
}

instrumentation_begin();
// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
@@ -1086,6 +1075,24 @@ void rcu_irq_enter_irqson(void)
local_irq_restore(flags);
}

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so. This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API. This is used by
 * the idle-entry code to figure out whether it is safe to disable the
 * scheduler-clock interrupt.
 *
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
*nextevt = KTIME_MAX;
return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
@@ -1467,7 +1474,7 @@ static void rcu_gp_kthread_wake(void)
{
struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

if ((current == t && !in_irq() && !in_serving_softirq()) ||
if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
!READ_ONCE(rcu_state.gp_flags) || !t)
return;
WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
@@ -1590,10 +1597,11 @@ static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
struct rcu_data *rdp)
{
rcu_lockdep_assert_cblist_protected(rdp);
if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
!raw_spin_trylock_rcu_node(rnp))
if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
return;
WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
// The grace period cannot end while we hold the rcu_node lock.
if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
raw_spin_unlock_rcu_node(rnp);
}
@@ -2277,7 +2285,7 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
unsigned long flags;
unsigned long mask;
bool needwake = false;
const bool offloaded = rcu_rdp_is_offloaded(rdp);
bool needacc = false;
struct rcu_node *rnp;

WARN_ON_ONCE(rdp->cpu != smp_processor_id());
@@ -2304,15 +2312,30 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
/*
 * This GP can't end until cpu checks in, so all of our
 * callbacks can be processed during the next GP.
 *
 * NOCB kthreads have their own way to deal with that...
 */
if (!offloaded)
if (!rcu_rdp_is_offloaded(rdp)) {
needwake = rcu_accelerate_cbs(rnp, rdp);
} else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
/*
 * ...but NOCB kthreads may miss or delay callbacks acceleration
 * if in the middle of a (de-)offloading process.
 */
needacc = true;
}

rcu_disable_urgency_upon_qs(rdp);
rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
/* ^^^ Released rnp->lock */
if (needwake)
rcu_gp_kthread_wake();

if (needacc) {
rcu_nocb_lock_irqsave(rdp, flags);
rcu_accelerate_cbs_unlocked(rnp, rdp);
rcu_nocb_unlock_irqrestore(rdp, flags);
}
}
}
@@ -2444,7 +2467,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
int div;
bool __maybe_unused empty;
unsigned long flags;
const bool offloaded = rcu_rdp_is_offloaded(rdp);
struct rcu_head *rhp;
struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
long bl, count = 0;
@@ -2462,18 +2484,17 @@ static void rcu_do_batch(struct rcu_data *rdp)
}

/*
 * Extract the list of ready callbacks, disabling to prevent
 * Extract the list of ready callbacks, disabling IRQs to prevent
 * races with call_rcu() from interrupt handlers. Leave the
 * callback counts, as rcu_barrier() needs to be conservative.
 */
local_irq_save(flags);
rcu_nocb_lock(rdp);
rcu_nocb_lock_irqsave(rdp, flags);
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
pending = rcu_segcblist_n_cbs(&rdp->cblist);
div = READ_ONCE(rcu_divisor);
div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
bl = max(rdp->blimit, pending >> div);
if (unlikely(bl > 100)) {
if (in_serving_softirq() && unlikely(bl > 100)) {
long rrn = READ_ONCE(rcu_resched_ns);

rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
@@ -2482,7 +2503,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
trace_rcu_batch_start(rcu_state.name,
rcu_segcblist_n_cbs(&rdp->cblist), bl);
rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
if (offloaded)
if (rcu_rdp_is_offloaded(rdp))
rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);

trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
@@ -2510,18 +2531,21 @@ static void rcu_do_batch(struct rcu_data *rdp)
/*
 * Stop only if limit reached and CPU has something to do.
 */
if (count >= bl && !offloaded &&
(need_resched() ||
(!is_idle_task(current) && !rcu_is_callbacks_kthread())))
break;
if (unlikely(tlimit)) {
/* only call local_clock() every 32 callbacks */
if (likely((count & 31) || local_clock() < tlimit))
continue;
/* Exceeded the time limit, so leave. */
break;
}
if (!in_serving_softirq()) {
if (in_serving_softirq()) {
if (count >= bl && (need_resched() || !is_idle_task(current)))
break;
/*
 * Make sure we don't spend too much time here and deprive other
 * softirq vectors of CPU cycles.
 */
if (unlikely(tlimit)) {
/* only call local_clock() every 32 callbacks */
if (likely((count & 31) || local_clock() < tlimit))
continue;
/* Exceeded the time limit, so leave. */
break;
}
} else {
local_bh_enable();
lockdep_assert_irqs_enabled();
cond_resched_tasks_rcu_qs();
@@ -2530,8 +2554,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
}
}

local_irq_save(flags);
rcu_nocb_lock(rdp);
rcu_nocb_lock_irqsave(rdp, flags);
rdp->n_cbs_invoked += count;
trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
is_idle_task(current), rcu_is_callbacks_kthread());
@@ -2565,9 +2588,6 @@ static void rcu_do_batch(struct rcu_data *rdp)

rcu_nocb_unlock_irqrestore(rdp, flags);

/* Re-invoke RCU core processing if there are callbacks remaining. */
if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
invoke_rcu_core();
tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
}

@@ -2706,6 +2726,23 @@ static __latent_entropy void rcu_core(void)
unsigned long flags;
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
/*
 * On RT rcu_core() can be preempted when IRQs aren't disabled.
 * Therefore this function can race with concurrent NOCB (de-)offloading
 * on this CPU and the below condition must be considered volatile.
 * However if we race with:
 *
 * _ Offloading: In the worst case we accelerate or process callbacks
 * concurrently with NOCB kthreads. We are guaranteed to
 * call rcu_nocb_lock() if that happens.
 *
 * _ Deoffloading: In the worst case we miss callbacks acceleration or
 * processing. This is fine because the early stage
 * of deoffloading invokes rcu_core() after setting
 * SEGCBLIST_RCU_CORE. So we guarantee that we'll process
 * what could have been dismissed without the need to wait
 * for the next rcu_pending() check in the next jiffy.
 */
const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);

if (cpu_is_offline(smp_processor_id()))
@@ -2714,7 +2751,7 @@ static __latent_entropy void rcu_core(void)
WARN_ON_ONCE(!rdp->beenonline);

/* Report any deferred quiescent states if preemption enabled. */
if (!(preempt_count() & PREEMPT_MASK)) {
if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
rcu_preempt_deferred_qs(current);
} else if (rcu_preempt_need_deferred_qs(current)) {
set_tsk_need_resched(current);
@@ -2737,8 +2774,12 @@ static __latent_entropy void rcu_core(void)

/* If there are callbacks ready, invoke them. */
if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
likely(READ_ONCE(rcu_scheduler_fully_active)))
likely(READ_ONCE(rcu_scheduler_fully_active))) {
rcu_do_batch(rdp);
/* Re-invoke RCU core processing if there are callbacks remaining. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
invoke_rcu_core();
}

/* Do any needed deferred wakeups of rcuo kthreads. */
do_nocb_deferred_wakeup(rdp);
@@ -2982,7 +3023,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
head->func = func;
head->next = NULL;
local_irq_save(flags);
kasan_record_aux_stack(head);
kasan_record_aux_stack_noalloc(head);
rdp = this_cpu_ptr(&rcu_data);

/* Add the callback to our list. */
@@ -3547,7 +3588,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
return;
}

kasan_record_aux_stack(ptr);
kasan_record_aux_stack_noalloc(ptr);
success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
if (!success) {
run_page_cache_worker(krcp);

@@ -157,7 +157,6 @@ struct rcu_data {
bool core_needs_qs; /* Core waits for quiescent state. */
bool beenonline; /* CPU online at least once. */
bool gpwrap; /* Possible ->gp_seq wrap. */
bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
bool cpu_started; /* RCU watching this onlining CPU. */
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
@@ -189,11 +188,6 @@ struct rcu_data {
bool rcu_urgent_qs; /* GP old need light quiescent state. */
bool rcu_forced_tick; /* Forced tick to provide QS. */
bool rcu_forced_tick_exp; /* ... provide QS to expedited GP. */
#ifdef CONFIG_RCU_FAST_NO_HZ
unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */
unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */
int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */

/* 4) rcu_barrier(), OOM callbacks, and expediting. */
struct rcu_head barrier_head;
@@ -227,8 +221,11 @@ struct rcu_data {
struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
bool nocb_cb_sleep; /* Is the nocb CB thread asleep? */
struct task_struct *nocb_cb_kthread;
struct rcu_data *nocb_next_cb_rdp;
/* Next rcu_data in wakeup chain. */
struct list_head nocb_head_rdp; /*
 * Head of rcu_data list in wakeup chain,
 * if rdp_gp.
 */
struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */

/* The following fields are used by CB kthread, hence new cacheline. */
struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
@@ -419,8 +416,6 @@ static bool rcu_is_callbacks_kthread(void);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void rcu_preempt_deferred_qs(struct task_struct *t);
@@ -447,12 +442,16 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);
#define rcu_nocb_lock_irqsave(rdp, flags) \
do { \
if (!rcu_segcblist_is_offloaded(&(rdp)->cblist)) \
local_irq_save(flags); \
else \
raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags)); \

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags) \
do { \
local_irq_save(flags); \
if (rcu_segcblist_is_offloaded(&(rdp)->cblist)) \
raw_spin_lock(&(rdp)->nocb_lock); \
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)

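A hedged usage sketch, not from this patch, of the pairing the reworked rcu_nocb_lock_irqsave() above is designed for: interrupts go off before the offloaded check, so the decision cannot race with a concurrent de-offload. my_requeue() is a made-up name, rcu_nocb_unlock_irqrestore() is the existing counterpart, and this would only compile inside kernel/rcu where these internal helpers are visible.

static void my_requeue(struct rcu_data *rdp)
{
        unsigned long flags;

        rcu_nocb_lock_irqsave(rdp, flags);
        /* ... manipulate rdp->cblist with IRQs off and, if offloaded, nocb_lock held ... */
        rcu_nocb_unlock_irqrestore(rdp, flags);
}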
@@ -255,7 +255,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
WRITE_ONCE(rdp->exp_deferred_qs, false);
WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

@@ -387,6 +387,7 @@ retry_ipi:
continue;
}
if (get_cpu() == cpu) {
mask_ofl_test |= mask;
put_cpu();
continue;
}
@@ -506,7 +507,10 @@ static void synchronize_rcu_expedited_wait(void)
if (rdp->rcu_forced_tick_exp)
continue;
rdp->rcu_forced_tick_exp = true;
tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
preempt_disable();
if (cpu_online(cpu))
tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
preempt_enable();
}
}
j = READ_ONCE(jiffies_till_first_fqs);
@@ -655,7 +659,7 @@ static void rcu_exp_handler(void *unused)
rcu_dynticks_curr_cpu_in_eqs()) {
rcu_report_exp_rdp(rdp);
} else {
rdp->exp_deferred_qs = true;
WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
set_tsk_need_resched(t);
set_preempt_need_resched();
}
@@ -677,7 +681,7 @@ static void rcu_exp_handler(void *unused)
if (depth > 0) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
if (rnp->expmask & rdp->grpmask) {
rdp->exp_deferred_qs = true;
WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
t->rcu_read_unlock_special.b.exp_hint = true;
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -759,7 +763,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
my_cpu = get_cpu();
/* Quiescent state either not needed or already requested, leave. */
if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
rdp->cpu_no_qs.b.exp) {
READ_ONCE(rdp->cpu_no_qs.b.exp)) {
put_cpu();
return;
}
