Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  futex: fix requeue_pi key imbalance
  futex: Fix typo in FUTEX_WAIT/WAKE_BITSET_PRIVATE definitions
  rcu: Place root rcu_node structure in separate lockdep class
  rcu: Make hot-unplugged CPU relinquish its own RCU callbacks
  rcu: Move rcu_barrier() to rcutree
  futex: Move exit_pi_state() call to release_mm()
  futex: Nullify robust lists after cleanup
  futex: Fix locking imbalance
  panic: Fix panic message visibility by calling bust_spinlocks(0) before dying
  rcu: Replace the rcu_barrier enum with pointer to call_rcu*() function
  rcu: Clean up code based on review feedback from Josh Triplett, part 4
  rcu: Clean up code based on review feedback from Josh Triplett, part 3
  rcu: Fix rcu_lock_map build failure on CONFIG_PROVE_LOCKING=y
  rcu: Clean up code to address Ingo's checkpatch feedback
  rcu: Clean up code based on review feedback from Josh Triplett, part 2
  rcu: Clean up code based on review feedback from Josh Triplett
@@ -33,8 +33,8 @@
 #define FUTEX_LOCK_PI_PRIVATE	(FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
 #define FUTEX_UNLOCK_PI_PRIVATE	(FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
 #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
-#define FUTEX_WAIT_BITSET_PRIVATE	(FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG)
-#define FUTEX_WAKE_BITSET_PRIVATE	(FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAIT_BITSET_PRIVATE	(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAKE_BITSET_PRIVATE	(FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAIT_REQUEUE_PI_PRIVATE	(FUTEX_WAIT_REQUEUE_PI | \
					 FUTEX_PRIVATE_FLAG)
 #define FUTEX_CMP_REQUEUE_PI_PRIVATE	(FUTEX_CMP_REQUEUE_PI | \
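For context: each *_PRIVATE opcode above is just the base opcode with FUTEX_PRIVATE_FLAG OR'd in, telling the kernel the futex is process-private so key lookup can skip the shared (inode-based) path. A minimal userspace sketch of the corrected opcode in use (the wrapper is hypothetical, not part of this diff):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wait on a process-private futex, responding only to wakeups
 * whose bitset intersects 'bits' (FUTEX_WAIT_BITSET semantics). */
static long futex_wait_bitset_private(int *uaddr, int val, unsigned int bits)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET_PRIVATE, val,
		       NULL, NULL, bits);
}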
@@ -77,7 +77,7 @@ extern int rcu_scheduler_active;
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
-#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
+#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
 #define INIT_RCU_HEAD(ptr) do { \
        (ptr)->next = NULL; (ptr)->func = NULL; \
@@ -129,12 +129,6 @@ static inline void rcu_read_lock(void)
 	rcu_read_acquire();
 }
 
-/**
- * rcu_read_unlock - marks the end of an RCU read-side critical section.
- *
- * See rcu_read_lock() for more information.
- */
-
 /*
  * So where is rcu_write_lock()?  It does not exist, as there is no
  * way for writers to lock out RCU readers.  This is a feature, not
@@ -144,6 +138,12 @@ static inline void rcu_read_lock(void)
  * used as well.  RCU does not care how the writers keep out of each
  * others' way, as long as they do so.
  */
+
+/**
+ * rcu_read_unlock - marks the end of an RCU read-side critical section.
+ *
+ * See rcu_read_lock() for more information.
+ */
 static inline void rcu_read_unlock(void)
 {
 	rcu_read_release();
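The comments above capture the core of the RCU API: rcu_read_lock()/rcu_read_unlock() delimit read-side critical sections, and writers exclude only each other. A sketch of the classic usage pattern (gbl_foo, foo_lock and do_something_with() are hypothetical names, not from this diff):

struct foo { int a; };
static struct foo *gbl_foo;
static DEFINE_SPINLOCK(foo_lock);

void reader(void)
{
	struct foo *p;

	rcu_read_lock();
	p = rcu_dereference(gbl_foo);	/* safe until rcu_read_unlock() */
	if (p)
		do_something_with(p->a);
	rcu_read_unlock();
}

void writer(struct foo *new_fp)
{
	struct foo *old_fp;

	spin_lock(&foo_lock);		/* writers lock out each other... */
	old_fp = gbl_foo;
	rcu_assign_pointer(gbl_foo, new_fp);
	spin_unlock(&foo_lock);		/* ...but never the readers */
	synchronize_rcu();		/* wait for pre-existing readers */
	kfree(old_fp);
}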
@@ -196,6 +196,8 @@ static inline void rcu_read_lock_sched(void)
 	__acquire(RCU_SCHED);
 	rcu_read_acquire();
 }
 
+/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
 static inline notrace void rcu_read_lock_sched_notrace(void)
 {
 	preempt_disable_notrace();
@@ -213,6 +215,8 @@ static inline void rcu_read_unlock_sched(void)
 	__release(RCU_SCHED);
 	preempt_enable();
 }
 
+/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
 static inline notrace void rcu_read_unlock_sched_notrace(void)
 {
 	__release(RCU_SCHED);
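For reference, the _sched variants delimit read-side sections whose grace period is defined by preemption being disabled; updaters wait with synchronize_sched(). A minimal sketch (the reader function is hypothetical):

void reader_sched(void)
{
	rcu_read_lock_sched();		/* disables preemption */
	/* ...access data that updaters free only after synchronize_sched()... */
	rcu_read_unlock_sched();	/* re-enables preemption */
}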
@@ -30,10 +30,14 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
+struct notifier_block;
+
 extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 
+extern int rcu_cpu_notify(struct notifier_block *self,
+			  unsigned long action, void *hcpu);
 extern int rcu_needs_cpu(int cpu);
 extern int rcu_expedited_torture_stats(char *page);
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
@@ -85,16 +89,11 @@ static inline void synchronize_rcu_bh_expedited(void)
 
 extern void __rcu_init(void);
 extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
 
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
 
-static inline void rcu_init_sched(void)
-{
-}
-
 #ifdef CONFIG_NO_HZ
 void rcu_enter_nohz(void);
 void rcu_exit_nohz(void);
@@ -107,7 +106,7 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
-/* A context switch is a grace period for rcutree. */
+/* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
 	return num_online_cpus() == 1;
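The corrected comment matters because rcu_blocking_is_gp() backs the short-circuit in the blocking grace-period primitives: with one CPU online, the caller's own context switch already serves as a grace period. Roughly, as a sketch of how callers use it (the real body lives in kernel/rcutree.c):

void synchronize_sched(void)
{
	if (rcu_blocking_is_gp())
		return;	/* single CPU: caller's context switch is a full GP */
	/* ...otherwise queue a callback via call_rcu_sched() and wait for it... */
}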
@@ -778,7 +778,6 @@ static void __init do_initcalls(void)
  */
 static void __init do_basic_setup(void)
 {
-	rcu_init_sched(); /* needed by module_init stage. */
 	init_workqueues();
 	cpuset_init_smp();
 	usermodehelper_init();
@@ -991,8 +991,6 @@ NORET_TYPE void do_exit(long code)
 	tsk->mempolicy = NULL;
 #endif
 #ifdef CONFIG_FUTEX
-	if (unlikely(!list_empty(&tsk->pi_state_list)))
-		exit_pi_state_list(tsk);
 	if (unlikely(current->pi_state_cache))
 		kfree(current->pi_state_cache);
 #endif
+8 -2
@@ -570,12 +570,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 
 	/* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
-	if (unlikely(tsk->robust_list))
+	if (unlikely(tsk->robust_list)) {
 		exit_robust_list(tsk);
+		tsk->robust_list = NULL;
+	}
 #ifdef CONFIG_COMPAT
-	if (unlikely(tsk->compat_robust_list))
+	if (unlikely(tsk->compat_robust_list)) {
 		compat_exit_robust_list(tsk);
+		tsk->compat_robust_list = NULL;
+	}
 #endif
+	if (unlikely(!list_empty(&tsk->pi_state_list)))
+		exit_pi_state_list(tsk);
 #endif
 
 	/* Get rid of any cached register state */
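Background for the nullification fix above: userspace registers a robust-list head once per thread and the kernel walks it when the mm is released; clearing ->robust_list (and the compat variant) after the walk guarantees the list cannot be processed a second time. The userspace half, sketched for illustration (the wrapper is hypothetical):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static struct robust_list_head head = {
	.list		= { &head.list },	/* empty circular list */
	.futex_offset	= 0,
};

int register_robust_list(void)
{
	return syscall(SYS_set_robust_list, &head, sizeof(head));
}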
+1 -2
@@ -916,8 +916,8 @@ retry:
 	hb1 = hash_futex(&key1);
 	hb2 = hash_futex(&key2);
 
-	double_lock_hb(hb1, hb2);
 retry_private:
+	double_lock_hb(hb1, hb2);
 
 	op_ret = futex_atomic_op_inuser(op, uaddr2);
 	if (unlikely(op_ret < 0)) {
@@ -2117,7 +2117,6 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
	 * Unqueue the futex_q and determine which it was.
	 */
 	plist_del(&q->list, &q->list.plist);
-	drop_futex_key_refs(&q->key);
 
 	if (timeout && !timeout->task)
 		ret = -ETIMEDOUT;
+2 -1
@@ -90,6 +90,8 @@ NORET_TYPE void panic(const char * fmt, ...)
 
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
+	bust_spinlocks(0);
+
 	if (!panic_blink)
 		panic_blink = no_blink;
 
@@ -136,7 +138,6 @@ NORET_TYPE void panic(const char * fmt, ...)
 		mdelay(1);
 		i++;
 	}
-	bust_spinlocks(0);
 }
 
 EXPORT_SYMBOL(panic);
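Why moving the call fixes message visibility: bust_spinlocks(0) ends the oops-in-progress state and unblanks the console, pushing buffered output to the screen; previously it ran only after the infinite blink loop, i.e. effectively never. Roughly what the helper does in this era's lib/bust_spinlocks.c (a sketch from memory, not part of this diff):

void bust_spinlocks(int yes)
{
	if (yes) {
		++oops_in_progress;	/* console paths ignore their locks */
	} else {
		console_unblank();	/* force pending output to the screen */
		if (--oops_in_progress == 0)
			wake_up_klogd();
	}
}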
+7 -133
@@ -46,22 +46,15 @@
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
 
-enum rcu_barrier {
-	RCU_BARRIER_STD,
-	RCU_BARRIER_BH,
-	RCU_BARRIER_SCHED,
-};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
 
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -164,129 +157,10 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
-static void rcu_barrier_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-}
-
-/*
- * Called with preemption disabled, and from cross-cpu IRQ context.
- */
-static void rcu_barrier_func(void *type)
-{
-	int cpu = smp_processor_id();
-	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-
-	atomic_inc(&rcu_barrier_cpu_count);
-	switch ((enum rcu_barrier)type) {
-	case RCU_BARRIER_STD:
-		call_rcu(head, rcu_barrier_callback);
-		break;
-	case RCU_BARRIER_BH:
-		call_rcu_bh(head, rcu_barrier_callback);
-		break;
-	case RCU_BARRIER_SCHED:
-		call_rcu_sched(head, rcu_barrier_callback);
-		break;
-	}
-}
-
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-	smp_mb(); /* In case we didn't sleep. */
-}
-
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
-static void _rcu_barrier(enum rcu_barrier type)
-{
-	BUG_ON(in_interrupt());
-	/* Take cpucontrol mutex to protect against CPU hotplug */
-	mutex_lock(&rcu_barrier_mutex);
-	init_completion(&rcu_barrier_completion);
-	/*
-	 * Initialize rcu_barrier_cpu_count to 1, then invoke
-	 * rcu_barrier_func() on each CPU, so that each CPU also has
-	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
-	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
-	 * might complete its grace period before all of the other CPUs
-	 * did their increment, causing this function to return too
-	 * early.
-	 */
-	atomic_set(&rcu_barrier_cpu_count, 1);
-	on_each_cpu(rcu_barrier_func, (void *)type, 1);
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-	wait_for_completion(&rcu_barrier_completion);
-	mutex_unlock(&rcu_barrier_mutex);
-	wait_migrated_callbacks();
-}
-
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- */
-void rcu_barrier(void)
-{
-	_rcu_barrier(RCU_BARRIER_STD);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
-/**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-void rcu_barrier_bh(void)
-{
-	_rcu_barrier(RCU_BARRIER_BH);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- */
-void rcu_barrier_sched(void)
-{
-	_rcu_barrier(RCU_BARRIER_SCHED);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
-static void rcu_migrate_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_migrate_type_count))
-		wake_up(&rcu_migrate_wq);
-}
-
-extern int rcu_cpu_notify(struct notifier_block *self,
-			  unsigned long action, void *hcpu);
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
					     unsigned long action, void *hcpu)
 {
-	rcu_cpu_notify(self, action, hcpu);
-	if (action == CPU_DYING) {
-		/*
-		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
-		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
-		 * returns, all online cpus have queued rcu_barrier_func(),
-		 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
-		 *
-		 * These callbacks ensure _rcu_barrier() waits for all
-		 * RCU callbacks of the specified type to complete.
-		 */
-		atomic_set(&rcu_migrate_type_count, 3);
-		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
-		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
-		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
-	} else if (action == CPU_DOWN_PREPARE) {
-		/* Don't need to wait until next removal operation. */
-		/* rcu_migrate_head is protected by cpu_add_remove_lock */
-		wait_migrated_callbacks();
-	}
-
-	return NOTIFY_OK;
+	return rcu_cpu_notify(self, action, hcpu);
 }
 
 void __init rcu_init(void)
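Whichever file hosts the implementation, rcu_barrier()'s contract is unchanged: return only after all previously queued call_rcu() callbacks have run. The canonical caller is module unload; a sketch with hypothetical my_obj/my_cache/my_exit names:

struct my_obj { struct rcu_head rcu; /* ... */ };
static struct kmem_cache *my_cache;

static void my_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(my_cache, container_of(head, struct my_obj, rcu));
}

static void __exit my_exit(void)
{
	/* No new call_rcu(&obj->rcu, my_free_rcu) after this point... */
	rcu_barrier();			/* ...wait for the ones in flight */
	kmem_cache_destroy(my_cache);	/* now safe: no callback touches it */
}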
+1 -3
@@ -606,8 +606,6 @@ static struct rcu_torture_ops sched_ops_sync = {
	.name		= "sched_sync"
 };
 
-extern int rcu_expedited_torture_stats(char *page);
-
 static struct rcu_torture_ops sched_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
@@ -650,7 +648,7 @@ rcu_torture_writer(void *arg)
 		old_rp = rcu_torture_current;
 		rp->rtort_mbtest = 1;
 		rcu_assign_pointer(rcu_torture_current, rp);
-		smp_wmb();
+		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
 		if (old_rp) {
 			i = old_rp->rtort_pipe_count;
 			if (i > RCU_TORTURE_PIPE_LEN)
+216 -114
File diff suppressed because it is too large
+73 -13
@@ -48,14 +48,14 @@
 #elif NR_CPUS <= RCU_FANOUT_SQ
 #  define NUM_RCU_LVLS	2
 #  define NUM_RCU_LVL_0	1
-#  define NUM_RCU_LVL_1	(((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+#  define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
 #  define NUM_RCU_LVL_2	(NR_CPUS)
 #  define NUM_RCU_LVL_3	0
 #elif NR_CPUS <= RCU_FANOUT_CUBE
 #  define NUM_RCU_LVLS	3
 #  define NUM_RCU_LVL_0	1
-#  define NUM_RCU_LVL_1	(((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
-#  define NUM_RCU_LVL_2	(((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+#  define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
+#  define NUM_RCU_LVL_2	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
 #  define NUM_RCU_LVL_3	NR_CPUS
 #else
 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
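For reference, DIV_ROUND_UP is the generic helper from include/linux/kernel.h, so the new lines compute exactly what the open-coded expressions did:

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* e.g. NUM_RCU_LVL_1 is still (NR_CPUS + RCU_FANOUT - 1) / RCU_FANOUT */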
@@ -79,15 +79,21 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-	spinlock_t lock;
+	spinlock_t lock;	/* Root rcu_node's lock protects some */
+				/*  rcu_state fields as well as following. */
	long	gpnum;		/* Current grace period for this node. */
+				/*  This will either be equal to or one */
+				/*  behind the root rcu_node's gpnum. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
+				/*  In leaf rcu_node, each bit corresponds to */
+				/*  an rcu_data structure, otherwise, each */
+				/*  bit corresponds to a child rcu_node */
+				/*  structure. */
	unsigned long qsmaskinit;
				/* Per-GP initialization for qsmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
@@ -95,8 +101,23 @@ struct rcu_node {
	struct rcu_node *parent;
	struct list_head blocked_tasks[2];
				/* Tasks blocked in RCU read-side critsect. */
+				/*  Grace period number (->gpnum) x blocked */
+				/*  by tasks on the (x & 0x1) element of the */
+				/*  blocked_tasks[] array. */
 } ____cacheline_internodealigned_in_smp;
 
+/*
+ * Do a full breadth-first scan of the rcu_node structures for the
+ * specified rcu_state structure.
+ */
+#define rcu_for_each_node_breadth_first(rsp, rnp) \
+	for ((rnp) = &(rsp)->node[0]; \
+	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+
+#define rcu_for_each_leaf_node(rsp, rnp) \
+	for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
+	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+
 /* Index values for nxttail array in struct rcu_data. */
 #define RCU_DONE_TAIL	0	/* Also RCU_WAIT head. */
 #define RCU_WAIT_TAIL	1	/* Also RCU_NEXT_READY head. */
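The two iterators above walk the flat ->node[] array, exploiting the fact that levels are laid out root-first and the leaves come last. Usage is as for any list macro (visit() is a hypothetical helper):

struct rcu_node *rnp;

rcu_for_each_node_breadth_first(rsp, rnp)
	visit(rnp);		/* every node, in array (breadth-first) order */

rcu_for_each_leaf_node(rsp, rnp)
	visit(rnp);		/* only the last level, where CPUs attach */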
@@ -126,19 +147,22 @@ struct rcu_data {
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
-	 * the nxttail elements point to nxtlist, which is NULL.
+	 * the nxttail elements point to the ->nxtlist pointer itself,
+	 * which in that case is NULL.
	 *
-	 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
-	 *	Entries that might have arrived after current GP ended
-	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
-	 *	Entries known to have arrived before current GP ended
-	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
-	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
+	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
+	 *	Entries that batch # <= ->completed - 1: waiting for current GP
+	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
+	 *	Entries known to have arrived before current GP ended
+	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
+	 *	Entries that might have arrived after current GP ended
+	 * Note that the value of *nxttail[RCU_NEXT_TAIL] will
+	 * always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
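The segmented-list layout documented above implies O(1) enqueue: a new callback is linked in where *nxttail[RCU_NEXT_TAIL] points and the tail pointer advances past it. Sketched with this header's names (the real enqueue sits in the suppressed rcutree.c diff):

/* rdp->nxttail[RCU_NEXT_TAIL] points at the list's terminating NULL. */
head->next = NULL;
*rdp->nxttail[RCU_NEXT_TAIL] = head;		/* link the new callback in */
rdp->nxttail[RCU_NEXT_TAIL] = &head->next;	/* list now ends after it */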
@@ -216,8 +240,19 @@ struct rcu_state {
						/* Force QS state. */
	long	gpnum;				/* Current gp number. */
	long	completed;			/* # of last completed gp. */
+
+	/* End of fields guarded by root rcu_node's lock. */
+
	spinlock_t onofflock;			/* exclude on/offline and */
-						/*  starting new GP. */
+						/*  starting new GP.  Also */
+						/*  protects the following */
+						/*  orphan_cbs fields. */
+	struct rcu_head *orphan_cbs_list;	/* list of rcu_head structs */
+						/*  orphaned by all CPUs in */
+						/*  a given leaf rcu_node */
+						/*  going offline. */
+	struct rcu_head **orphan_cbs_tail;	/* And tail pointer. */
+	long orphan_qlen;			/* Number of orphaned cbs. */
	spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
@@ -255,5 +290,30 @@ extern struct rcu_state rcu_preempt_state;
 DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
-#endif /* #ifdef RCU_TREE_NONCORE */
+#else /* #ifdef RCU_TREE_NONCORE */
+
+/* Forward declarations for rcutree_plugin.h */
+static inline void rcu_bootup_announce(void);
+long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
+static int rcu_preempted_readers(struct rcu_node *rnp);
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+static void rcu_print_task_stall(struct rcu_node *rnp);
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+#ifdef CONFIG_HOTPLUG_CPU
+static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
+				      struct rcu_node *rnp,
+				      struct rcu_data *rdp);
+static void rcu_preempt_offline_cpu(int cpu);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+static void rcu_preempt_check_callbacks(int cpu);
+static void rcu_preempt_process_callbacks(void);
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+static int rcu_preempt_pending(int cpu);
+static int rcu_preempt_needs_cpu(int cpu);
+static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
+static void rcu_preempt_send_cbs_to_orphanage(void);
+static void __init __rcu_init_preempt(void);
+
+#endif /* #else #ifdef RCU_TREE_NONCORE */
+76 -27
@@ -150,6 +150,16 @@ void __rcu_read_lock(void)
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
 
+/*
+ * Check for preempted RCU readers blocking the current grace period
+ * for the specified rcu_node structure.  If the caller needs a reliable
+ * answer, it must hold the rcu_node's ->lock.
+ */
+static int rcu_preempted_readers(struct rcu_node *rnp)
+{
+	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+}
+
 static void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
@@ -196,7 +206,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			break;
 		spin_unlock(&rnp->lock);  /* irqs remain disabled. */
 	}
-	empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+	empty = !rcu_preempted_readers(rnp);
 	list_del_init(&t->rcu_node_entry);
 	t->rcu_blocked_node = NULL;
@@ -207,7 +217,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
	 * drop rnp->lock and restore irq.
	 */
	if (!empty && rnp->qsmask == 0 &&
-	    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
+	    !rcu_preempted_readers(rnp)) {
		struct rcu_node *rnp_p;
 
		if (rnp->parent == NULL) {
@@ -257,12 +267,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 {
	unsigned long flags;
	struct list_head *lp;
-	int phase = rnp->gpnum & 0x1;
+	int phase;
	struct task_struct *t;
 
-	if (!list_empty(&rnp->blocked_tasks[phase])) {
+	if (rcu_preempted_readers(rnp)) {
		spin_lock_irqsave(&rnp->lock, flags);
-		phase = rnp->gpnum & 0x1; /* re-read under lock. */
+		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
@@ -281,20 +291,10 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
  */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
-	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+	WARN_ON_ONCE(rcu_preempted_readers(rnp));
	WARN_ON_ONCE(rnp->qsmask);
 }
 
-/*
- * Check for preempted RCU readers for the specified rcu_node structure.
- * If the caller needs a reliable answer, it must hold the rcu_node's
- * ->lock.
- */
-static int rcu_preempted_readers(struct rcu_node *rnp)
-{
-	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
@@ -410,6 +410,15 @@ static int rcu_preempt_needs_cpu(int cpu)
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
 }
 
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+	_rcu_barrier(&rcu_preempt_state, call_rcu);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
 /*
  * Initialize preemptable RCU's per-CPU data.
  */
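This hunk is the per-flavor side of "rcu: Replace the rcu_barrier enum with pointer to call_rcu*() function": each flavor hands the shared helper its own call_rcu-style function instead of an enum tag. The assumed shape of that helper (its definition is in the suppressed rcutree.c diff):

static void _rcu_barrier(struct rcu_state *rsp,
			 void (*call_rcu_func)(struct rcu_head *head,
					       void (*func)(struct rcu_head *head)));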
@@ -418,6 +427,22 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
 }
 
+/*
+ * Move preemptable RCU's callbacks to ->orphan_cbs_list.
+ */
+static void rcu_preempt_send_cbs_to_orphanage(void)
+{
+	rcu_send_cbs_to_orphanage(&rcu_preempt_state);
+}
+
+/*
+ * Initialize preemptable RCU's state structures.
+ */
+static void __init __rcu_init_preempt(void)
+{
+	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+}
+
 /*
  * Check for a task exiting while in a preemptable-RCU read-side
  * critical section, clean up if so.  No need to issue warnings,
@@ -461,6 +486,15 @@ static void rcu_preempt_note_context_switch(int cpu)
 {
 }
 
+/*
+ * Because preemptable RCU does not exist, there are never any preempted
+ * RCU readers.
+ */
+static int rcu_preempted_readers(struct rcu_node *rnp)
+{
+	return 0;
+}
+
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 
 /*
@@ -483,15 +517,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
	WARN_ON_ONCE(rnp->qsmask);
 }
 
-/*
- * Because preemptable RCU does not exist, there are never any preempted
- * RCU readers.
- */
-static int rcu_preempted_readers(struct rcu_node *rnp)
-{
-	return 0;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
@@ -518,7 +543,7 @@ static void rcu_preempt_offline_cpu(int cpu)
  * Because preemptable RCU does not exist, it never has any callbacks
  * to check.
  */
-void rcu_preempt_check_callbacks(int cpu)
+static void rcu_preempt_check_callbacks(int cpu)
 {
 }
 
@@ -526,7 +551,7 @@ void rcu_preempt_check_callbacks(int cpu)
  * Because preemptable RCU does not exist, it never has any callbacks
  * to process.
  */
-void rcu_preempt_process_callbacks(void)
+static void rcu_preempt_process_callbacks(void)
 {
 }
 
@@ -555,6 +580,16 @@ static int rcu_preempt_needs_cpu(int cpu)
	return 0;
 }
 
+/*
+ * Because preemptable RCU does not exist, rcu_barrier() is just
+ * another name for rcu_barrier_sched().
+ */
+void rcu_barrier(void)
+{
+	rcu_barrier_sched();
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
 /*
  * Because preemptable RCU does not exist, there is no per-CPU
  * data to initialize.
@@ -563,4 +598,18 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 {
 }
 
+/*
+ * Because there is no preemptable RCU, there are no callbacks to move.
+ */
+static void rcu_preempt_send_cbs_to_orphanage(void)
+{
+}
+
+/*
+ * Because preemptable RCU does not exist, it need not be initialized.
+ */
+static void __init __rcu_init_preempt(void)
+{
+}
+
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
@@ -159,13 +159,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
	struct rcu_node *rnp;
 
	seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
-		      "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
+		      "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n",
		   rsp->completed, rsp->gpnum, rsp->signaled,
		   (long)(rsp->jiffies_force_qs - jiffies),
		   (int)(jiffies & 0xffff),
		   rsp->n_force_qs, rsp->n_force_qs_ngp,
		   rsp->n_force_qs - rsp->n_force_qs_ngp,
-		   rsp->n_force_qs_lh);
+		   rsp->n_force_qs_lh, rsp->orphan_qlen);
	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
		if (rnp->level != level) {
			seq_puts(m, "\n");