Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes in this cycle are:
- Make schedstats a runtime tunable (disabled by default) and
optimize it via static keys.
As most distributions enable CONFIG_SCHEDSTATS=y due to its
instrumentation value, this is a nice performance enhancement.
(Mel Gorman)
- Implement 'simple waitqueues' (swait): these are just pure
waitqueues without any of the more complex features of full-blown
waitqueues (callbacks, wake flags, wake keys, etc.). Simple
waitqueues have less memory overhead and are faster.
Use simple waitqueues in the RCU code (in 4 different places) and
for handling KVM vCPU wakeups.
(Peter Zijlstra, Daniel Wagner, Thomas Gleixner, Paul Gortmaker,
Marcelo Tosatti)
- sched/numa enhancements (Rik van Riel)
- NOHZ performance enhancements (Rik van Riel)
- Various sched/deadline enhancements (Steven Rostedt)
- Various fixes (Peter Zijlstra)
- ... and a number of other fixes, cleanups and smaller enhancements"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
sched/cputime: Fix steal_account_process_tick() to always return jiffies
sched/deadline: Remove dl_new from struct sched_dl_entity
Revert "kbuild: Add option to turn incompatible pointer check into error"
sched/deadline: Remove superfluous call to switched_to_dl()
sched/debug: Fix preempt_disable_ip recording for preempt_disable()
sched, time: Switch VIRT_CPU_ACCOUNTING_GEN to jiffy granularity
time, acct: Drop irq save & restore from __acct_update_integrals()
acct, time: Change indentation in __acct_update_integrals()
sched, time: Remove non-power-of-two divides from __acct_update_integrals()
sched/rt: Kick RT bandwidth timer immediately on start up
sched/debug: Add deadline scheduler bandwidth ratio to /proc/sched_debug
sched/debug: Move sched_domain_sysctl to debug.c
sched/debug: Move the /sys/kernel/debug/sched_features file setup into debug.c
sched/rt: Fix PI handling vs. sched_setscheduler()
sched/core: Remove duplicated sched_group_set_shares() prototype
sched/fair: Consolidate nohz CPU load update code
sched/fair: Avoid using decay_load_missed() with a negative value
sched/deadline: Always calculate end of period on sched_yield()
sched/cgroup: Fix cgroup entity load tracking tear-down
rcu: Use simple wait queues where possible in rcutree
...
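
Before the diff itself, a quick orientation to the swait API the message above describes (the full header appears in the include/linux/swait.h hunk further down). This is a minimal sketch, not code from the merge: the demo_* names and the wake condition are hypothetical, while the swait_* calls are exactly the ones this series adds.

	/*
	 * Minimal sketch of the new API; demo_* names are hypothetical,
	 * the swait_* primitives come from the new linux/swait.h below.
	 */
	#include <linux/compiler.h>
	#include <linux/swait.h>

	static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
	static bool demo_ready;		/* hypothetical wake condition */

	static int demo_wait_side(void)
	{
		/* Sleep in TASK_INTERRUPTIBLE until demo_ready is observed true. */
		return swait_event_interruptible(demo_wq, READ_ONCE(demo_ready));
	}

	static void demo_wake_side(void)
	{
		WRITE_ONCE(demo_ready, true);
		swake_up(&demo_wq);	/* wakes one waiter; swake_up_all() wakes them all */
	}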
@@ -3532,6 +3532,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	sched_debug	[KNL] Enables verbose scheduler debug messages.
 
+	schedstats=	[KNL,X86] Enable or disable scheduler statistics.
+			Allowed values are enable and disable. This feature
+			incurs a small amount of overhead in the scheduler
+			but is useful for debugging and performance tuning.
+
 	skew_tick=	[KNL] Offset the periodic timer tick per cpu to mitigate
 			xtime_lock contention on larger systems, and/or RCU lock
 			contention on all systems with CONFIG_MAXSMP set.
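
As the parameter text above says, collection stays off unless explicitly requested. A minimal example of enabling it at boot (everything on this command line except schedstats=enable is hypothetical):

	vmlinuz root=/dev/sda1 ro quiet schedstats=enable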
@@ -773,6 +773,14 @@ rtsig-nr shows the number of RT signals currently queued.
 
 ==============================================================
 
+sched_schedstats:
+
+Enables/disables scheduler statistics. Enabling this feature
+incurs a small amount of overhead in the scheduler but is
+useful for debugging and performance tuning.
+
+==============================================================
+
 sg-big-buff:
 
 This file shows the size of the generic SCSI (sg) buffer.
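
The same switch is exposed at runtime through the sysctl documented above; a short usage example, assuming a standard procfs mount and sysctl(8):

	# echo 1 > /proc/sys/kernel/sched_schedstats    # enable
	# echo 0 > /proc/sys/kernel/sched_schedstats    # disable
	# sysctl kernel.sched_schedstats=1              # equivalent via sysctl(8)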
@@ -506,18 +506,18 @@ static void kvm_arm_resume_guest(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+		struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 
 		vcpu->arch.pause = false;
-		wake_up_interruptible(wq);
+		swake_up(wq);
 	}
 }
 
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
 {
-	wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 
-	wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+	swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
 				       (!vcpu->arch.pause)));
 }
 
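The hunk above is representative of the rest of this merge: each wait.h primitive is swapped for its swait counterpart one-for-one. The mapping, as it appears across the hunks below:

	wait_queue_head_t                     ->  struct swait_queue_head
	init_waitqueue_head()                 ->  init_swait_queue_head()
	waitqueue_active()                    ->  swait_active()
	wake_up() / wake_up_interruptible()   ->  swake_up()
	wait_event_interruptible()            ->  swait_event_interruptible()
	DEFINE_WAIT() / prepare_to_wait() /
	finish_wait()                         ->  DECLARE_SWAITQUEUE() /
	                                          prepare_to_swait() / finish_swait()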
@@ -70,7 +70,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
 	struct kvm *kvm = source_vcpu->kvm;
 	struct kvm_vcpu *vcpu = NULL;
-	wait_queue_head_t *wq;
+	struct swait_queue_head *wq;
 	unsigned long cpu_id;
 	unsigned long context_id;
 	phys_addr_t target_pc;
@@ -119,7 +119,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	smp_mb();		/* Make sure the above is visible */
 
 	wq = kvm_arch_vcpu_wq(vcpu);
-	wake_up_interruptible(wq);
+	swake_up(wq);
 
 	return PSCI_RET_SUCCESS;
 }
@@ -445,8 +445,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 
 	dvcpu->arch.wait = 0;
 
-	if (waitqueue_active(&dvcpu->wq))
-		wake_up_interruptible(&dvcpu->wq);
+	if (swait_active(&dvcpu->wq))
+		swake_up(&dvcpu->wq);
 
 	return 0;
 }
@@ -1174,8 +1174,8 @@ static void kvm_mips_comparecount_func(unsigned long data)
 	kvm_mips_callbacks->queue_timer_int(vcpu);
 
 	vcpu->arch.wait = 0;
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
+	if (swait_active(&vcpu->wq))
+		swake_up(&vcpu->wq);
 }
 
 /* low level hrtimer wake routine */
@@ -289,7 +289,7 @@ struct kvmppc_vcore {
 	struct list_head runnable_threads;
 	struct list_head preempt_list;
 	spinlock_t lock;
-	wait_queue_head_t wq;
+	struct swait_queue_head wq;
 	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
 	u64 stolen_tb;
 	u64 preempt_tb;
@@ -629,7 +629,7 @@ struct kvm_vcpu_arch {
 	u8 prodded;
 	u32 last_inst;
 
-	wait_queue_head_t *wqp;
+	struct swait_queue_head *wqp;
 	struct kvmppc_vcore *vcore;
 	int ret;
 	int trap;
@@ -114,11 +114,11 @@ static bool kvmppc_ipi_thread(int cpu)
 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
 	int cpu;
-	wait_queue_head_t *wqp;
+	struct swait_queue_head *wqp;
 
 	wqp = kvm_arch_vcpu_wq(vcpu);
-	if (waitqueue_active(wqp)) {
-		wake_up_interruptible(wqp);
+	if (swait_active(wqp)) {
+		swake_up(wqp);
 		++vcpu->stat.halt_wakeup;
 	}
 
@@ -701,8 +701,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		tvcpu->arch.prodded = 1;
 		smp_mb();
 		if (vcpu->arch.ceded) {
-			if (waitqueue_active(&vcpu->wq)) {
-				wake_up_interruptible(&vcpu->wq);
+			if (swait_active(&vcpu->wq)) {
+				swake_up(&vcpu->wq);
 				vcpu->stat.halt_wakeup++;
 			}
 		}
@@ -1459,7 +1459,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	INIT_LIST_HEAD(&vcore->runnable_threads);
 	spin_lock_init(&vcore->lock);
 	spin_lock_init(&vcore->stoltb_lock);
-	init_waitqueue_head(&vcore->wq);
+	init_swait_queue_head(&vcore->wq);
 	vcore->preempt_tb = TB_NIL;
 	vcore->lpcr = kvm->arch.lpcr;
 	vcore->first_vcpuid = core * threads_per_subcore;
@@ -2531,10 +2531,9 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
 	struct kvm_vcpu *vcpu;
 	int do_sleep = 1;
+	DECLARE_SWAITQUEUE(wait);
 
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
 
 	/*
 	 * Check one last time for pending exceptions and ceded state after
@@ -2548,7 +2547,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	}
 
 	if (!do_sleep) {
-		finish_wait(&vc->wq, &wait);
+		finish_swait(&vc->wq, &wait);
 		return;
 	}
 
@@ -2556,7 +2555,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	trace_kvmppc_vcore_blocked(vc, 0);
 	spin_unlock(&vc->lock);
 	schedule();
-	finish_wait(&vc->wq, &wait);
+	finish_swait(&vc->wq, &wait);
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
 	trace_kvmppc_vcore_blocked(vc, 1);
@@ -2612,7 +2611,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 			kvmppc_start_thread(vcpu, vc);
 			trace_kvm_guest_enter(vcpu);
 		} else if (vc->vcore_state == VCORE_SLEEPING) {
-			wake_up(&vc->wq);
+			swake_up(&vc->wq);
 		}
 
 	}
@@ -467,7 +467,7 @@ struct kvm_s390_irq_payload {
 struct kvm_s390_local_interrupt {
 	spinlock_t lock;
 	struct kvm_s390_float_interrupt *float_int;
-	wait_queue_head_t *wq;
+	struct swait_queue_head *wq;
 	atomic_t *cpuflags;
 	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
 	struct kvm_s390_irq_payload irq;

@@ -966,13 +966,13 @@ no_timer:
 
 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
-	if (waitqueue_active(&vcpu->wq)) {
+	if (swait_active(&vcpu->wq)) {
 		/*
 		 * The vcpu gave up the cpu voluntarily, mark it as a good
 		 * yield-candidate.
 		 */
 		vcpu->preempted = true;
-		wake_up_interruptible(&vcpu->wq);
+		swake_up(&vcpu->wq);
 		vcpu->stat.halt_wakeup++;
 	}
 }
@@ -1195,7 +1195,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
 static void apic_timer_expired(struct kvm_lapic *apic)
 {
 	struct kvm_vcpu *vcpu = apic->vcpu;
-	wait_queue_head_t *q = &vcpu->wq;
+	struct swait_queue_head *q = &vcpu->wq;
 	struct kvm_timer *ktimer = &apic->lapic_timer;
 
 	if (atomic_read(&apic->lapic_timer.pending))
@@ -1204,8 +1204,8 @@ static void apic_timer_expired(struct kvm_lapic *apic)
 	atomic_inc(&apic->lapic_timer.pending);
 	kvm_set_pending_timer(vcpu);
 
-	if (waitqueue_active(q))
-		wake_up_interruptible(q);
+	if (swait_active(q))
+		swake_up(q);
 
 	if (apic_lvtt_tscdeadline(apic))
 		ktimer->expired_tscdeadline = ktimer->tscdeadline;
@@ -713,6 +713,18 @@ static inline void __ftrace_enabled_restore(int enabled)
 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
 
+static inline unsigned long get_lock_parent_ip(void)
+{
+	unsigned long addr = CALLER_ADDR0;
+
+	if (!in_lock_functions(addr))
+		return addr;
+	addr = CALLER_ADDR1;
+	if (!in_lock_functions(addr))
+		return addr;
+	return CALLER_ADDR2;
+}
+
 #ifdef CONFIG_IRQSOFF_TRACER
   extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
   extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
@@ -25,6 +25,7 @@
 #include <linux/irqflags.h>
 #include <linux/context_tracking.h>
 #include <linux/irqbypass.h>
+#include <linux/swait.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -218,7 +219,7 @@ struct kvm_vcpu {
 	int fpu_active;
 	int guest_fpu_loaded, guest_xcr0_loaded;
 	unsigned char fpu_counter;
-	wait_queue_head_t wq;
+	struct swait_queue_head wq;
 	struct pid *pid;
 	int sigset_active;
 	sigset_t sigset;
@@ -782,7 +783,7 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
 }
 #endif
 
-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
 #ifdef __KVM_HAVE_ARCH_WQP
 	return vcpu->arch.wqp;
@@ -37,6 +37,9 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
 
 void clear_all_latency_tracing(struct task_struct *p);
 
+extern int sysctl_latencytop(struct ctl_table *table, int write,
+			     void __user *buffer, size_t *lenp, loff_t *ppos);
+
 #else
 
 static inline void
@@ -182,8 +182,6 @@ extern void update_cpu_load_nohz(int active);
 static inline void update_cpu_load_nohz(int active) { }
 #endif
 
-extern unsigned long get_parent_ip(unsigned long addr);
-
 extern void dump_cpu_task(int cpu);
 
 struct seq_file;
@@ -920,6 +918,10 @@ static inline int sched_info_on(void)
 #endif
 }
 
+#ifdef CONFIG_SCHEDSTATS
+void force_schedstat_enabled(void);
+#endif
+
 enum cpu_idle_type {
 	CPU_IDLE,
 	CPU_NOT_IDLE,
@@ -1289,6 +1291,8 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	unsigned long watchdog_stamp;
 	unsigned int time_slice;
+	unsigned short on_rq;
+	unsigned short on_list;
 
 	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -1329,10 +1333,6 @@ struct sched_dl_entity {
 	 * task has to wait for a replenishment to be performed at the
 	 * next firing of dl_timer.
 	 *
-	 * @dl_new tells if a new instance arrived. If so we must
-	 * start executing it with full runtime and reset its absolute
-	 * deadline;
-	 *
 	 * @dl_boosted tells if we are boosted due to DI. If so we are
 	 * outside bandwidth enforcement mechanism (but only until we
 	 * exit the critical section);
@@ -1340,7 +1340,7 @@ struct sched_dl_entity {
 	 * @dl_yielded tells if task gave up the cpu before consuming
 	 * all its available runtime during the last job.
 	 */
-	int dl_throttled, dl_new, dl_boosted, dl_yielded;
+	int dl_throttled, dl_boosted, dl_yielded;
 
 	/*
 	 * Bandwidth enforcement timer. Each -deadline task has its

@@ -95,4 +95,8 @@ extern int sysctl_numa_balancing(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp,
 				 loff_t *ppos);
 
+extern int sysctl_schedstats(struct ctl_table *table, int write,
+			     void __user *buffer, size_t *lenp,
+			     loff_t *ppos);
+
 #endif /* _SCHED_SYSCTL_H */
@@ -0,0 +1,172 @@
+#ifndef _LINUX_SWAIT_H
+#define _LINUX_SWAIT_H
+
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <asm/current.h>
+
+/*
+ * Simple wait queues
+ *
+ * While these are very similar to the other/complex wait queues (wait.h) the
+ * most important difference is that the simple waitqueue allows for
+ * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
+ * times.
+ *
+ * In order to make this so, we had to drop a fair number of features of the
+ * other waitqueue code; notably:
+ *
+ *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
+ *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
+ *    sleeper state.
+ *
+ *  - the exclusive mode; because this requires preserving the list order
+ *    and this is hard.
+ *
+ *  - custom wake functions; because you cannot give any guarantees about
+ *    random code.
+ *
+ * As a side effect of this; the data structures are slimmer.
+ *
+ * One would recommend using this wait queue where possible.
+ */
+
+struct task_struct;
+
+struct swait_queue_head {
+	raw_spinlock_t		lock;
+	struct list_head	task_list;
+};
+
+struct swait_queue {
+	struct task_struct	*task;
+	struct list_head	task_list;
+};
+
+#define __SWAITQUEUE_INITIALIZER(name) {				\
+	.task		= current,					\
+	.task_list	= LIST_HEAD_INIT((name).task_list),		\
+}
+
+#define DECLARE_SWAITQUEUE(name)					\
+	struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)
+
+#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) {				\
+	.lock		= __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
+	.task_list	= LIST_HEAD_INIT((name).task_list),		\
+}
+
+#define DECLARE_SWAIT_QUEUE_HEAD(name)					\
+	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)
+
+extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
+				    struct lock_class_key *key);
+
+#define init_swait_queue_head(q)				\
+	do {							\
+		static struct lock_class_key __key;		\
+		__init_swait_queue_head((q), #q, &__key);	\
+	} while (0)
+
+#ifdef CONFIG_LOCKDEP
+# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)			\
+	({ init_swait_queue_head(&name); name; })
+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)			\
+	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
+#else
+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)			\
+	DECLARE_SWAIT_QUEUE_HEAD(name)
+#endif
+
+static inline int swait_active(struct swait_queue_head *q)
+{
+	return !list_empty(&q->task_list);
+}
+
+extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_all(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q);
+
+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
+
+extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+#define ___swait_event(wq, condition, state, ret, cmd)			\
+({									\
+	struct swait_queue __wait;					\
+	long __ret = ret;						\
+									\
+	INIT_LIST_HEAD(&__wait.task_list);				\
+	for (;;) {							\
+		long __int = prepare_to_swait_event(&wq, &__wait, state);\
+									\
+		if (condition)						\
+			break;						\
+									\
+		if (___wait_is_interruptible(state) && __int) {		\
+			__ret = __int;					\
+			break;						\
+		}							\
+									\
+		cmd;							\
+	}								\
+	finish_swait(&wq, &__wait);					\
+	__ret;								\
+})
+
+#define __swait_event(wq, condition)					\
+	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
+			    schedule())
+
+#define swait_event(wq, condition)					\
+do {									\
+	if (condition)							\
+		break;							\
+	__swait_event(wq, condition);					\
+} while (0)
+
+#define __swait_event_timeout(wq, condition, timeout)			\
+	___swait_event(wq, ___wait_cond_timeout(condition),		\
+		       TASK_UNINTERRUPTIBLE, timeout,			\
+		       __ret = schedule_timeout(__ret))
+
+#define swait_event_timeout(wq, condition, timeout)			\
+({									\
+	long __ret = timeout;						\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __swait_event_timeout(wq, condition, timeout);	\
+	__ret;								\
+})
+
+#define __swait_event_interruptible(wq, condition)			\
+	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
+		       schedule())
+
+#define swait_event_interruptible(wq, condition)			\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__ret = __swait_event_interruptible(wq, condition);	\
+	__ret;								\
+})
+
+#define __swait_event_interruptible_timeout(wq, condition, timeout)	\
+	___swait_event(wq, ___wait_cond_timeout(condition),		\
+		       TASK_INTERRUPTIBLE, timeout,			\
+		       __ret = schedule_timeout(__ret))
+
+#define swait_event_interruptible_timeout(wq, condition, timeout)	\
+({									\
+	long __ret = timeout;						\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __swait_event_interruptible_timeout(wq,		\
+						condition, timeout);	\
+	__ret;								\
+})
+
+#endif /* _LINUX_SWAIT_H */
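
Besides the swait_event*() macros, the header above also exports the open-coded prepare/finish pair that kvmppc_vcore_blocked() uses earlier in this diff, for callers that must re-check a condition under their own lock before sleeping. A minimal sketch of that pattern; the demo_* names and the locking scheme are hypothetical:

	/* Open-coded swait pattern; compare kvmppc_vcore_blocked() above. */
	static void demo_block(struct swait_queue_head *wq, spinlock_t *lock,
			       bool (*should_sleep)(void))
	{
		DECLARE_SWAITQUEUE(wait);

		prepare_to_swait(wq, &wait, TASK_INTERRUPTIBLE);
		spin_lock(lock);
		if (!should_sleep()) {
			/* Condition already satisfied: undo and bail out. */
			spin_unlock(lock);
			finish_swait(wq, &wait);
			return;
		}
		spin_unlock(lock);
		schedule();		/* sleep until a swake_up() on wq */
		finish_swait(wq, &wait);
	}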
@@ -338,7 +338,7 @@ do {									\
 			 schedule(); try_to_freeze())
 
 /**
- * wait_event - sleep (or freeze) until a condition gets true
+ * wait_event_freezable - sleep (or freeze) until a condition gets true
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
@@ -47,12 +47,12 @@
  * of times)
  */
 
-#include <linux/latencytop.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
 #include <linux/proc_fs.h>
+#include <linux/latencytop.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/list.h>
@@ -289,4 +289,16 @@ static int __init init_lstats_procfs(void)
 	proc_create("latency_stats", 0644, NULL, &lstats_fops);
 	return 0;
 }
+
+int sysctl_latencytop(struct ctl_table *table, int write,
+			void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int err;
+
+	err = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (latencytop_enabled)
+		force_schedstat_enabled();
+
+	return err;
+}
 device_initcall(init_lstats_procfs);
@@ -59,6 +59,7 @@ int profile_setup(char *str)
 
 	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
 #ifdef CONFIG_SCHEDSTATS
+		force_schedstat_enabled();
 		prof_on = SLEEP_PROFILING;
 		if (str[strlen(sleepstr)] == ',')
 			str += strlen(sleepstr) + 1;
@@ -1614,7 +1614,6 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 	int needmore;
 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-	rcu_nocb_gp_cleanup(rsp, rnp);
 	rnp->need_future_gp[c & 0x1] = 0;
 	needmore = rnp->need_future_gp[(c + 1) & 0x1];
 	trace_rcu_future_gp(rnp, rdp, c,
@@ -1635,7 +1634,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 	    !READ_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
-	wake_up(&rsp->gp_wq);
+	swake_up(&rsp->gp_wq);
 }
 
 /*
@@ -2010,6 +2009,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	int nocb = 0;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
+	struct swait_queue_head *sq;
 
 	WRITE_ONCE(rsp->gp_activity, jiffies);
 	raw_spin_lock_irq_rcu_node(rnp);
@@ -2046,7 +2046,9 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
 		/* smp_mb() provided by prior unlock-lock pair. */
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
+		sq = rcu_nocb_gp_get(rnp);
 		raw_spin_unlock_irq(&rnp->lock);
+		rcu_nocb_gp_cleanup(sq);
 		cond_resched_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 		rcu_gp_slow(rsp, gp_cleanup_delay);
@@ -2092,7 +2094,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("reqwait"));
 			rsp->gp_state = RCU_GP_WAIT_GPS;
-			wait_event_interruptible(rsp->gp_wq,
+			swait_event_interruptible(rsp->gp_wq,
 						 READ_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
 			rsp->gp_state = RCU_GP_DONE_GPS;
@@ -2122,7 +2124,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
-			ret = wait_event_interruptible_timeout(rsp->gp_wq,
+			ret = swait_event_interruptible_timeout(rsp->gp_wq,
 					rcu_gp_fqs_check_wake(rsp, &gf), j);
 			rsp->gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */
@@ -2246,7 +2248,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
-	rcu_gp_kthread_wake(rsp);
+	swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
 }
 
 /*
@@ -2900,7 +2902,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	}
 	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
 	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
-	rcu_gp_kthread_wake(rsp);
+	swake_up(&rsp->gp_wq);  /* Memory barrier implied by swake_up() path. */
 }
 
 /*
@@ -3529,7 +3531,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		if (wake) {
 			smp_mb(); /* EGP done before wake_up(). */
-			wake_up(&rsp->expedited_wq);
+			swake_up(&rsp->expedited_wq);
 		}
 		break;
 	}
@@ -3780,7 +3782,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	jiffies_start = jiffies;
 
 	for (;;) {
-		ret = wait_event_interruptible_timeout(
+		ret = swait_event_timeout(
 				rsp->expedited_wq,
 				sync_rcu_preempt_exp_done(rnp_root),
 				jiffies_stall);
@@ -3788,7 +3790,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 			return;
 		if (ret < 0) {
 			/* Hit a signal, disable CPU stall warnings. */
-			wait_event(rsp->expedited_wq,
+			swait_event(rsp->expedited_wq,
 				   sync_rcu_preempt_exp_done(rnp_root));
 			return;
 		}
@@ -4482,8 +4484,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		}
 	}
 
-	init_waitqueue_head(&rsp->gp_wq);
-	init_waitqueue_head(&rsp->expedited_wq);
+	init_swait_queue_head(&rsp->gp_wq);
+	init_swait_queue_head(&rsp->expedited_wq);
 	rnp = rsp->level[rcu_num_lvls - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
Some files were not shown because too many files have changed in this diff.