Merge branch 'sched-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (46 commits)
  sched: Add comments to find_busiest_group() function
  sched: Refactor the power savings balance code
  sched: Optimize the !power_savings_balance during fbg()
  sched: Create a helper function to calculate imbalance
  sched: Create helper to calculate small_imbalance in fbg()
  sched: Create a helper function to calculate sched_domain stats for fbg()
  sched: Define structure to store the sched_domain statistics for fbg()
  sched: Create a helper function to calculate sched_group stats for fbg()
  sched: Define structure to store the sched_group statistics for fbg()
  sched: Fix indentations in find_busiest_group() using gotos
  sched: Simple helper functions for find_busiest_group()
  sched: remove unused fields from struct rq
  sched: jiffies not printed per CPU
  sched: small optimisation of can_migrate_task()
  sched: fix typos in documentation
  sched: add avg_overlap decay
  x86, sched_clock(): mark variables read-mostly
  sched: optimize ttwu vs group scheduling
  sched: TIF_NEED_RESCHED -> need_resched() cleanup
  sched: don't rebalance if attached on NULL domain
  ...
kernel/latencytop.c: +71 -12
@@ -9,6 +9,44 @@
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. Both the count, total accumulated latency and maximum
 * latency are tracked in this data structure. When the fixed size structure is
 * full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */

#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
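The format documented in the comment block above can be read back from userspace in a few lines of C. The sketch below is purely illustrative (nothing like it is part of this patch); it assumes the single "Latency Top version" header line and the count / accumulated / maximum / backtrace field order described above.

/* Hypothetical reader for /proc/latency_stats; not from the patch. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/latency_stats", "r");
        unsigned long count, total, max;
        char trace[256];

        if (!f)
                return 1;
        /* Skip the "Latency Top version : v0.1" header line. */
        (void)fscanf(f, "%*[^\n]");
        /* Each record: hit count, accumulated usecs, maximum usecs, backtrace. */
        while (fscanf(f, "%lu %lu %lu %255[^\n]", &count, &total, &max, trace) == 4)
                printf("avg %lu usec over %lu hits (max %lu): %s\n",
                       count ? total / count : 0, count, max, trace);
        fclose(f);
        return 0;
}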
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
                                firstnonnull = i;
                        continue;
                }
                for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat->backtrace[q];

                        if (latency_record[i].backtrace[q] != record) {

@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
        memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
/*
 * Iterator to store a backtrace into a latency record entry
 */
static inline void store_stacktrace(struct task_struct *tsk,
                                        struct latency_record *lat)
{
        struct stack_trace trace;

        memset(&trace, 0, sizeof(trace));
        trace.max_entries = LT_BACKTRACEDEPTH;
        trace.entries = &lat->backtrace[0];
        trace.skip = 0;
        save_stack_trace_tsk(tsk, &trace);
}

/**
 * __account_scheduler_latency - record an occured latency
 * @tsk - the task struct of the task hitting the latency
 * @usecs - the duration of the latency in microseconds
 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
        unsigned long flags;
        int i, q;
        struct latency_record lat;

        if (!latencytop_enabled)
                return;

        /* Long interruptible waits are generally user requested... */
        if (inter && usecs > 5000)
                return;

        /* Negative sleeps are time going backwards */
        /* Zero-time sleeps are non-interesting */
        if (usecs <= 0)
                return;

        memset(&lat, 0, sizeof(lat));
        lat.count = 1;
        lat.time = usecs;
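Since the latencytop_enabled check is dropped here and the function gains a leading "__", the fast-path test presumably moves to the calling side. A static inline wrapper along the following lines (it would live in include/linux/latencytop.h, which is not part of this excerpt, so treat this as a sketch of the intent rather than the exact code) keeps the disabled case down to a single branch:

static inline void
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
        if (unlikely(latencytop_enabled))
                __account_scheduler_latency(task, usecs, inter);
}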
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
        if (tsk->latency_record_count >= LT_SAVECOUNT)
                goto out_unlock;

        for (i = 0; i < LT_SAVECOUNT ; i++) {
        for (i = 0; i < LT_SAVECOUNT; i++) {
                struct latency_record *mylat;
                int same = 1;

                mylat = &tsk->latency_record[i];
                for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat.backtrace[q];

                        if (mylat->backtrace[q] != record) {

@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v)
        for (i = 0; i < MAXLR; i++) {
                if (latency_record[i].backtrace[0]) {
                        int q;
                        seq_printf(m, "%i %li %li ",
                        seq_printf(m, "%i %lu %lu ",
                                latency_record[i].count,
                                latency_record[i].time,
                                latency_record[i].max);

@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp)
        return single_open(filp, lstats_show, NULL);
}

static struct file_operations lstats_fops = {
static const struct file_operations lstats_fops = {
        .open = lstats_open,
        .read = seq_read,
        .write = lstats_write,

@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void)
        proc_create("latency_stats", 0644, NULL, &lstats_fops);
        return 0;
}
__initcall(init_lstats_procfs);
device_initcall(init_lstats_procfs);
+731 -329 (file diff suppressed because it is too large)
kernel/sched_clock.c: +17 -13
@@ -24,11 +24,11 @@
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.

@@ -43,6 +43,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

struct sched_clock_data {
        /*

@@ -87,7 +88,7 @@ void sched_clock_init(void)
}

/*
 * min,max except they take wrapping into account
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)

@@ -111,15 +112,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
        s64 delta = now - scd->tick_raw;
        u64 clock, min_clock, max_clock;

        WARN_ON_ONCE(!irqs_disabled());

        if (unlikely(delta < 0))
                delta = 0;

        /*
         * scd->clock = clamp(scd->tick_gtod + delta,
         *              max(scd->tick_gtod, scd->clock),
         *              scd->tick_gtod + TICK_NSEC);
         *                    max(scd->tick_gtod, scd->clock),
         *                    scd->tick_gtod + TICK_NSEC);
         */

        clock = scd->tick_gtod + delta;
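The clamp described in the comment above is built out of wrap-safe min/max helpers; wrap_min() appears as context in the previous hunk. The standalone sketch below (userspace types, made-up clamp_clock() name, illustrative only) shows the idea: comparing through a signed difference keeps the ordering test correct even if the 64-bit clock values wrap, and the two helpers together pin the clock between max(tick_gtod, previous clock) and tick_gtod + TICK_NSEC.

/* Illustrative sketch of the wrap-safe clamp; not a copy of the kernel code. */
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

static inline u64 wrap_min(u64 x, u64 y)
{
        return (s64)(x - y) < 0 ? x : y;        /* "x < y" even across a wrap */
}

static inline u64 wrap_max(u64 x, u64 y)
{
        return (s64)(x - y) > 0 ? x : y;        /* "x > y" even across a wrap */
}

/* clock = clamp(tick_gtod + delta, max(tick_gtod, prev_clock), tick_gtod + tick_nsec) */
static inline u64 clamp_clock(u64 tick_gtod, u64 prev_clock, u64 delta, u64 tick_nsec)
{
        u64 clock     = tick_gtod + delta;
        u64 min_clock = wrap_max(tick_gtod, prev_clock);
        u64 max_clock = tick_gtod + tick_nsec;

        clock = wrap_max(clock, min_clock);     /* never go backwards */
        clock = wrap_min(clock, max_clock);     /* never run ahead by more than a tick */

        return clock;
}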
@@ -148,12 +147,13 @@ static void lock_double_clock(struct sched_clock_data *data1,

u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd = cpu_sdc(cpu);
        u64 now, clock, this_clock, remote_clock;
        struct sched_clock_data *scd;

        if (unlikely(!sched_clock_running))
                return 0ull;
        if (sched_clock_stable)
                return sched_clock();

        scd = cpu_sdc(cpu);
        WARN_ON_ONCE(!irqs_disabled());
        now = sched_clock();

@@ -195,14 +195,18 @@ u64 sched_clock_cpu(int cpu)

void sched_clock_tick(void)
{
        struct sched_clock_data *scd = this_scd();
        struct sched_clock_data *scd;
        u64 now, now_gtod;

        if (sched_clock_stable)
                return;

        if (unlikely(!sched_clock_running))
                return;

        WARN_ON_ONCE(!irqs_disabled());

        scd = this_scd();
        now_gtod = ktime_to_ns(ktime_get());
        now = sched_clock();

@@ -250,7 +254,7 @@ u64 sched_clock_cpu(int cpu)
        return sched_clock();
}

#endif
#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

unsigned long long cpu_clock(int cpu)
{
kernel/sched_debug.c

@@ -272,7 +272,6 @@ static void print_cpu(struct seq_file *m, int cpu)
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
        PN(next_balance);
        P(curr->pid);
        PN(clock);

@@ -287,9 +286,6 @@ static void print_cpu(struct seq_file *m, int cpu)
#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);

        P(yld_exp_empty);
        P(yld_act_empty);
        P(yld_both_empty);
        P(yld_count);

        P(sched_switch);

@@ -314,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
        SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

@@ -325,6 +321,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
        SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        P(jiffies);
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);

@@ -397,6 +394,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        PN(se.vruntime);
        PN(se.sum_exec_runtime);
        PN(se.avg_overlap);
        PN(se.avg_wakeup);

        nr_switches = p->nvcsw + p->nivcsw;
kernel/sched_fair.c: +53 -6
@@ -1314,16 +1314,63 @@ out:
}
#endif /* CONFIG_SMP */

static unsigned long wakeup_gran(struct sched_entity *se)
/*
 * Adaptive granularity
 *
 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
 * with the limit of wakeup_gran -- when it never does a wakeup.
 *
 * So the smaller avg_wakeup is the faster we want this task to preempt,
 * but we don't want to treat the preemptee unfairly and therefore allow it
 * to run for at least the amount of time we'd like to run.
 *
 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
 *
 * NOTE: we use *nr_running to scale with load, this nicely matches the
 * degrading latency on load.
 */
static unsigned long
adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
{
        u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
        u64 gran = 0;

        if (this_run < expected_wakeup)
                gran = expected_wakeup - this_run;

        return min_t(s64, gran, sysctl_sched_wakeup_granularity);
}
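Plugging illustrative numbers into adaptive_gran() makes the formula concrete. The values below are hypothetical (nanoseconds, chosen only to show the arithmetic); in the kernel they come from the scheduler's runtime accounting.

/* Worked example of the adaptive granularity formula; all values hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t wakeup_granularity = 10000000ULL; /* stand-in for sysctl_sched_wakeup_granularity: 10 ms */
        uint64_t avg_wakeup         =  1000000ULL; /* waking task usually runs ~1 ms before waking someone */
        uint64_t nr_running         =  2;          /* load on this runqueue */
        uint64_t this_run           =  1500000ULL; /* current task has run 1.5 ms since it was scheduled */

        uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;   /* 4 ms */
        uint64_t gran = 0;

        if (this_run < expected_wakeup)
                gran = expected_wakeup - this_run;                 /* 2.5 ms left to run */
        if (gran > wakeup_granularity)
                gran = wakeup_granularity;                         /* capped at the sysctl limit */

        printf("adaptive gran = %llu ns\n", (unsigned long long)gran);
        return 0;
}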
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
        unsigned long gran = sysctl_sched_wakeup_granularity;

        if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
                gran = adaptive_gran(curr, se);

        /*
         * More easily preempt - nice tasks, while not making it harder for
         * + nice tasks.
         * Since its curr running now, convert the gran from real-time
         * to virtual-time in his units.
         */
        if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
                gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
        if (sched_feat(ASYM_GRAN)) {
                /*
                 * By using 'se' instead of 'curr' we penalize light tasks, so
                 * they get preempted easier. That is, if 'se' < 'curr' then
                 * the resulting gran will be larger, therefore penalizing the
                 * lighter, if otoh 'se' > 'curr' then the resulting gran will
                 * be smaller, again penalizing the lighter task.
                 *
                 * This is especially important for buddies when the leftmost
                 * task is higher priority than the buddy.
                 */
                if (unlikely(se->load.weight != NICE_0_LOAD))
                        gran = calc_delta_fair(gran, se);
        } else {
                if (unlikely(curr->load.weight != NICE_0_LOAD))
                        gran = calc_delta_fair(gran, curr);
        }

        return gran;
}
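The ASYM_GRAN branch converts the granularity into the waking task's virtual time via calc_delta_fair(), which scales a delta by roughly NICE_0_LOAD / load.weight. The sketch below uses made-up weights and a simplified stand-in for calc_delta_fair() (its fixed-point rounding is ignored), purely to show the direction of the effect described in the comment.

/* Illustrative only: scaling by the waking task's weight penalizes light tasks. */
#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024

/* Simplified stand-in for calc_delta_fair(): delta * NICE_0_LOAD / weight. */
static uint64_t scale_to_vtime(uint64_t delta_ns, unsigned long weight)
{
        return delta_ns * NICE_0_LOAD / weight;
}

int main(void)
{
        uint64_t gran = 2500000ULL;             /* 2.5 ms of real time */

        /* A light 'se' (weight 512 < NICE_0_LOAD) sees a larger virtual gran,
         * so it needs a bigger vruntime lead before it may preempt. */
        printf("light se: %llu ns\n", (unsigned long long)scale_to_vtime(gran, 512));

        /* A heavy 'se' (weight 2048 > NICE_0_LOAD) sees a smaller virtual gran,
         * so it preempts the running task more easily. */
        printf("heavy se: %llu ns\n", (unsigned long long)scale_to_vtime(gran, 2048));
        return 0;
}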
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
        if (vdiff <= 0)
                return -1;

        gran = wakeup_gran(curr);
        gran = wakeup_gran(curr, se);
        if (vdiff > gran)
                return 1;
kernel/sched_features.h

@@ -1,5 +1,6 @@
SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
SCHED_FEAT(NORMALIZED_SLEEPER, 1)
SCHED_FEAT(NORMALIZED_SLEEPER, 0)
SCHED_FEAT(ADAPTIVE_GRAN, 1)
SCHED_FEAT(WAKEUP_PREEMPT, 1)
SCHED_FEAT(START_DEBIT, 1)
SCHED_FEAT(AFFINE_WAKEUPS, 1)
+376 -165 (file diff suppressed because it is too large)
kernel/sched_stats.h

@@ -4,7 +4,7 @@
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 14
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
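The "adapt (or abort)" advice above is exactly what the version bump is for: a consumer of /proc/schedstat should refuse to parse a layout it does not know. An illustrative check (not part of the patch, and assuming the file still starts with the "version <n>" line that show_schedstat() emits) might look like:

/* Hypothetical consumer-side check of the /proc/schedstat format version. */
#include <stdio.h>

int main(void)
{
        unsigned int version = 0;
        FILE *f = fopen("/proc/schedstat", "r");

        if (!f)
                return 1;
        if (fscanf(f, "version %u", &version) != 1 || version != 15) {
                fprintf(stderr, "unexpected schedstat version %u, aborting\n", version);
                fclose(f);
                return 1;
        }
        /* ... parse the per-cpu and per-domain lines here ... */
        fclose(f);
        return 0;
}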
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)

                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
                    cpu, rq->yld_both_empty,
                    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
                    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
                    cpu, rq->yld_count,
                    rq->sched_switch, rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
                    rq->rq_cpu_time,