/*
* Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
*
* Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* Interactivity improvements by Mike Galbraith
* (C) 2007 Mike Galbraith <efault@gmx.de>
*
* Various enhancements by Dmitry Adamushko.
* (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
*
* Group scheduling enhancements by Srivatsa Vaddagiri
* Copyright IBM Corporation, 2007
* Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
*
* Scaled math optimizations by Thomas Gleixner
* Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
#include <linux/latencytop.h>
/*
* Targeted preemption latency for CPU-bound tasks:
* (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
*
* NOTE: this latency value is not the same as the concept of
* 'timeslice length' - timeslices in CFS are of variable length
* and have no persistent notion like in traditional, time-slice
* based scheduling concepts.
*
* (to see the precise effective timeslice length of your workload,
* run vmstat and monitor the context-switches (cs) field)
*/
unsigned int sysctl_sched_latency = 20000000ULL;
/*
* Minimal preemption granularity for CPU-bound tasks:
* (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_min_granularity = 4000000ULL;
/*
* sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
*/
static unsigned int sched_nr_latency = 5;
/*
* After fork, the child runs first (default). If set to 0 then the
* parent will (try to) run first.
*/
const_debug unsigned int sysctl_sched_child_runs_first = 1;
/*
* sys_sched_yield() compat mode
*
* This option switches the aggressive yield implementation of the
* old scheduler back on.
*/
unsigned int __read_mostly sysctl_sched_compat_yield;
/*
* SCHED_OTHER wake-up granularity.
* (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
/**************************************************************
* CFS operations on generic schedulable entities:
*/
static inline struct task_struct *task_of(struct sched_entity *se)
{
return container_of(se, struct task_struct, se);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
return cfs_rq->rq;
}
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
for (; se; se = se->parent)
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return p->se.cfs_rq;
}
/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
return se->cfs_rq;
}
/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return grp->my_q;
}
/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
* another cpu ('this_cpu')
*/
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
return cfs_rq->tg->cfs_rq[this_cpu];
}
/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
if (se->cfs_rq == pse->cfs_rq)
return 1;
return 0;
}
static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
return se->parent;
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
return container_of(cfs_rq, struct rq, cfs);
}
#define entity_is_task(se) 1
#define for_each_sched_entity(se) \
for (; se; se = NULL)
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return &task_rq(p)->cfs;
}
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
struct task_struct *p = task_of(se);
struct rq *rq = task_rq(p);
return &rq->cfs;
}
/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return NULL;
}
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
return &cpu_rq(this_cpu)->cfs;
}
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
return 1;
}
static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
return NULL;
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
/**************************************************************
* Scheduling class tree data structure manipulation methods:
*/
static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - min_vruntime);
if (delta > 0)
min_vruntime = vruntime;
return min_vruntime;
}
static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - min_vruntime);
if (delta < 0)
min_vruntime = vruntime;
return min_vruntime;
}
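/*
 * A note on the two helpers above: comparing through a signed delta,
 * rather than comparing the u64 values directly, keeps the min/max
 * selection correct even if the vruntime counters should ever wrap,
 * in the same spirit as time_after().
 */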
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return se->vruntime - cfs_rq->min_vruntime;
}
/*
* Enqueue an entity into the rb-tree:
*/
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
struct rb_node *parent = NULL;
struct sched_entity *entry;
s64 key = entity_key(cfs_rq, se);
int leftmost = 1;
/*
* Find the right place in the rbtree:
*/
while (*link) {
parent = *link;
entry = rb_entry(parent, struct sched_entity, run_node);
/*
* We don't care about collisions. Nodes with
* the same key stay together.
*/
if (key < entity_key(cfs_rq, entry)) {
link = &parent->rb_left;
} else {
link = &parent->rb_right;
leftmost = 0;
}
}
/*
* Maintain a cache of leftmost tree entries (it is frequently
* used):
*/
if (leftmost) {
cfs_rq->rb_leftmost = &se->run_node;
/*
* maintain cfs_rq->min_vruntime to be a monotonically increasing
* value tracking the leftmost vruntime in the tree.
*/
cfs_rq->min_vruntime =
max_vruntime(cfs_rq->min_vruntime, se->vruntime);
}
rb_link_node(&se->run_node, parent, link);
rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->rb_leftmost == &se->run_node) {
struct rb_node *next_node;
struct sched_entity *next;
next_node = rb_next(&se->run_node);
cfs_rq->rb_leftmost = next_node;
if (next_node) {
next = rb_entry(next_node,
struct sched_entity, run_node);
cfs_rq->min_vruntime =
max_vruntime(cfs_rq->min_vruntime,
next->vruntime);
}
}
if (cfs_rq->next == se)
cfs_rq->next = NULL;
rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}
static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
return cfs_rq->rb_leftmost;
}
static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}
static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
if (!last)
return NULL;
return rb_entry(last, struct sched_entity, run_node);
}
/**************************************************************
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *filp, void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
if (ret || !write)
return ret;
sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
sysctl_sched_min_granularity);
return 0;
}
#endif
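/*
 * For example, raising sysctl_sched_latency to 40ms via the handler above
 * while sysctl_sched_min_granularity stays at its 4ms default recomputes
 * sched_nr_latency as DIV_ROUND_UP(40ms, 4ms) = 10.
 */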
/*
* delta *= w / rw
*/
static inline unsigned long
calc_delta_weight(unsigned long delta, struct sched_entity *se)
{
for_each_sched_entity(se) {
delta = calc_delta_mine(delta,
se->load.weight, &cfs_rq_of(se)->load);
}
return delta;
}
/*
* delta *= rw / w
*/
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
for_each_sched_entity(se) {
delta = calc_delta_mine(delta,
cfs_rq_of(se)->load.weight, &se->load);
}
return delta;
}
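/*
 * In both helpers above, calc_delta_mine(delta, weight, lw) scales delta
 * by roughly weight / lw->weight. calc_delta_weight() therefore turns a
 * period into this entity's wall-clock share of it, while calc_delta_fair()
 * turns consumed wall-clock time into virtual time, which advances faster
 * for lighter entities. Both walk up the group hierarchy so the scaling
 * nests for grouped entities.
 */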
/*
* The idea is to set a period in which each task runs once.
*
* When there are too many tasks (more than sched_nr_latency) we have to stretch
* this period because otherwise the slices get too small.
*
* p = (nr <= nl) ? l : l*nr/nl
*/
static u64 __sched_period(unsigned long nr_running)
{
u64 period = sysctl_sched_latency;
unsigned long nr_latency = sched_nr_latency;
if (unlikely(nr_running > nr_latency)) {
period = sysctl_sched_min_granularity;
period *= nr_running;
}
return period;
}
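/*
 * Example, with the single-CPU defaults above (20ms latency, 4ms minimum
 * granularity, sched_nr_latency = 5): with up to 5 runnable tasks the
 * period stays at 20ms, while with e.g. 8 tasks it is stretched to
 * 8 * 4ms = 32ms so that no slice drops below the minimum granularity.
 */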
/*
* We calculate the wall-time slice from the period by taking a part
* proportional to the weight.
*
* s = p*w/rw
*/
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
}
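/*
 * Example: with two runnable nice-0 tasks and the 20ms default period,
 * each task's slice is 20ms * w/rw = 10ms of wall-clock time per period.
 */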
/*
* We calculate the vruntime slice of a to-be-inserted task
*
* vs = s*rw/w = p
*/
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
unsigned long nr_running = cfs_rq->nr_running;
if (!se->on_rq)
nr_running++;
return __sched_period(nr_running);
}
/*
* The goal of calc_delta_asym() is to be asymmetric around NICE_0_LOAD, in
* that it favours nice levels >= 0 over nice levels < 0.
*
* -20 |
* |
* 0 --------+-------
* .'
* 19 .'
*
*/
static unsigned long
calc_delta_asym(unsigned long delta, struct sched_entity *se)
{
struct load_weight lw = {
.weight = NICE_0_LOAD,
.inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
};
for_each_sched_entity(se) {
struct load_weight *se_lw = &se->load;
if (se->load.weight < NICE_0_LOAD)
se_lw = &lw;
delta = calc_delta_mine(delta,
cfs_rq_of(se)->load.weight, se_lw);
}
return delta;
}
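/*
 * Mechanically, this is calc_delta_fair() (delta *= rw / w) except that
 * for entities lighter than nice-0 the divisor is clamped to NICE_0_LOAD,
 * so the result never grows merely because the entity carries a small
 * weight.
 */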
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
*/
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
unsigned long delta_exec)
2007-07-09 18:51:58 +02:00
{
unsigned long delta_exec_weighted;
schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
curr->vruntime += delta_exec_weighted;
}
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_of(cfs_rq)->clock;
unsigned long delta_exec;
if (unlikely(!curr))
return;
/*
* Get the amount of time the current task was running
* since the last time we changed load (this cannot
* overflow on 32 bits):
*/
delta_exec = (unsigned long)(now - curr->exec_start);
__update_curr(cfs_rq, curr, delta_exec);
curr->exec_start = now;
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
cpuacct_charge(curtask, delta_exec);
}
}
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}
/*
* Task is being enqueued - update stats:
*/
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/*
* Are we enqueueing a waiting task? (for current tasks
* a dequeue/enqueue event is a NOP)
*/
if (se != cfs_rq->curr)
update_stats_wait_start(cfs_rq, se);
}
static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
schedstat_set(se->wait_max, max(se->wait_max,
rq_of(cfs_rq)->clock - se->wait_start));
schedstat_set(se->wait_count, se->wait_count + 1);
schedstat_set(se->wait_sum, se->wait_sum +
rq_of(cfs_rq)->clock - se->wait_start);
schedstat_set(se->wait_start, 0);
}
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/*
* Mark the end of the wait period if dequeueing a
* waiting task:
*/
if (se != cfs_rq->curr)
update_stats_wait_end(cfs_rq, se);
}
/*
* We are picking a new current task - update its stats:
*/
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/*
* We are starting a new run period:
*/
se->exec_start = rq_of(cfs_rq)->clock;
}
/**************************************************
* Scheduling class queueing methods:
*/
#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_add(&cfs_rq->load, se->load.weight);
if (!parent_entity(se))
inc_cpu_load(rq_of(cfs_rq), se->load.weight);
if (entity_is_task(se))
add_cfs_task_weight(cfs_rq, se->load.weight);
cfs_rq->nr_running++;
se->on_rq = 1;
list_add(&se->group_node, &cfs_rq->tasks);
}
static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
update_load_sub(&cfs_rq->load, se->load.weight);
if (!parent_entity(se))
dec_cpu_load(rq_of(cfs_rq), se->load.weight);
if (entity_is_task(se))
add_cfs_task_weight(cfs_rq, -se->load.weight);
cfs_rq->nr_running--;
se->on_rq = 0;
list_del_init(&se->group_node);
}
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
if (se->sleep_start) {
u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
struct task_struct *tsk = task_of(se);
if ((s64)delta < 0)
delta = 0;
if (unlikely(delta > se->sleep_max))
se->sleep_max = delta;
se->sleep_start = 0;
se->sum_sleep_runtime += delta;
account_scheduler_latency(tsk, delta >> 10, 1);
}
if (se->block_start) {
u64 delta = rq_of(cfs_rq)->clock - se->block_start;
struct task_struct *tsk = task_of(se);
if ((s64)delta < 0)
delta = 0;
if (unlikely(delta > se->block_max))
se->block_max = delta;
se->block_start = 0;
se->sum_sleep_runtime += delta;
/*
* Blocking time is in units of nanosecs, so shift by 20 to
* get a milliseconds-range estimation of the amount of
* time that the task spent sleeping:
*/
if (unlikely(prof_on == SLEEP_PROFILING)) {
profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
delta >> 20);
}
account_scheduler_latency(tsk, delta >> 10, 0);
}
#endif
}
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
s64 d = se->vruntime - cfs_rq->min_vruntime;
if (d < 0)
d = -d;
if (d > 3*sysctl_sched_latency)
schedstat_inc(cfs_rq, nr_spread_over);
#endif
}
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
u64 vruntime;
if (first_fair(cfs_rq)) {
vruntime = min_vruntime(cfs_rq->min_vruntime,
__pick_next_entity(cfs_rq)->vruntime);
} else
vruntime = cfs_rq->min_vruntime;
/*
* The 'current' period is already promised to the current tasks;
* however, the extra weight of the new task will slow them down a
* little, so place the new task such that it fits in the slot that
* stays open at the end.
*/
if (initial && sched_feat(START_DEBIT))
vruntime += sched_vslice_add(cfs_rq, se);
if (!initial) {
/* sleeps up to a single latency don't count. */
if (sched_feat(NEW_FAIR_SLEEPERS)) {
unsigned long thresh = sysctl_sched_latency;
/*
* convert the sleeper threshold into virtual time
*/
if (sched_feat(NORMALIZED_SLEEPER))
thresh = calc_delta_fair(thresh, se);
vruntime -= thresh;
}
/* ensure we never gain time by being placed backwards. */
vruntime = max_vruntime(se->vruntime, vruntime);
}
se->vruntime = vruntime;
}
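/*
 * Roughly: with START_DEBIT set, a newly forked task (initial=1) is placed
 * about one full period to the right of the current minimum vruntime,
 * while a waking task (initial=0) may receive up to one latency's worth of
 * credit, clamped so that it can never end up with less vruntime than it
 * already had.
 */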
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
account_entity_enqueue(cfs_rq, se);
if (wakeup) {
place_entity(cfs_rq, se, 0);
enqueue_sleeper(cfs_rq, se);
}
update_stats_enqueue(cfs_rq, se);
check_spread(cfs_rq, se);
if (se != cfs_rq->curr)
__enqueue_entity(cfs_rq, se);
}
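/*
 * update_avg() below is a cheap exponential moving average: the shift by
 * three gives the newest sample a weight of 1/8.
 */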
static void update_avg(u64 *avg, u64 sample)
{
s64 diff = sample - *avg;
*avg += diff >> 3;
}
static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (!se->last_wakeup)
return;
update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
se->last_wakeup = 0;
}
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
update_stats_dequeue(cfs_rq, se);
if (sleep) {
update_avg_stats(cfs_rq, se);
#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
if (tsk->state & TASK_INTERRUPTIBLE)
se->sleep_start = rq_of(cfs_rq)->clock;
if (tsk->state & TASK_UNINTERRUPTIBLE)
se->block_start = rq_of(cfs_rq)->clock;
}
#endif
}
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
account_entity_dequeue(cfs_rq, se);
}
/*
* Preempt the current task with a newly woken task if needed:
*/
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
unsigned long ideal_runtime, delta_exec;
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime)
resched_task(rq_of(cfs_rq)->curr);
}
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/* 'current' is not kept within the tree. */
if (se->on_rq) {
/*
* Any task has to be enqueued before it gets to execute on
* a CPU. So account for the time it spent waiting on the
* runqueue.
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
}
update_stats_curr_start(cfs_rq, se);
cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
/*
* Track our maximum slice length, if the CPU's load is at
* least twice that of our own weight (i.e. don't track it
* when there are only lesser-weight tasks around):
*/
if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
se->slice_max = max(se->slice_max,
se->sum_exec_runtime - se->prev_sum_exec_runtime);
}
#endif
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
static struct sched_entity *
pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (!cfs_rq->next)
return se;
if (wakeup_preempt_entity(cfs_rq->next, se) != 0)
return se;
return cfs_rq->next;
}
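/*
 * cfs_rq->next is a wakeup hint (set in check_preempt_wakeup()); it is
 * preferred over the leftmost entity only while it trails that entity by
 * no more than the wakeup granularity, so the unfairness it can introduce
 * stays bounded.
 */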
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
struct sched_entity *se = NULL;
if (first_fair(cfs_rq)) {
se = __pick_next_entity(cfs_rq);
se = pick_next(cfs_rq, se);
set_next_entity(cfs_rq, se);
}
return se;
}
static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
/*
* If still on the runqueue then deactivate_task()
* was not called and update_curr() has to be done:
*/
if (prev->on_rq)
update_curr(cfs_rq);
check_spread(cfs_rq, prev);
if (prev->on_rq) {
update_stats_wait_start(cfs_rq, prev);
/* Put 'current' back into the tree. */
__enqueue_entity(cfs_rq, prev);
}
cfs_rq->curr = NULL;
}
static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
#ifdef CONFIG_SCHED_HRTICK
/*
* queued ticks are scheduled to match the slice, so don't bother
* validating it and just reschedule.
*/
if (queued) {
resched_task(rq_of(cfs_rq)->curr);
return;
}
/*
* don't let the period tick interfere with the hrtick preemption
*/
if (!sched_feat(DOUBLE_TICK) &&
hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
return;
#endif
if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
check_preempt_tick(cfs_rq, curr);
}
/**************************************************
* CFS operations on tasks:
*/
#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
int requeue = rq->curr == p;
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
WARN_ON(task_rq(p) != rq);
if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
u64 slice = sched_slice(cfs_rq, se);
u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
s64 delta = slice - ran;
if (delta < 0) {
if (rq->curr == p)
resched_task(p);
return;
}
/*
* Don't schedule slices shorter than 10000ns, that just
* doesn't make sense. Rely on vruntime for fairness.
*/
if (!requeue)
delta = max(10000LL, delta);
hrtick_start(rq, delta, requeue);
}
}
#else
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}
#endif
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
* then put the task into the rbtree:
*/
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
for_each_sched_entity(se) {
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, wakeup);
wakeup = 1;
}
hrtick_start_fair(rq, rq->curr);
}
/*
* The dequeue_task method is called before nr_running is
* decreased. We remove the task from the rbtree and
* update the fair scheduling stats:
*/
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, sleep);
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight)
break;
sleep = 1;
}
hrtick_start_fair(rq, rq->curr);
}
/*
* sched_yield() support is very simple - we dequeue and enqueue.
*
* If compat_yield is turned on then we requeue to the end of the tree.
*/
static void yield_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *rightmost, *se = &curr->se;
/*
* Are we the only task in the tree?
*/
if (unlikely(cfs_rq->nr_running == 1))
return;
if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
update_rq_clock(rq);
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
return;
}
/*
* Find the rightmost entry in the rbtree:
*/
rightmost = __pick_last_entity(cfs_rq);
/*
* Already in the rightmost position?
*/
if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
return;
/*
* Minimally necessary key value to be last in the tree:
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
*/
se->vruntime = rightmost->vruntime + 1;
2007-07-09 18:51:58 +02:00
}
/*
* wake_idle() will wake a task on an idle cpu if task->cpu is
* not idle and an idle cpu is available. The span of cpus to
* search starts with cpus closest then further out as needed,
* so we always favor a closer, idle cpu.
*
* Returns the CPU we should wake onto.
*/
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
static int wake_idle(int cpu, struct task_struct *p)
{
cpumask_t tmp;
struct sched_domain *sd;
int i;
/*
* If it is idle, then it is the best cpu to run this task.
*
* This cpu is also the best, if it has more than one task already.
* Siblings must also be busy (in most cases) as they didn't already
* pick up the extra load from this cpu and hence we need not check
* sibling runqueue info. This will avoid the checks and cache miss
* penalties associated with that.
*/
if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
return cpu;
for_each_domain(cpu, sd) {
if ((sd->flags & SD_WAKE_IDLE)
|| ((sd->flags & SD_WAKE_IDLE_FAR)
&& !task_hot(p, task_rq(p)->clock, sd))) {
cpus_and(tmp, sd->span, p->cpus_allowed);
for_each_cpu_mask(i, tmp) {
if (idle_cpu(i)) {
if (i != task_cpu(p)) {
schedstat_inc(p,
se.nr_wakeups_idle);
}
return i;
}
}
} else {
break;
}
}
return cpu;
}
#else
static inline int wake_idle(int cpu, struct task_struct *p)
{
return cpu;
}
#endif
#ifdef CONFIG_SMP
static const struct sched_class fair_sched_class;
static int
wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
struct task_struct *p, int prev_cpu, int this_cpu, int sync,
int idx, unsigned long load, unsigned long this_load,
unsigned int imbalance)
{
struct task_struct *curr = this_rq->curr;
unsigned long tl = this_load;
unsigned long tl_per_task;
if (!(this_sd->flags & SD_WAKE_AFFINE))
return 0;
/*
* If the currently running task will sleep within
* a reasonable amount of time then attract this newly
* woken task:
*/
if (sync && curr->sched_class == &fair_sched_class) {
if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
p->se.avg_overlap < sysctl_sched_migration_cost)
return 1;
}
schedstat_inc(p, se.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);
/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
* of the current CPU:
*/
if (sync)
tl -= current->se.load.weight;
if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
100*(tl + p->se.load.weight) <= imbalance*load) {
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
* there is no bad imbalance.
*/
schedstat_inc(this_sd, ttwu_move_affine);
schedstat_inc(p, se.nr_wakeups_affine);
return 1;
}
return 0;
}
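/*
 * Summarizing the affine path above: the wakee is pulled to this CPU when
 * the waker/wakee pair look synchronous (both avg_overlap values below
 * sysctl_sched_migration_cost), or when this CPU's load (less the waker's
 * weight for sync wakeups) leaves enough room within the domain's
 * imbalance_pct limit.
 */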
static int select_task_rq_fair(struct task_struct *p, int sync)
{
struct sched_domain *sd, *this_sd = NULL;
int prev_cpu, this_cpu, new_cpu;
unsigned long load, this_load;
struct rq *rq, *this_rq;
unsigned int imbalance;
int idx;
prev_cpu = task_cpu(p);
rq = task_rq(p);
this_cpu = smp_processor_id();
this_rq = cpu_rq(this_cpu);
new_cpu = prev_cpu;
/*
* 'this_sd' is the first domain that both
* this_cpu and prev_cpu are present in:
*/
for_each_domain(this_cpu, sd) {
if (cpu_isset(prev_cpu, sd->span)) {
this_sd = sd;
break;
}
}
if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
goto out;
/*
* Check for affine wakeup and passive balancing possibilities.
*/
if (!this_sd)
goto out;
idx = this_sd->wake_idx;
imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
load = source_load(prev_cpu, idx);
this_load = target_load(this_cpu, idx);
if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
load, this_load, imbalance))
return this_cpu;
if (prev_cpu == this_cpu)
goto out;
/*
* Start passive balancing when half the imbalance_pct
* limit is reached.
*/
if (this_sd->flags & SD_WAKE_BALANCE) {
if (imbalance*this_load <= 100*load) {
schedstat_inc(this_sd, ttwu_move_balance);
schedstat_inc(p, se.nr_wakeups_passive);
return this_cpu;
}
}
out:
return wake_idle(new_cpu, p);
}
#endif /* CONFIG_SMP */
static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
/*
* More easily preempt negatively-niced tasks, while not making it
* harder for positively-niced tasks.
*/
gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
return gran;
}
/*
* Should 'se' preempt 'curr'?
*
* |s1
* |s2
* |s3
* g
* |<--->|c
*
* w(c, s1) = -1
* w(c, s2) = 0
* w(c, s3) = 1
*
*/
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
s64 gran, vdiff = curr->vruntime - se->vruntime;
if (vdiff < 0)
return -1;
gran = wakeup_gran(curr);
if (vdiff > gran)
return 1;
return 0;
}
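/*
 * In other words: return 1 when 'se' has at least a wakeup granularity
 * less vruntime than 'curr' (preempt), 0 when the difference is smaller
 * than that (leave 'curr' running), and -1 when 'se' already has more
 * vruntime than 'curr'.
 */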
/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
int depth = 0;
for_each_sched_entity(se)
depth++;
return depth;
}
/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se, *pse = &p->se;
int se_depth, pse_depth;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
update_curr(cfs_rq);
resched_task(curr);
return;
}
se->last_wakeup = se->sum_exec_runtime;
if (unlikely(se == pse))
return;
cfs_rq_of(pse)->next = pse;
/*
* Batch tasks do not preempt (their preemption is driven by
* the tick):
*/
if (unlikely(p->policy == SCHED_BATCH))
return;
if (!sched_feat(WAKEUP_PREEMPT))
return;
/*
* A preemption test can only be made between sibling entities that are in
* the same cfs_rq, i.e. that have a common parent. Walk up the hierarchy
* of both tasks until we find their ancestors that are siblings of a
* common parent.
*/
/* First walk up until both entities are at same depth */
se_depth = depth_se(se);
pse_depth = depth_se(pse);
while (se_depth > pse_depth) {
se_depth--;
se = parent_entity(se);
}
while (pse_depth > se_depth) {
pse_depth--;
pse = parent_entity(pse);
}
while (!is_same_group(se, pse)) {
se = parent_entity(se);
pse = parent_entity(pse);
2007-10-15 17:00:05 +02:00
}
if (wakeup_preempt_entity(se, pse) == 1)
resched_task(curr);
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
struct task_struct *p;
struct cfs_rq *cfs_rq = &rq->cfs;
struct sched_entity *se;
if (unlikely(!cfs_rq->nr_running))
return NULL;
do {
se = pick_next_entity(cfs_rq);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
p = task_of(se);
hrtick_start_fair(rq, p);
return p;
}
/*
* Account for a descheduled task:
*/
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
struct sched_entity *se = &prev->se;
struct cfs_rq *cfs_rq;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
put_prev_entity(cfs_rq, se);
}
}
#ifdef CONFIG_SMP
/**************************************************
* Fair scheduling class load-balancing methods:
*/
/*
* Load-balancing iterator. Note: while the runqueue stays locked
* during the whole iteration, the current task might be
* dequeued so the iterator has to be dequeue-safe. Here we
* achieve that by always pre-iterating before returning
* the current task:
*/
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
struct task_struct *p = NULL;
struct sched_entity *se;
if (next == &cfs_rq->tasks)
return NULL;
/* Skip over entities that are not tasks */
do {
se = list_entry(next, struct sched_entity, group_node);
next = next->next;
} while (next != &cfs_rq->tasks && !entity_is_task(se));
if (next == &cfs_rq->tasks)
return NULL;
cfs_rq->balance_iterator = next;
if (entity_is_task(se))
p = task_of(se);
return p;
}
static struct task_struct *load_balance_start_fair(void *arg)
{
struct cfs_rq *cfs_rq = arg;
return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}
static struct task_struct *load_balance_next_fair(void *arg)
{
struct cfs_rq *cfs_rq = arg;
return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}
static unsigned long
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move, struct sched_domain *sd,
enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
struct cfs_rq *cfs_rq)
{
struct rq_iterator cfs_rq_iterator;
cfs_rq_iterator.start = load_balance_start_fair;
cfs_rq_iterator.next = load_balance_next_fair;
cfs_rq_iterator.arg = cfs_rq;
return balance_tasks(this_rq, this_cpu, busiest,
max_load_move, sd, idle, all_pinned,
this_best_prio, &cfs_rq_iterator);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
long rem_load_move = max_load_move;
int busiest_cpu = cpu_of(busiest);
struct task_group *tg;
rcu_read_lock();
list_for_each_entry(tg, &task_groups, list) {
long imbalance;
unsigned long this_weight, busiest_weight;
long rem_load, max_load, moved_load;
/*
* empty group
*/
if (!aggregate(tg, sd)->task_weight)
continue;
rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
rem_load /= aggregate(tg, sd)->load + 1;
this_weight = tg->cfs_rq[this_cpu]->task_weight;
busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
imbalance = (busiest_weight - this_weight) / 2;
if (imbalance < 0)
imbalance = busiest_weight;
max_load = max(rem_load, imbalance);
moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
max_load, sd, idle, all_pinned, this_best_prio,
tg->cfs_rq[busiest_cpu]);
if (!moved_load)
continue;
move_group_shares(tg, sd, busiest_cpu, this_cpu);
moved_load *= aggregate(tg, sd)->load;
moved_load /= aggregate(tg, sd)->rq_weight + 1;
rem_load_move -= moved_load;
if (rem_load_move < 0)
break;
}
rcu_read_unlock();
return max_load_move - rem_load_move;
}
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned, int *this_best_prio)
{
return __load_balance_fair(this_rq, this_cpu, busiest,
max_load_move, sd, idle, all_pinned,
this_best_prio, &busiest->cfs);
}
#endif
static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct sched_domain *sd, enum cpu_idle_type idle)
{
struct cfs_rq *busy_cfs_rq;
struct rq_iterator cfs_rq_iterator;
cfs_rq_iterator.start = load_balance_start_fair;
cfs_rq_iterator.next = load_balance_next_fair;
for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
/*
* pass busy_cfs_rq argument into
* load_balance_[start|next]_fair iterators
*/
cfs_rq_iterator.arg = busy_cfs_rq;
if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
&cfs_rq_iterator))
return 1;
}
return 0;
}
#endif
/*
* scheduler tick hitting a task of our scheduling class:
*/
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &curr->se;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
entity_tick(cfs_rq, se, queued);
}
}
#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
/*
* Share the fairness runtime between parent and child, thus the
* total amount of pressure on the CPU stays equal - new tasks
* get a chance to run but frequent forkers are not allowed to
* monopolize the CPU. Note: the parent runqueue is locked,
* the child is not running yet.
*/
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
int this_cpu = smp_processor_id();
sched_info_queued(p);
update_curr(cfs_rq);
place_entity(cfs_rq, se, 1);
/* 'curr' will be NULL if the child belongs to a different group */
if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
curr && curr->vruntime < se->vruntime) {
/*
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
}
enqueue_task_fair(rq, p, 0);
resched_task(rq->curr);
}
/*
* Priority of the task has changed. Check to see if we preempt
* the current task.
*/
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
int oldprio, int running)
{
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
* this runqueue and our priority is higher than the current's
*/
if (running) {
if (p->prio > oldprio)
resched_task(rq->curr);
} else
check_preempt_curr(rq, p);
}
/*
* We switched to the sched_fair class.
*/
static void switched_to_fair(struct rq *rq, struct task_struct *p,
int running)
{
/*
* We were most likely switched from sched_rt, so
* kick off the schedule if running, otherwise just see
* if we can still preempt the current task.
*/
if (running)
resched_task(rq->curr);
else
check_preempt_curr(rq, p);
}
/* Account for a task changing its policy or group.
*
* This routine is mostly called to set cfs_rq->curr field when a task
* migrates between groups/classes.
*/
static void set_curr_task_fair(struct rq *rq)
{
struct sched_entity *se = &rq->curr->se;
for_each_sched_entity(se)
set_next_entity(cfs_rq_of(se), se);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
update_curr(cfs_rq);
place_entity(cfs_rq, &p->se, 1);
}
#endif
/*
* All the scheduling class methods:
*/
static const struct sched_class fair_sched_class = {
.next = &idle_sched_class,
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_fair,
#endif /* CONFIG_SMP */
.check_preempt_curr = check_preempt_wakeup,
.pick_next_task = pick_next_task_fair,
.put_prev_task = put_prev_task_fair,
#ifdef CONFIG_SMP
.load_balance = load_balance_fair,
.move_one_task = move_one_task_fair,
#endif
.set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair,
.task_new = task_new_fair,
.prio_changed = prio_changed_fair,
.switched_to = switched_to_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
.moved_group = moved_group_fair,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
struct cfs_rq *cfs_rq;
rcu_read_lock();
for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
print_cfs_rq(m, cpu, cfs_rq);
rcu_read_unlock();
}
#endif