Mirror of https://github.com/linux-apfs/linux-apfs.git
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
"Major changes:
- Reworked CPU capacity code, for better SMP load balancing on
systems with asymmetric CPUs. (Vincent Guittot, Morten Rasmussen)
- Reworked RT task SMP balancing to be push based instead of pull
based, to reduce latencies on large CPU count systems. (Steven
Rostedt)
- SCHED_DEADLINE support updates and fixes. (Juri Lelli)
- SCHED_DEADLINE task migration support during CPU hotplug. (Wanpeng Li)
- x86 mwait-idle optimizations and fixes. (Mike Galbraith, Len Brown)
- sched/numa improvements. (Rik van Riel)
- various cleanups"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (28 commits)
sched/core: Drop debugging leftover trace_printk call
sched/deadline: Support DL task migration during CPU hotplug
sched/core: Check for available DL bandwidth in cpuset_cpu_inactive()
sched/deadline: Always enqueue on previous rq when dl_task_timer() fires
sched/core: Remove unused argument from init_[rt|dl]_rq()
sched/deadline: Fix rt runtime corruption when dl fails its global constraints
sched/deadline: Avoid a superfluous check
sched: Improve load balancing in the presence of idle CPUs
sched: Optimize freq invariant accounting
sched: Move CFS tasks to CPUs with higher capacity
sched: Add SD_PREFER_SIBLING for SMT level
sched: Remove unused struct sched_group_capacity::capacity_orig
sched: Replace capacity_factor by usage
sched: Calculate CPU's usage statistic and put it into struct sg_lb_stats::group_usage
sched: Add struct rq::cpu_capacity_orig
sched: Make scale_rt invariant with frequency
sched: Make sched entity usage tracking scale-invariant
sched: Remove frequency scaling from cpu_capacity
sched: Track group sched_entity usage contributions
sched: Add sched_avg::utilization_avg_contrib
...
+50 -46
@@ -689,6 +689,23 @@ static inline bool got_nohz_idle_kick(void)
#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(void)
{
/*
* FIFO realtime policy runs the highest priority task. Other runnable
* tasks are of a lower priority. The scheduler tick does nothing.
*/
if (current->policy == SCHED_FIFO)
return true;

/*
* Round-robin realtime tasks time slice with other tasks at the same
* realtime priority. Is this task the only one at this priority?
*/
if (current->policy == SCHED_RR) {
struct sched_rt_entity *rt_se = &current->rt;

return rt_se->run_list.prev == rt_se->run_list.next;
}

/*
* More than one running task need preemption.
* nr_running update is assumed to be visible
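
The SCHED_RR branch above decides "am I the only task at this priority?" by comparing the two links of the task's run_list node: on a circular doubly linked list, a node that is the sole entry has both links pointing at the list head. A minimal userspace sketch of that property (illustrative types, not the kernel's list_head helpers):

#include <stdio.h>
#include <stdbool.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct list_node *new, struct list_node *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Same shape as the rt_se->run_list test in sched_can_stop_tick(). */
static bool only_entry(const struct list_node *node)
{
	return node->prev == node->next;
}

int main(void)
{
	struct list_node queue, a, b;

	list_init(&queue);
	list_add_tail(&a, &queue);
	printf("one task queued:  only_entry(a) = %d\n", only_entry(&a));	/* 1 */

	list_add_tail(&b, &queue);
	printf("two tasks queued: only_entry(a) = %d\n", only_entry(&a));	/* 0 */
	return 0;
}
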
@@ -5335,36 +5352,13 @@ static int sched_cpu_active(struct notifier_block *nfb,
static int sched_cpu_inactive(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned long flags;
long cpu = (long)hcpu;
struct dl_bw *dl_b;

switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
set_cpu_active(cpu, false);

/* explicitly allow suspend */
if (!(action & CPU_TASKS_FROZEN)) {
bool overflow;
int cpus;

rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);

raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(cpu);
overflow = __dl_overflow(dl_b, cpus, 0, 0);
raw_spin_unlock_irqrestore(&dl_b->lock, flags);

rcu_read_unlock_sched();

if (overflow)
return notifier_from_errno(-EBUSY);
}
set_cpu_active((long)hcpu, false);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}

return NOTIFY_DONE;
}

static int __init migration_init(void)
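
The CPU_DOWN_PREPARE path above refuses to take a CPU offline if the remaining CPUs could no longer carry the SCHED_DEADLINE bandwidth that has already been admitted. A rough standalone sketch of that admission arithmetic, modeled on the __dl_overflow() comparison; the scale factor and the task numbers below are invented for illustration:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Bandwidth is expressed as a fraction of one CPU, scaled by 1 << 20,
 * mirroring how SCHED_DEADLINE stores runtime/period ratios. */
#define BW_SHIFT	20
#define BW_UNIT		(1UL << BW_SHIFT)

struct dl_bw_demo {
	uint64_t bw;		/* admissible bandwidth per CPU */
	uint64_t total_bw;	/* bandwidth already admitted in this root domain */
};

/* Simplified version of the __dl_overflow() idea: would the admitted
 * bandwidth exceed what 'cpus' CPUs may carry? */
static bool dl_overflow_demo(const struct dl_bw_demo *dl_b, int cpus)
{
	return dl_b->bw * cpus < dl_b->total_bw;
}

int main(void)
{
	/* 95% of each CPU may be used by deadline/RT tasks, matching the
	 * stock sched_rt_runtime_us / sched_rt_period_us defaults. */
	struct dl_bw_demo dl_b = { .bw = BW_UNIT * 95 / 100, .total_bw = 0 };

	/* Three admitted tasks, each reserving 60% of a CPU: 1.8 CPUs total. */
	dl_b.total_bw = 3 * (BW_UNIT * 60 / 100);

	printf("down to 3 CPUs: overflow = %d\n", dl_overflow_demo(&dl_b, 3));	/* 0 */
	printf("down to 1 CPU : overflow = %d\n", dl_overflow_demo(&dl_b, 1));	/* 1 */
	return 0;
}
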
@@ -5445,17 +5439,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
break;
}

/*
* Even though we initialize ->capacity to something semi-sane,
* we leave capacity_orig unset. This allows us to detect if
* domain iteration is still funny without causing /0 traps.
*/
if (!group->sgc->capacity_orig) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_capacity not set\n");
break;
}

if (!cpumask_weight(sched_group_cpus(group))) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: empty group\n");
@@ -5939,7 +5922,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
* die on a /0 trap.
*/
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
sg->sgc->capacity_orig = sg->sgc->capacity;

/*
* Make sure the first group of this domain contains the
@@ -6250,6 +6232,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
*/

if (sd->flags & SD_SHARE_CPUCAPACITY) {
sd->flags |= SD_PREFER_SIBLING;
sd->imbalance_pct = 110;
sd->smt_gain = 1178; /* ~15% */

@@ -7015,7 +6998,6 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
*/

case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpuset_update_active_cpus(true);
break;
default:
@@ -7027,8 +7009,30 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
switch (action) {
unsigned long flags;
long cpu = (long)hcpu;
struct dl_bw *dl_b;

switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
/* explicitly allow suspend */
if (!(action & CPU_TASKS_FROZEN)) {
bool overflow;
int cpus;

rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);

raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(cpu);
overflow = __dl_overflow(dl_b, cpus, 0, 0);
raw_spin_unlock_irqrestore(&dl_b->lock, flags);

rcu_read_unlock_sched();

if (overflow)
return notifier_from_errno(-EBUSY);
}
cpuset_update_active_cpus(false);
break;
case CPU_DOWN_PREPARE_FROZEN:
@@ -7173,8 +7177,8 @@ void __init sched_init(void)
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs);
init_rt_rq(&rq->rt, rq);
init_dl_rq(&rq->dl, rq);
init_rt_rq(&rq->rt);
init_dl_rq(&rq->dl);
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
@@ -7214,7 +7218,7 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
rq->cpu_capacity = SCHED_CAPACITY_SCALE;
rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
@@ -7813,7 +7817,7 @@ static int sched_rt_global_constraints(void)
}
#endif /* CONFIG_RT_GROUP_SCHED */

static int sched_dl_global_constraints(void)
static int sched_dl_global_validate(void)
{
u64 runtime = global_rt_runtime();
u64 period = global_rt_period();
@@ -7914,11 +7918,11 @@ int sched_rt_handler(struct ctl_table *table, int write,
if (ret)
goto undo;

ret = sched_rt_global_constraints();
ret = sched_dl_global_validate();
if (ret)
goto undo;

ret = sched_dl_global_constraints();
ret = sched_rt_global_constraints();
if (ret)
goto undo;
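
The reordered sysctl path above runs sched_dl_global_validate() before sched_rt_global_constraints(), so the new RT runtime/period pair is checked against already-admitted deadline bandwidth before anything is committed, and it jumps to undo on failure. A hedged standalone sketch of that validate-then-undo pattern (the helper names and rejection rules here are invented):

#include <stdio.h>

static int old_runtime = 950000, old_period = 1000000;
static int sysctl_runtime = 950000, sysctl_period = 1000000;

/* Invented stand-ins for sched_dl_global_validate()/sched_rt_global_constraints(). */
static int validate_dl(void) { return sysctl_runtime < 0 ? -1 : 0; }
static int validate_rt(void) { return sysctl_runtime > sysctl_period ? -1 : 0; }

static int set_limits(int runtime, int period)
{
	int ret;

	old_runtime = sysctl_runtime;
	old_period = sysctl_period;
	sysctl_runtime = runtime;
	sysctl_period = period;

	/* Validate the new values against both classes before committing. */
	ret = validate_dl();
	if (ret)
		goto undo;
	ret = validate_rt();
	if (ret)
		goto undo;
	return 0;

undo:
	sysctl_runtime = old_runtime;
	sysctl_period = old_period;
	return ret;
}

int main(void)
{
	printf("set 2ms/1ms   -> %d (rejected, runtime > period)\n", set_limits(2000, 1000));
	printf("set 0.5ms/1ms -> %d (accepted)\n", set_limits(500, 1000));
	return 0;
}
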
+66 -11
@@ -69,7 +69,7 @@ void init_dl_bw(struct dl_bw *dl_b)
dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
void init_dl_rq(struct dl_rq *dl_rq)
{
dl_rq->rb_root = RB_ROOT;

@@ -218,6 +218,52 @@ static inline void set_post_schedule(struct rq *rq)
rq->post_schedule = has_pushable_dl_tasks(rq);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static void dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
struct rq *later_rq = NULL;
bool fallback = false;

later_rq = find_lock_later_rq(p, rq);

if (!later_rq) {
int cpu;

/*
* If we cannot preempt any rq, fall back to pick any
* online cpu.
*/
fallback = true;
cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
if (cpu >= nr_cpu_ids) {
/*
* Fail to find any suitable cpu.
* The task will never come back!
*/
BUG_ON(dl_bandwidth_enabled());

/*
* If admission control is disabled we
* try a little harder to let the task
* run.
*/
cpu = cpumask_any(cpu_active_mask);
}
later_rq = cpu_rq(cpu);
double_lock_balance(rq, later_rq);
}

deactivate_task(rq, p, 0);
set_task_cpu(p, later_rq->cpu);
activate_task(later_rq, p, ENQUEUE_REPLENISH);

if (!fallback)
resched_curr(later_rq);

double_unlock_balance(rq, later_rq);
}

#else

static inline
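
The fallback above first asks for any CPU that is both active and in the task's affinity mask, and only when deadline admission control is off does it settle for any active CPU at all. A small userspace model of that two-step pick, using plain bitmasks instead of the kernel cpumask API:

#include <stdio.h>

#define NR_CPUS_DEMO 8
#define INVALID_CPU  NR_CPUS_DEMO

/* First set bit in 'mask', or INVALID_CPU if the mask is empty. */
static int any_cpu(unsigned int mask)
{
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return INVALID_CPU;
}

static int pick_fallback_cpu(unsigned int active_mask, unsigned int allowed_mask)
{
	/* Prefer a CPU that is both online and in the task's affinity mask. */
	int cpu = any_cpu(active_mask & allowed_mask);

	if (cpu == INVALID_CPU) {
		/* Affinity excludes every online CPU; ignore the affinity
		 * rather than losing the task (the kernel only does this when
		 * deadline admission control is disabled). */
		cpu = any_cpu(active_mask);
	}
	return cpu;
}

int main(void)
{
	unsigned int active = 0x0f;	/* CPUs 0-3 online */

	printf("allowed 0x30 -> CPU %d\n", pick_fallback_cpu(active, 0x30));	/* falls back to 0 */
	printf("allowed 0x06 -> CPU %d\n", pick_fallback_cpu(active, 0x06));	/* CPU 1 */
	return 0;
}
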
@@ -514,7 +560,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
unsigned long flags;
struct rq *rq;

rq = task_rq_lock(current, &flags);
rq = task_rq_lock(p, &flags);

/*
* We need to take care of several possible races here:
@@ -536,6 +582,17 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
sched_clock_tick();
update_rq_clock(rq);

#ifdef CONFIG_SMP
/*
* If we find that the rq the task was on is no longer
* available, we need to select a new rq.
*/
if (unlikely(!rq->online)) {
dl_task_offline_migration(rq, p);
goto unlock;
}
#endif

/*
* If the throttle happened during sched-out; like:
*
@@ -569,7 +626,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
push_dl_task(rq);
#endif
unlock:
task_rq_unlock(rq, current, &flags);
task_rq_unlock(rq, p, &flags);

return HRTIMER_NORESTART;
}
@@ -914,6 +971,12 @@ static void yield_task_dl(struct rq *rq)
}
update_rq_clock(rq);
update_curr_dl(rq);
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
* and double the fastpath cost.
*/
rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP
@@ -1659,14 +1722,6 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
int check_resched = 1;

/*
* If p is throttled, don't consider the possibility
* of preempting rq->curr, the check will be done right
* after its runtime will get replenished.
*/
if (unlikely(p->dl.dl_throttled))
return;

if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&
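
The dl_task_timer() hunks above switch from current to p, the task that owns the timer, since the replenishment hrtimer can fire on a CPU where an unrelated task is running. The owning task is reached from the timer pointer itself; a sketch of that container_of() pattern with illustrative structure names (not necessarily the kernel's exact layout):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_demo { long expires; };

struct dl_entity_demo {
	long runtime;
	struct timer_demo dl_timer;	/* embedded replenishment timer */
};

struct task_demo {
	const char *comm;
	struct dl_entity_demo dl;
};

/* Timer callback: the argument points at the embedded timer, so the owning
 * entity (and from there the task) is recovered with container_of() instead
 * of assuming it is whatever task happens to be running right now. */
static void timer_fired(struct timer_demo *timer)
{
	struct dl_entity_demo *dl_se = container_of(timer, struct dl_entity_demo, dl_timer);
	struct task_demo *p = container_of(dl_se, struct task_demo, dl);

	printf("replenish %s\n", p->comm);
}

int main(void)
{
	struct task_demo p = { .comm = "dl-worker" };

	timer_fired(&p.dl.dl_timer);
	return 0;
}
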
@@ -71,7 +71,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
if (!se) {
struct sched_avg *avg = &cpu_rq(cpu)->avg;
P(avg->runnable_avg_sum);
P(avg->runnable_avg_period);
P(avg->avg_period);
return;
}

@@ -94,8 +94,10 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
P(se->load.weight);
#ifdef CONFIG_SMP
P(se->avg.runnable_avg_sum);
P(se->avg.runnable_avg_period);
P(se->avg.running_avg_sum);
P(se->avg.avg_period);
P(se->avg.load_avg_contrib);
P(se->avg.utilization_avg_contrib);
P(se->avg.decay_count);
#endif
#undef PN
@@ -214,6 +216,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq->runnable_load_avg);
SEQ_printf(m, " .%-30s: %ld\n", "blocked_load_avg",
cfs_rq->blocked_load_avg);
SEQ_printf(m, " .%-30s: %ld\n", "utilization_load_avg",
cfs_rq->utilization_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
SEQ_printf(m, " .%-30s: %ld\n", "tg_load_contrib",
cfs_rq->tg_load_contrib);
@@ -636,8 +640,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
P(se.load.weight);
#ifdef CONFIG_SMP
P(se.avg.runnable_avg_sum);
P(se.avg.runnable_avg_period);
P(se.avg.running_avg_sum);
P(se.avg.avg_period);
P(se.avg.load_avg_contrib);
P(se.avg.utilization_avg_contrib);
P(se.avg.decay_count);
#endif
P(policy);
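
The P() lines above come from a debug helper that stringifies the member expression and prints its value in one statement, which is why exporting a new field such as utilization_avg_contrib is a one-line addition. A userspace sketch in the spirit of that macro (the kernel's formatting details differ):

#include <stdio.h>

struct sched_avg_demo {
	long runnable_avg_sum;
	long avg_period;
	long utilization_avg_contrib;
};

/* Print " .<member name>: <value>" for a struct member, letting the
 * preprocessor turn the expression into its own label. */
#define P(F) printf("  .%-30s: %ld\n", #F, (long)(F))

int main(void)
{
	struct sched_avg_demo avg = {
		.runnable_avg_sum = 47742,
		.avg_period = 47742,
		.utilization_avg_contrib = 512,	/* values invented */
	};

	P(avg.runnable_avg_sum);
	P(avg.avg_period);
	P(avg.utilization_avg_contrib);	/* new field: just one more P() line */
	return 0;
}
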
+264 -165
File diff suppressed because it is too large
@@ -56,6 +56,19 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
*/
SCHED_FEAT(TTWU_QUEUE, true)

#ifdef HAVE_RT_PUSH_IPI
/*
* In order to avoid a thundering herd attack of CPUs that are
* lowering their priorities at the same time, and there being
* a single CPU that has an RT task that can migrate and is waiting
* to run, where the other CPUs will try to take that CPUs
* rq lock and possibly create a large contention, sending an
* IPI to that CPU and let that CPU push the RT task to where
* it should go may be a better scenario.
*/
SCHED_FEAT(RT_PUSH_IPI, true)
#endif

SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
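
A features file like the one above is just a list of SCHED_FEAT(name, default) entries; the scheduler includes the same list more than once, with different definitions of SCHED_FEAT, to generate both the feature-bit enum and the default enable mask. A generic, simplified sketch of that X-macro technique (not the kernel's exact expansion):

#include <stdio.h>

/* The feature list, written once. */
#define FEATURE_LIST(F)		\
	F(TTWU_QUEUE, 1)	\
	F(RT_PUSH_IPI, 1)	\
	F(LB_MIN, 0)

/* First expansion: an enum giving each feature a bit index. */
#define DEFINE_ENUM(name, enabled) FEAT_##name,
enum feature_bits { FEATURE_LIST(DEFINE_ENUM) NR_FEATURES };

/* Second expansion: the default enable mask built from the same list. */
#define DEFINE_MASK(name, enabled) ((unsigned)(enabled) << FEAT_##name) |
static const unsigned default_features = FEATURE_LIST(DEFINE_MASK) 0;

#define feature_enabled(name) (default_features & (1u << FEAT_##name))

int main(void)
{
	printf("RT_PUSH_IPI enabled: %u\n", !!feature_enabled(RT_PUSH_IPI));
	printf("LB_MIN enabled:      %u\n", !!feature_enabled(LB_MIN));
	return 0;
}
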
+179 -2
@@ -6,6 +6,7 @@
#include "sched.h"

#include <linux/slab.h>
#include <linux/irq_work.h>

int sched_rr_timeslice = RR_TIMESLICE;

@@ -59,7 +60,11 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
static void push_irq_work_func(struct irq_work *work);
#endif

void init_rt_rq(struct rt_rq *rt_rq)
{
struct rt_prio_array *array;
int i;
@@ -78,7 +83,14 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);

#ifdef HAVE_RT_PUSH_IPI
rt_rq->push_flags = 0;
rt_rq->push_cpu = nr_cpu_ids;
raw_spin_lock_init(&rt_rq->push_lock);
init_irq_work(&rt_rq->push_work, push_irq_work_func);
#endif
#endif /* CONFIG_SMP */
/* We start is dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;

@@ -193,7 +205,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
if (!rt_se)
goto err_free_rq;

init_rt_rq(rt_rq, cpu_rq(i));
init_rt_rq(rt_rq);
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
@@ -1778,6 +1790,164 @@ static void push_rt_tasks(struct rq *rq)
;
}

#ifdef HAVE_RT_PUSH_IPI
/*
* The search for the next cpu always starts at rq->cpu and ends
* when we reach rq->cpu again. It will never return rq->cpu.
* This returns the next cpu to check, or nr_cpu_ids if the loop
* is complete.
*
* rq->rt.push_cpu holds the last cpu returned by this function,
* or if this is the first instance, it must hold rq->cpu.
*/
static int rto_next_cpu(struct rq *rq)
{
int prev_cpu = rq->rt.push_cpu;
int cpu;

cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);

/*
* If the previous cpu is less than the rq's CPU, then it already
* passed the end of the mask, and has started from the beginning.
* We end if the next CPU is greater or equal to rq's CPU.
*/
if (prev_cpu < rq->cpu) {
if (cpu >= rq->cpu)
return nr_cpu_ids;

} else if (cpu >= nr_cpu_ids) {
/*
* We passed the end of the mask, start at the beginning.
* If the result is greater or equal to the rq's CPU, then
* the loop is finished.
*/
cpu = cpumask_first(rq->rd->rto_mask);
if (cpu >= rq->cpu)
return nr_cpu_ids;
}
rq->rt.push_cpu = cpu;

/* Return cpu to let the caller know if the loop is finished or not */
return cpu;
}

static int find_next_push_cpu(struct rq *rq)
{
struct rq *next_rq;
int cpu;

while (1) {
cpu = rto_next_cpu(rq);
if (cpu >= nr_cpu_ids)
break;
next_rq = cpu_rq(cpu);

/* Make sure the next rq can push to this rq */
if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
break;
}

return cpu;
}

#define RT_PUSH_IPI_EXECUTING 1
#define RT_PUSH_IPI_RESTART 2

static void tell_cpu_to_push(struct rq *rq)
{
int cpu;

if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
raw_spin_lock(&rq->rt.push_lock);
/* Make sure it's still executing */
if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
/*
* Tell the IPI to restart the loop as things have
* changed since it started.
*/
rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
raw_spin_unlock(&rq->rt.push_lock);
return;
}
raw_spin_unlock(&rq->rt.push_lock);
}

/* When here, there's no IPI going around */

rq->rt.push_cpu = rq->cpu;
cpu = find_next_push_cpu(rq);
if (cpu >= nr_cpu_ids)
return;

rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;

irq_work_queue_on(&rq->rt.push_work, cpu);
}

/* Called from hardirq context */
static void try_to_push_tasks(void *arg)
{
struct rt_rq *rt_rq = arg;
struct rq *rq, *src_rq;
int this_cpu;
int cpu;

this_cpu = rt_rq->push_cpu;

/* Paranoid check */
BUG_ON(this_cpu != smp_processor_id());

rq = cpu_rq(this_cpu);
src_rq = rq_of_rt_rq(rt_rq);

again:
if (has_pushable_tasks(rq)) {
raw_spin_lock(&rq->lock);
push_rt_task(rq);
raw_spin_unlock(&rq->lock);
}

/* Pass the IPI to the next rt overloaded queue */
raw_spin_lock(&rt_rq->push_lock);
/*
* If the source queue changed since the IPI went out,
* we need to restart the search from that CPU again.
*/
if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
rt_rq->push_cpu = src_rq->cpu;
}

cpu = find_next_push_cpu(src_rq);

if (cpu >= nr_cpu_ids)
rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
raw_spin_unlock(&rt_rq->push_lock);

if (cpu >= nr_cpu_ids)
return;

/*
* It is possible that a restart caused this CPU to be
* chosen again. Don't bother with an IPI, just see if we
* have more to push.
*/
if (unlikely(cpu == rq->cpu))
goto again;

/* Try the next RT overloaded CPU */
irq_work_queue_on(&rt_rq->push_work, cpu);
}

static void push_irq_work_func(struct irq_work *work)
{
struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);

try_to_push_tasks(rt_rq);
}
#endif /* HAVE_RT_PUSH_IPI */

static int pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, ret = 0, cpu;
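
The rto_next_cpu()/find_next_push_cpu() pair above walks the root domain's RT-overload mask in a circle, starting just after the source CPU and stopping once the scan would reach it again, so the IPI visits each overloaded CPU at most once. A userspace model of that wrap-around scan with an invented 8-CPU mask:

#include <stdio.h>

#define NR_CPUS 8

/* Return the next set bit after 'prev' in 'mask', or NR_CPUS if none. */
static int next_set_cpu(unsigned int mask, int prev)
{
	for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

/* One step of the scan: starts at src_cpu, never returns src_cpu, and
 * reports NR_CPUS once the search has wrapped back around to src_cpu. */
static int rto_next_cpu_model(unsigned int rto_mask, int src_cpu, int *push_cpu)
{
	int prev = *push_cpu;
	int cpu = next_set_cpu(rto_mask, prev);

	if (prev < src_cpu) {
		/* Already wrapped once: stop when we reach src_cpu again. */
		if (cpu >= src_cpu)
			return NR_CPUS;
	} else if (cpu >= NR_CPUS) {
		/* Fell off the end of the mask: wrap to the beginning. */
		cpu = next_set_cpu(rto_mask, -1);
		if (cpu >= src_cpu)
			return NR_CPUS;
	}
	*push_cpu = cpu;
	return cpu;
}

int main(void)
{
	unsigned int rto_mask = 0xb6;	/* CPUs 1, 2, 4, 5 and 7 are RT-overloaded */
	int src_cpu = 4, push_cpu = src_cpu, cpu;

	/* Visits 5 and 7, wraps to 1 and 2, and stops before reaching CPU 4. */
	while ((cpu = rto_next_cpu_model(rto_mask, src_cpu, &push_cpu)) < NR_CPUS)
		printf("IPI would be chained to CPU %d\n", cpu);
	return 0;
}
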
@@ -1793,6 +1963,13 @@ static int pull_rt_task(struct rq *this_rq)
*/
smp_rmb();

#ifdef HAVE_RT_PUSH_IPI
if (sched_feat(RT_PUSH_IPI)) {
tell_cpu_to_push(this_rq);
return 0;
}
#endif

for_each_cpu(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;
+33 -5
@@ -6,6 +6,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>

@@ -362,8 +363,14 @@ struct cfs_rq {
* Under CFS, load is tracked on a per-entity basis and aggregated up.
* This allows for the description of both thread and group usage (in
* the FAIR_GROUP_SCHED case).
* runnable_load_avg is the sum of the load_avg_contrib of the
* sched_entities on the rq.
* blocked_load_avg is similar to runnable_load_avg except that its
* the blocked sched_entities on the rq.
* utilization_load_avg is the sum of the average running time of the
* sched_entities on the rq.
*/
unsigned long runnable_load_avg, blocked_load_avg;
unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
atomic64_t decay_counter;
u64 last_decay;
atomic_long_t removed_load;
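
The comment above defines the new utilization_load_avg in the same way as runnable_load_avg: both are sums of per-entity contributions, one weighted by load, the other by actual running time. A toy illustration of that aggregation with invented per-entity values:

#include <stdio.h>

/* Per-entity contributions, in the spirit of struct sched_avg. */
struct entity {
	unsigned long load_avg_contrib;		/* weighted runnable time */
	unsigned long utilization_avg_contrib;	/* actual running time */
};

int main(void)
{
	struct entity runnable[] = {
		{ .load_avg_contrib = 512, .utilization_avg_contrib = 300 },
		{ .load_avg_contrib = 256, .utilization_avg_contrib = 256 },
	};
	unsigned long runnable_load_avg = 0, utilization_load_avg = 0;

	/* runnable_load_avg: sum of load_avg_contrib of the runnable entities;
	 * utilization_load_avg: sum of their running-time contributions. */
	for (unsigned i = 0; i < sizeof(runnable) / sizeof(runnable[0]); i++) {
		runnable_load_avg += runnable[i].load_avg_contrib;
		utilization_load_avg += runnable[i].utilization_avg_contrib;
	}
	printf("runnable_load_avg=%lu utilization_load_avg=%lu\n",
	       runnable_load_avg, utilization_load_avg);
	return 0;
}
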
@@ -418,6 +425,11 @@ static inline int rt_bandwidth_enabled(void)
return sysctl_sched_rt_runtime >= 0;
}

/* RT IPI pull logic requires IRQ_WORK */
#ifdef CONFIG_IRQ_WORK
# define HAVE_RT_PUSH_IPI
#endif

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
struct rt_prio_array active;
@@ -435,7 +447,13 @@ struct rt_rq {
unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
#ifdef HAVE_RT_PUSH_IPI
int push_flags;
int push_cpu;
struct irq_work push_work;
raw_spinlock_t push_lock;
#endif
#endif /* CONFIG_SMP */
int rt_queued;

int rt_throttled;
@@ -597,6 +615,7 @@ struct rq {
struct sched_domain *sd;

unsigned long cpu_capacity;
unsigned long cpu_capacity_orig;

unsigned char idle_balance;
/* For active balancing */
@@ -807,7 +826,7 @@ struct sched_group_capacity {
* CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
* for a single CPU.
*/
unsigned int capacity, capacity_orig;
unsigned int capacity;
unsigned long next_update;
int imbalance; /* XXX unrelated to capacity but shared group state */
/*
@@ -1368,9 +1387,18 @@ static inline int hrtick_enabled(struct rq *rq)

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);

#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
return SCHED_CAPACITY_SCALE;
}
#endif

static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
rq->rt_avg += rt_delta;
rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
sched_avg_update(rq);
}
#else
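
arch_scale_freq_capacity() above defaults to SCHED_CAPACITY_SCALE (1024), i.e. "always running at full capacity"; an architecture that can report its current frequency lets sched_rt_avg_update() weight rt_delta by how fast the CPU was actually running. A hedged sketch of that scaling with an invented frequency hook:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

/* Invented stand-in for an arch frequency hook: current frequency as a
 * fraction of the maximum, scaled to SCHED_CAPACITY_SCALE. The default
 * kernel implementation simply returns SCHED_CAPACITY_SCALE. */
static unsigned long freq_capacity(unsigned long cur_khz, unsigned long max_khz)
{
	return cur_khz * SCHED_CAPACITY_SCALE / max_khz;
}

int main(void)
{
	unsigned long rt_delta = 1000;	/* RT/IRQ time delta, value invented */

	/* At full speed the delta is weighted by 1024; at half the maximum
	 * frequency the same wall-clock delta represents half as much work,
	 * so it is weighted by 512 instead. */
	printf("full speed: rt_avg += %lu\n", rt_delta * freq_capacity(1800000, 1800000));
	printf("half speed: rt_avg += %lu\n", rt_delta * freq_capacity(900000, 1800000));
	return 0;
}
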
@@ -1643,8 +1671,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
extern void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);