Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  sched: Resched proper CPU on yield_to()
  sched: Allow users with sufficient RLIMIT_NICE to change from SCHED_IDLE policy
  sched: Allow SCHED_BATCH to preempt SCHED_IDLE tasks
  sched: Clean up the IRQ_TIME_ACCOUNTING code
  sched: Add #ifdef around irq time accounting functions
  sched, autogroup: Stop claiming ownership of the root task group
  sched, autogroup: Stop going ahead if autogroup is disabled
  sched, autogroup, sysctl: Use proc_dointvec_minmax() instead
  sched: Fix the group_imb logic
  sched: Clean up some f_b_g() comments
  sched: Clean up remnants of sd_idle
  sched: Wholesale removal of sd_idle logic
  sched: Add yield_to(task, preempt) functionality
  sched: Use a buddy to implement yield_task_fair()
  sched: Limit the scope of clear_buddies
  sched: Check the right ->nr_running in yield_task_fair()
  sched: Avoid expensive initial update_cfs_load(), on UP too
  sched: Fix switch_from_fair()
  sched: Simplify the idle scheduling class
  softirqs: Account ksoftirqd time as cpustat softirq
  ...
@@ -30,6 +30,9 @@ typedef u64 cputime64_t;
#define cputime64_to_jiffies64(__ct) (__ct)
#define jiffies64_to_cputime64(__jif) (__jif)
#define cputime_to_cputime64(__ct) ((u64) __ct)
#define cputime64_gt(__a, __b) ((__a) > (__b))

#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct)


/*
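On asm-generic, cputime64_t is just a 64-bit jiffies count, so the new nsecs_to_cputime64() wrapper reduces to nsecs_to_jiffies64() (declared in the jiffies header hunk below). A minimal sketch of the comparison pattern it enables, mirroring the irqtime_account_*_update() helpers later in this diff; the surrounding variables are illustrative only:

/* Sketch only: check whether newly observed hardirq nanoseconds
 * amount to at least one more tick than cpustat already holds. */
u64 latest_ns = this_cpu_read(cpu_hardirq_time);
int pending = 0;

if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
        pending = 1;    /* caller would fold one more tick into cpustat->irq */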
@@ -427,6 +427,13 @@ extern void raise_softirq(unsigned int nr);
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
        return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
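The per-CPU ksoftirqd pointer is now exposed here (the matching DEFINE_PER_CPU appears in the softirq hunk further down), so callers can compare a task against the local CPU's softirq daemon instead of testing the PF_KSOFTIRQD flag. A minimal sketch of the intended use, matching the account_system_vtime() change below; account_softirq_time() is a hypothetical stand-in for the caller's action:

/* Sketch only: distinguish ksoftirqd from softirq work done on behalf
 * of some other task. */
struct task_struct *curr = current;

if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
        account_softirq_time(curr);     /* hypothetical helper */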
@@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x);
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
extern u64 nsecs_to_jiffies64(u64 n);
extern unsigned long nsecs_to_jiffies(u64 n);

#define TIMESTAMP_SIZE 30
@@ -1058,6 +1058,7 @@ struct sched_class {
        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
        void (*yield_task) (struct rq *rq);
        bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

@@ -1084,12 +1085,10 @@ struct sched_class {
        void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
        void (*task_fork) (struct task_struct *p);

        void (*switched_from) (struct rq *this_rq, struct task_struct *task,
                               int running);
        void (*switched_to) (struct rq *this_rq, struct task_struct *task,
                             int running);
        void (*switched_from) (struct rq *this_rq, struct task_struct *task);
        void (*switched_to) (struct rq *this_rq, struct task_struct *task);
        void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
                              int oldprio, int running);
                              int oldprio);

        unsigned int (*get_rr_interval) (struct rq *rq,
                                         struct task_struct *task);
@@ -1715,7 +1714,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
/*
 * Per process flags
 */
#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
@@ -1945,8 +1943,6 @@ int sched_rt_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);

extern unsigned int sysctl_sched_compat_yield;

#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
@@ -1977,6 +1973,7 @@ static inline int rt_mutex_getprio(struct task_struct *p)
# define rt_mutex_adjust_pi(p) do { } while (0)
#endif

extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
+261 -33
@@ -324,7 +324,7 @@ struct cfs_rq {
         * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
        struct sched_entity *curr, *next, *last;
        struct sched_entity *curr, *next, *last, *skip;

        unsigned int nr_spread_over;

@@ -1683,6 +1683,39 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
                __release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
        __acquires(rq1->lock)
        __acquires(rq2->lock)
{
        BUG_ON(!irqs_disabled());
        BUG_ON(rq1 != rq2);
        raw_spin_lock(&rq1->lock);
        __acquire(rq2->lock); /* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
{
        BUG_ON(rq1 != rq2);
        raw_spin_unlock(&rq1->lock);
        __release(rq2->lock);
}

#endif

static void calc_load_account_idle(struct rq *this_rq);
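On UP the two "runqueues" are necessarily the same one, so the stubs above take a single lock while keeping the sparse annotations balanced. The calling convention (interrupts disabled by the caller) matches what yield_to() does later in this diff; a minimal sketch of that pairing, with rq and p_rq standing in for whatever runqueues the caller holds:

/* Sketch of the caller-side pairing; yield_to() below follows this pattern. */
unsigned long flags;

local_irq_save(flags);          /* double_rq_lock() expects irqs already off */
double_rq_lock(rq, p_rq);
/* ... operate on both runqueues ... */
double_rq_unlock(rq, p_rq);
local_irq_restore(flags);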
@@ -1877,7 +1910,7 @@ void account_system_vtime(struct task_struct *curr)
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);

        irq_time_write_end();
@@ -1917,8 +1950,40 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
        sched_rt_avg_update(rq, irq_delta);
}

static int irqtime_account_hi_update(void)
{
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_hardirq_time);
        if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

static int irqtime_account_si_update(void)
{
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_softirq_time);
        if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime (0)

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
        rq->clock_task += delta;
@@ -2022,14 +2087,14 @@ inline int task_curr(const struct task_struct *p)

static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio, int running)
                                       int oldprio)
{
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
                        prev_class->switched_from(rq, p, running);
                p->sched_class->switched_to(rq, p, running);
        } else
                p->sched_class->prio_changed(rq, p, oldprio, running);
                        prev_class->switched_from(rq, p);
                p->sched_class->switched_to(rq, p);
        } else if (oldprio != p->prio)
                p->sched_class->prio_changed(rq, p, oldprio);
}

static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
@@ -2542,6 +2607,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.sum_exec_runtime = 0;
        p->se.prev_sum_exec_runtime = 0;
        p->se.nr_migrations = 0;
        p->se.vruntime = 0;

#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -3546,6 +3612,32 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
        }
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @target_cputime64: pointer to cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
                        cputime_t cputime_scaled, cputime64_t *target_cputime64)
{
        cputime64_t tmp = cputime_to_cputime64(cputime);

        /* Add system time to process. */
        p->stime = cputime_add(p->stime, cputime);
        p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        *target_cputime64 = cputime64_add(*target_cputime64, tmp);
        cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);

        /* Account for system time used */
        acct_update_integrals(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
@@ -3557,36 +3649,26 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
                         cputime_t cputime, cputime_t cputime_scaled)
{
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
        cputime64_t tmp;
        cputime64_t *target_cputime64;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime, cputime_scaled);
                return;
        }

        /* Add system time to process. */
        p->stime = cputime_add(p->stime, cputime);
        p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        tmp = cputime_to_cputime64(cputime);
        if (hardirq_count() - hardirq_offset)
                cpustat->irq = cputime64_add(cpustat->irq, tmp);
                target_cputime64 = &cpustat->irq;
        else if (in_serving_softirq())
                cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
                target_cputime64 = &cpustat->softirq;
        else
                cpustat->system = cputime64_add(cpustat->system, tmp);
                target_cputime64 = &cpustat->system;

        cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);

        /* Account for system time used */
        acct_update_integrals(p);
        __account_system_time(p, cputime, cputime_scaled, target_cputime64);
}

/*
 * Account for involuntary wait time.
 * @steal: the cpu time spent in involuntary wait
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
@@ -3614,6 +3696,73 @@ void account_idle_time(cputime_t cputime)

#ifndef CONFIG_VIRT_CPU_ACCOUNTING

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 * - check for guest_time
 * - else account as system_time
 *
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq)
{
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;

        if (irqtime_account_hi_update()) {
                cpustat->irq = cputime64_add(cpustat->irq, tmp);
        } else if (irqtime_account_si_update()) {
                cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time do not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
                                        &cpustat->softirq);
        } else if (user_tick) {
                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        } else if (p == rq->idle) {
                account_idle_time(cputime_one_jiffy);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
        } else {
                __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
                                        &cpustat->system);
        }
}

static void irqtime_account_idle_ticks(int ticks)
{
        int i;
        struct rq *rq = this_rq();

        for (i = 0; i < ticks; i++)
                irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static void irqtime_account_idle_ticks(int ticks) {}
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
@@ -3624,6 +3773,11 @@ void account_process_tick(struct task_struct *p, int user_tick)
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq);
                return;
        }

        if (user_tick)
                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
@@ -3649,6 +3803,12 @@ void account_steal_ticks(unsigned long ticks)
 */
void account_idle_ticks(unsigned long ticks)
{

        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        account_idle_time(jiffies_to_cputime(ticks));
}
@@ -4547,11 +4707,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)

        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq) {
        if (on_rq)
                enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);

                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        check_class_changed(rq, p, prev_class, oldprio);
        task_rq_unlock(rq, &flags);
}

@@ -4799,12 +4958,15 @@ recheck:
                            param->sched_priority > rlim_rtprio)
                                return -EPERM;
                }

                /*
                 * Like positive nice levels, dont allow tasks to
                 * move out of SCHED_IDLE either:
                 * Treat SCHED_IDLE as nice 20. Only allow a switch to
                 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
                 */
                if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
                if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
                        if (!can_nice(p, TASK_NICE(p)))
                                return -EPERM;
                }

                /* can't change other user's priorities */
                if (!check_same_owner(p))
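The SCHED_IDLE check just above implements "Allow users with sufficient RLIMIT_NICE to change from SCHED_IDLE policy" from the merge description: SCHED_IDLE is treated as nice 20, so leaving it is permitted when can_nice() says the resource limit allows it. A self-contained userspace sketch of the call this now permits without CAP_SYS_NICE (error handling trimmed; assumes RLIMIT_NICE is generous enough, and uses glibc's SCHED_OTHER, the userspace name for the kernel's SCHED_NORMAL):

#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 0 };

        /* Previously an unprivileged SCHED_IDLE task could never change
         * policy; with this change it may, provided RLIMIT_NICE would
         * allow the equivalent nice level. */
        if (sched_setscheduler(0, SCHED_OTHER, &sp) == -1)
                perror("sched_setscheduler");
        return 0;
}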
@@ -4879,11 +5041,10 @@ recheck:

        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq) {
        if (on_rq)
                activate_task(rq, p, 0);

                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        check_class_changed(rq, p, prev_class, oldprio);
        __task_rq_unlock(rq);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

@@ -5300,6 +5461,65 @@ void __sched yield(void)
}
EXPORT_SYMBOL(yield);

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Returns true if we indeed boosted the target task.
 */
bool __sched yield_to(struct task_struct *p, bool preempt)
{
        struct task_struct *curr = current;
        struct rq *rq, *p_rq;
        unsigned long flags;
        bool yielded = 0;

        local_irq_save(flags);
        rq = this_rq();

again:
        p_rq = task_rq(p);
        double_rq_lock(rq, p_rq);
        while (task_rq(p) != p_rq) {
                double_rq_unlock(rq, p_rq);
                goto again;
        }

        if (!curr->sched_class->yield_to_task)
                goto out;

        if (curr->sched_class != p->sched_class)
                goto out;

        if (task_running(p_rq, p) || p->state)
                goto out;

        yielded = curr->sched_class->yield_to_task(rq, p, preempt);
        if (yielded) {
                schedstat_inc(rq, yld_count);
                /*
                 * Make p's CPU reschedule; pick_next_entity takes care of
                 * fairness.
                 */
                if (preempt && rq != p_rq)
                        resched_task(p_rq->curr);
        }

out:
        double_rq_unlock(rq, p_rq);
        local_irq_restore(flags);

        if (yielded)
                schedule();

        return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
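yield_to() above lets the current task hand the CPU to, or accelerate, another runnable task of the same scheduling class, returning true only if the target was actually boosted. A hedged sketch of a hypothetical caller (for example, a virtualization layer nudging a probable lock holder); apart from yield_to() itself, every name here is illustrative and not taken from this diff:

/* Hypothetical caller: try to boost 'holder', otherwise just spin politely.
 * The caller must already hold a reference so 'holder' cannot go away. */
static void boost_probable_lock_holder(struct task_struct *holder)
{
        if (!yield_to(holder, false))
                cpu_relax();    /* nothing was boosted this time */
}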
@@ -7773,6 +7993,10 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
        INIT_LIST_HEAD(&cfs_rq->tasks);
#ifdef CONFIG_FAIR_GROUP_SCHED
        cfs_rq->rq = rq;
        /* allow initial update_cfs_load() to truncate */
#ifdef CONFIG_SMP
        cfs_rq->load_stamp = 1;
#endif
#endif
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
}

@@ -8086,6 +8310,8 @@ EXPORT_SYMBOL(__might_sleep);
#ifdef CONFIG_MAGIC_SYSRQ
static void normalize_task(struct rq *rq, struct task_struct *p)
{
        const struct sched_class *prev_class = p->sched_class;
        int old_prio = p->prio;
        int on_rq;

        on_rq = p->se.on_rq;
@@ -8096,6 +8322,8 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
                activate_task(rq, p, 0);
                resched_task(rq->curr);
        }

        check_class_changed(rq, p, prev_class, old_prio);
}

void normalize_rt_tasks(void)
@@ -8487,7 +8715,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
                /* Propagate contribution to hierarchy */
                raw_spin_lock_irqsave(&rq->lock, flags);
                for_each_sched_entity(se)
                        update_cfs_shares(group_cfs_rq(se), 0);
                        update_cfs_shares(group_cfs_rq(se));
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
@@ -12,7 +12,6 @@ static atomic_t autogroup_seq_nr;
static void __init autogroup_init(struct task_struct *init_task)
{
        autogroup_default.tg = &root_task_group;
        root_task_group.autogroup = &autogroup_default;
        kref_init(&autogroup_default.kref);
        init_rwsem(&autogroup_default.lock);
        init_task->signal->autogroup = &autogroup_default;
@@ -130,7 +129,7 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)

static inline bool task_group_is_autogroup(struct task_group *tg)
{
        return tg != &root_task_group && tg->autogroup;
        return !!tg->autogroup;
}

static inline struct task_group *
@@ -161,11 +160,15 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)

        p->signal->autogroup = autogroup_kref_get(ag);

        if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
                goto out;

        t = p;
        do {
                sched_move_task(t);
        } while_each_thread(p, t);

out:
        unlock_task_sighand(p, &flags);
        autogroup_kref_put(prev);
}
@@ -247,10 +250,14 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
        struct autogroup *ag = autogroup_task_get(p);

        if (!task_group_is_autogroup(ag->tg))
                goto out;

        down_read(&ag->lock);
        seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
        up_read(&ag->lock);

out:
        autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */
@@ -258,9 +265,7 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
#ifdef CONFIG_SCHED_DEBUG
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
        int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);

        if (!enabled || !tg->autogroup)
        if (!task_group_is_autogroup(tg))
                return 0;

        return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
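proc_sched_autogroup_show_task() above now bails out early for tasks that are not in an autogroup. The /proc interface it backs is unchanged; a self-contained userspace sketch that reads it (path and output format taken from the seq_printf() above):

#include <stdio.h>

int main(void)
{
        char line[64];
        FILE *f = fopen("/proc/self/autogroup", "r");

        if (f && fgets(line, sizeof(line), f))
                /* e.g. "/autogroup-25 nice 0" per the seq_printf() in this diff */
                printf("%s", line);
        if (f)
                fclose(f);
        return 0;
}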
@@ -1,6 +1,11 @@
#ifdef CONFIG_SCHED_AUTOGROUP

struct autogroup {
        /*
         * reference doesn't mean how many thread attach to this
         * autogroup now. It just stands for the number of task
         * could use this autogroup.
         */
        struct kref kref;
        struct task_group *tg;
        struct rw_semaphore lock;
@@ -179,7 +179,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (cfs_rq->rb_leftmost)
                MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
+224 -161
File diff suppressed because it is too large
+5 -21
@@ -52,31 +52,15 @@ static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p,
                             int running)
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
        /* Can this actually happen?? */
        if (running)
                resched_task(rq->curr);
        else
                check_preempt_curr(rq, p, 0);
        BUG();
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
                              int oldprio, int running)
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
        /* This can happen for hot plug CPUS */

        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running on
         * this runqueue and our priority is higher than the current's
         */
        if (running) {
                if (p->prio > oldprio)
                        resched_task(rq->curr);
        } else
                check_preempt_curr(rq, p, 0);
        BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
+10 -9
@@ -1599,8 +1599,7 @@ static void rq_offline_rt(struct rq *rq)
 * When switch from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
                             int running)
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * If there are other RT tasks then we will reschedule
@@ -1609,7 +1608,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
         * we may need to handle the pulling of RT tasks
         * now.
         */
        if (!rq->rt.rt_nr_running)
        if (p->se.on_rq && !rq->rt.rt_nr_running)
                pull_rt_task(rq);
}

@@ -1628,8 +1627,7 @@ static inline void init_sched_rt_class(void)
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
                           int running)
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
        int check_resched = 1;

@@ -1640,7 +1638,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
        if (!running) {
        if (p->se.on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
@@ -1656,10 +1654,13 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                            int oldprio, int running)
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
        if (running) {
        if (!p->se.on_rq)
                return;

        if (rq->curr == p) {
#ifdef CONFIG_SMP
                /*
                 * If our priority decreases while running, we
@@ -59,14 +59,13 @@ static void set_curr_task_stop(struct rq *rq)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p,
                             int running)
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
        BUG(); /* its impossible to change to this class */
}

static void prio_changed_stop(struct rq *rq, struct task_struct *p,
                              int oldprio, int running)
static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
        BUG(); /* how!?, what priority? */
}
+1 -2
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_stat);

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
@@ -721,7 +721,6 @@ static int run_ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        current->flags |= PF_KSOFTIRQD;
        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
+1 -8
@@ -361,20 +361,13 @@ static struct ctl_table kern_table[] = {
                .mode = 0644,
                .proc_handler = sched_rt_handler,
        },
        {
                .procname = "sched_compat_yield",
                .data = &sysctl_sched_compat_yield,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec,
        },
#ifdef CONFIG_SCHED_AUTOGROUP
        {
                .procname = "sched_autogroup_enabled",
                .data = &sysctl_sched_autogroup_enabled,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &zero,
                .extra2 = &one,
        },
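With proc_dointvec_minmax() plus the extra1/extra2 bounds, writes to sched_autogroup_enabled are now clamped to 0 or 1 instead of accepting any integer. A small self-contained userspace sketch that toggles it (assumes CONFIG_SCHED_AUTOGROUP and sufficient privileges):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/sched_autogroup_enabled", "w");

        if (!f)
                return 1;
        /* Only "0" or "1" are accepted now; out-of-range values are rejected. */
        fputs("1\n", f);
        fclose(f);
        return 0;
}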
+21 -2
@@ -645,7 +645,7 @@ u64 nsec_to_clock_t(u64 x)
}

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n: nsecs in u64
 *
@@ -657,7 +657,7 @@ u64 nsec_to_clock_t(u64 x)
 * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
@@ -674,6 +674,25 @@ unsigned long nsecs_to_jiffies(u64 n)
#endif
}


/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n: nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
        return (unsigned long)nsecs_to_jiffies64(n);
}

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
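nsecs_to_jiffies64() keeps the existing conversion arithmetic but returns the full u64, and nsecs_to_jiffies() becomes a truncating wrapper around it. In the common case where NSEC_PER_SEC is an exact multiple of HZ the conversion is a plain division; a hedged, self-contained userspace illustration of that arithmetic (HZ assumed to be 1000 here):

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 1000 /* assumed CONFIG_HZ for this illustration */

static uint64_t nsecs_to_jiffies64(uint64_t n)
{
        /* NSEC_PER_SEC % HZ == 0 here, so this mirrors the fast path above. */
        return n / (NSEC_PER_SEC / HZ);
}

int main(void)
{
        /* 1.5 ms -> 1 jiffy at HZ=1000 (integer division truncates). */
        printf("%" PRIu64 "\n", nsecs_to_jiffies64(1500000ULL));
        return 0;
}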