Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: rt-group: refure unrunnable tasks
  sched: rt-group: clean up the ifdeffery
  sched: rt-group: make rt groups scheduling configurable
  sched: rt-group: interface
  sched: rt-group: deal with PI
  sched: fix incorrect irq lock usage in normalize_rt_tasks()
  sched: fair-group: separate tg->shares from task_group_lock
  hrtimer: more hrtimer_init_sleeper() fallout.
@@ -0,0 +1,59 @@
Real-Time group scheduling.
The problem space:
In order to schedule multiple groups of realtime tasks, each group must
be assigned a fixed portion of the available CPU time. Without a minimum
guarantee a realtime group can obviously fall short. A fuzzy upper limit
is of no use since it cannot be relied upon, which leaves us with just
the single fixed portion.
CPU time is divided by specifying how much time may be spent running
in a given period. Say a fixed-frame-rate realtime renderer must
deliver 25 frames a second, which yields a period of 0.04s. Now say
it will also have to play some music and respond to input, leaving it
with around 80% CPU time for the graphics. We can then give this group
a runtime of 0.8 * 0.04s = 0.032s.

This way the graphics group will have a 0.04s period with a 0.032s
runtime limit.

Now if the audio thread needs to refill the DMA buffer every 0.005s, but
needs only about 3% CPU time to do so, it can do with a runtime of
0.03 * 0.005s = 0.00015s.
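In the microsecond units used by the interface below, the same numbers
can be computed mechanically. A small illustrative userspace sketch
(not part of the patch set; runtime = utilization * period):

	#include <stdio.h>

	int main(void)
	{
		/* graphics: 25 frames/s -> 40000us period, 80% CPU */
		unsigned int gfx_period_us = 1000000 / 25;
		unsigned int gfx_runtime_us = gfx_period_us * 80 / 100;

		/* audio: DMA refill every 5000us, about 3% CPU */
		unsigned int audio_period_us = 5000;
		unsigned int audio_runtime_us = audio_period_us * 3 / 100;

		printf("graphics: period %uus runtime %uus\n",
		       gfx_period_us, gfx_runtime_us);	/* 40000, 32000 */
		printf("audio:    period %uus runtime %uus\n",
		       audio_period_us, audio_runtime_us);	/* 5000, 150 */
		return 0;
	}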
The Interface:

system wide:

/proc/sys/kernel/sched_rt_period_us
/proc/sys/kernel/sched_rt_runtime_us

CONFIG_FAIR_USER_SCHED

/sys/kernel/uids/<uid>/cpu_rt_runtime_us

or

CONFIG_FAIR_CGROUP_SCHED

/cgroup/<cgroup>/cpu.rt_runtime_us

[ time is specified in us because the interface is s32; this gives an
  operating range of ~35 minutes to 1us ]
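As an example of driving the system-wide files, a setup script could
write the period and runtime through /proc. An illustrative sketch (the
1s/0.95s values are made-up examples, not documented defaults):

	#include <stdio.h>

	/* write a single integer to a sysctl file; returns 0 on success */
	static int write_sysctl(const char *path, long val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%ld\n", val);
		return fclose(f);
	}

	int main(void)
	{
		/* example: allow RT tasks 950000us out of every 1000000us */
		write_sysctl("/proc/sys/kernel/sched_rt_period_us", 1000000);
		write_sysctl("/proc/sys/kernel/sched_rt_runtime_us", 950000);
		return 0;
	}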
The period takes values in [ 1, INT_MAX ], runtime in [ -1, INT_MAX - 1 ].

A runtime of -1 specifies runtime == period, i.e. no limit.

New groups get the period from /proc/sys/kernel/sched_rt_period_us and
a runtime of 0.
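A hypothetical helper spells out these semantics (mirroring the patch's
sched_rt_runtime(), where -1 maps to RUNTIME_INF):

	/*
	 * Hypothetical: the effective limit implied by a runtime setting.
	 * -1 means runtime == period (no limit); a new group's 0 means it
	 * gets no CPU time until explicitly configured.
	 */
	static long effective_runtime_us(long runtime_us, long period_us)
	{
		if (runtime_us == -1)
			return period_us;
		return runtime_us;
	}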
Settings are constrained to:

   \Sum_{i} runtime_{i} / global_period <= global_runtime / global_period

in order to keep the configuration schedulable.
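Since every group uses the global period, both sides can be multiplied
by global_period and the test reduces to summing the runtimes. A
hypothetical admission check (not kernel code):

	static int rt_settings_schedulable(const long *runtime_us, int ngroups,
					   long global_runtime_us)
	{
		long sum = 0;
		int i;

		/* \Sum_{i} runtime_{i} must not exceed the global runtime */
		for (i = 0; i < ngroups; i++)
			sum += runtime_us[i];

		return sum <= global_runtime_us;
	}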
@@ -25,7 +25,7 @@ SUBSYS(ns)

 /* */

-#ifdef CONFIG_FAIR_CGROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 SUBSYS(cpu_cgroup)
 #endif
+12 -6
@@ -590,7 +590,7 @@ struct user_struct {
 	struct hlist_node uidhash_node;
 	uid_t uid;

-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
 	struct task_group *tg;
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;
@@ -973,7 +973,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;

-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity *parent;
 	/* rq on which this entity is (to be) queued: */
 	struct rt_rq *rt_rq;
@@ -1541,8 +1541,6 @@ extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_rt_period;
-extern unsigned int sysctl_sched_rt_ratio;
 #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
 extern unsigned int sysctl_sched_min_bal_int_shares;
 extern unsigned int sysctl_sched_max_bal_int_shares;
@@ -1552,6 +1550,8 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 		struct file *file, void __user *buffer, size_t *length,
 		loff_t *ppos);
 #endif
+extern unsigned int sysctl_sched_rt_period;
+extern int sysctl_sched_rt_runtime;

 extern unsigned int sysctl_sched_compat_yield;
@@ -2027,16 +2027,22 @@ extern int sched_mc_power_savings, sched_smt_power_savings;

 extern void normalize_rt_tasks(void);

-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED

 extern struct task_group init_task_group;

 extern struct task_group *sched_create_group(void);
 extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
+#ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 extern unsigned long sched_group_shares(struct task_group *tg);
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+extern int sched_group_set_rt_runtime(struct task_group *tg,
+				      long rt_runtime_us);
+extern long sched_group_rt_runtime(struct task_group *tg);
+#endif

 #endif

 #ifdef CONFIG_TASK_XACCT
+19 -8
@@ -311,25 +311,36 @@ config CPUSETS

 	  Say N if unsure.

-config FAIR_GROUP_SCHED
-	bool "Fair group CPU scheduler"
+config GROUP_SCHED
+	bool "Group CPU scheduler"
 	default y
 	help
 	  This feature lets CPU scheduler recognize task groups and control CPU
 	  bandwidth allocation to such task groups.

-choice
-	depends on FAIR_GROUP_SCHED
-	prompt "Basis for grouping tasks"
-	default FAIR_USER_SCHED
+config FAIR_GROUP_SCHED
+	bool "Group scheduling for SCHED_OTHER"
+	depends on GROUP_SCHED
+	default y

-config FAIR_USER_SCHED
+config RT_GROUP_SCHED
+	bool "Group scheduling for SCHED_RR/FIFO"
+	depends on EXPERIMENTAL
+	depends on GROUP_SCHED
+	default n
+
+choice
+	depends on GROUP_SCHED
+	prompt "Basis for grouping tasks"
+	default USER_SCHED
+
+config USER_SCHED
 	bool "user id"
 	help
 	  This option will choose userid as the basis for grouping
 	  tasks, thus providing equal CPU bandwidth to each user.

-config FAIR_CGROUP_SCHED
+config CGROUP_SCHED
 	bool "Control groups"
 	depends on CGROUPS
 	help
+4 -1
@@ -630,9 +630,12 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	set_current_state(state);

 	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout))
+	if (unlikely(timeout)) {
 		hrtimer_start(&timeout->timer, timeout->timer.expires,
 				HRTIMER_MODE_ABS);
+		if (!hrtimer_active(&timeout->timer))
+			timeout->task = NULL;
+	}

 	for (;;) {
 		/* Try to acquire the lock: */
+363 -137
File diff suppressed because it is too large
+64 -38
@@ -55,14 +55,14 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 	return !list_empty(&rt_se->run_list);
 }

-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED

-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
 	if (!rt_rq->tg)
-		return SCHED_RT_FRAC;
+		return RUNTIME_INF;

-	return rt_rq->tg->rt_ratio;
+	return rt_rq->tg->rt_runtime;
 }

 #define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -89,7 +89,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

-static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
 	struct sched_rt_entity *rt_se = rt_rq->rt_se;
@@ -102,7 +102,7 @@ static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
 	}
 }

-static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 	struct sched_rt_entity *rt_se = rt_rq->rt_se;
@@ -110,11 +110,31 @@ static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
 		dequeue_rt_entity(rt_se);
 }

+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
+static int rt_se_boosted(struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *rt_rq = group_rt_rq(rt_se);
+	struct task_struct *p;
+
+	if (rt_rq)
+		return !!rt_rq->rt_nr_boosted;
+
+	p = rt_task_of(rt_se);
+	return p->prio != p->normal_prio;
+}
+
 #else

-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-	return sysctl_sched_rt_ratio;
+	if (sysctl_sched_rt_runtime == -1)
+		return RUNTIME_INF;
+
+	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }

 #define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -141,19 +161,23 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return NULL;
 }

-static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
 }

-static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }

+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
 #endif

 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);

 	if (rt_rq)
@@ -163,28 +187,26 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 	return rt_task_of(rt_se)->prio;
 }

-static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
+static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
-	unsigned int rt_ratio = sched_rt_ratio(rt_rq);
-	u64 period, ratio;
+	u64 runtime = sched_rt_runtime(rt_rq);

-	if (rt_ratio == SCHED_RT_FRAC)
+	if (runtime == RUNTIME_INF)
 		return 0;

 	if (rt_rq->rt_throttled)
-		return 1;
+		return rt_rq_throttled(rt_rq);

-	period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
-	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
-
-	if (rt_rq->rt_time > ratio) {
+	if (rt_rq->rt_time > runtime) {
 		struct rq *rq = rq_of_rt_rq(rt_rq);

 		rq->rt_throttled = 1;
 		rt_rq->rt_throttled = 1;

-		sched_rt_ratio_dequeue(rt_rq);
-		return 1;
+		if (rt_rq_throttled(rt_rq)) {
+			sched_rt_rq_dequeue(rt_rq);
+			return 1;
+		}
 	}

 	return 0;
@@ -196,17 +218,16 @@ static void update_sched_rt_period(struct rq *rq)
 	u64 period;

 	while (rq->clock > rq->rt_period_expire) {
-		period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+		period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 		rq->rt_period_expire += period;

 		for_each_leaf_rt_rq(rt_rq, rq) {
-			unsigned long rt_ratio = sched_rt_ratio(rt_rq);
-			u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+			u64 runtime = sched_rt_runtime(rt_rq);

-			rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
-			if (rt_rq->rt_throttled) {
+			rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
+			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 				rt_rq->rt_throttled = 0;
-				sched_rt_ratio_enqueue(rt_rq);
+				sched_rt_rq_enqueue(rt_rq);
 			}
 		}
@@ -239,12 +260,7 @@ static void update_curr_rt(struct rq *rq)
 		cpuacct_charge(curr, delta_exec);

 	rt_rq->rt_time += delta_exec;
-	/*
-	 * might make it a tad more accurate:
-	 *
-	 * update_sched_rt_period(rq);
-	 */
-	if (sched_rt_ratio_exceeded(rt_rq))
+	if (sched_rt_runtime_exceeded(rt_rq))
 		resched_task(curr);
 }
@@ -253,7 +269,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
 		rt_rq->highest_prio = rt_se_prio(rt_se);
 #endif
@@ -265,6 +281,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif
+#ifdef CONFIG_RT_GROUP_SCHED
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted++;
+#endif
 }

 static inline
@@ -273,7 +293,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
 	rt_rq->rt_nr_running--;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	if (rt_rq->rt_nr_running) {
 		struct rt_prio_array *array;
@@ -295,6 +315,12 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)

 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
+#ifdef CONFIG_RT_GROUP_SCHED
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted--;
+
+	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+#endif
 }

 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -303,7 +329,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);

-	if (group_rq && group_rq->rt_throttled)
+	if (group_rq && rt_rq_throttled(group_rq))
 		return;

 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -496,7 +522,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (unlikely(!rt_rq->rt_nr_running))
 		return NULL;

-	if (sched_rt_ratio_exceeded(rt_rq))
+	if (rt_rq_throttled(rt_rq))
 		return NULL;

 	do {
+16 -16
@@ -311,22 +311,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "sched_rt_period_ms",
-		.data		= &sysctl_sched_rt_period,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "sched_rt_ratio",
-		.data		= &sysctl_sched_rt_ratio,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
 #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
 	{
 		.ctl_name	= CTL_UNNUMBERED,
@@ -346,6 +330,22 @@ static struct ctl_table kern_table[] = {
 	},
 #endif
 #endif
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_rt_period_us",
+		.data		= &sysctl_sched_rt_period,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_rt_runtime_us",
+		.data		= &sysctl_sched_rt_runtime,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "sched_compat_yield",
+43 -7
@@ -57,7 +57,7 @@ struct user_struct root_user = {
 	.uid_keyring	= &root_user_keyring,
 	.session_keyring = &root_session_keyring,
 #endif
-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED
 	.tg		= &init_task_group,
 #endif
 };
@@ -90,7 +90,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 	return NULL;
 }

-#ifdef CONFIG_FAIR_USER_SCHED
+#ifdef CONFIG_USER_SCHED

 static void sched_destroy_user(struct user_struct *up)
 {
@@ -113,15 +113,15 @@ static void sched_switch_user(struct task_struct *p)
 	sched_move_task(p);
 }

-#else	/* CONFIG_FAIR_USER_SCHED */
+#else	/* CONFIG_USER_SCHED */

 static void sched_destroy_user(struct user_struct *up) { }
 static int sched_create_user(struct user_struct *up) { return 0; }
 static void sched_switch_user(struct task_struct *p) { }

-#endif	/* CONFIG_FAIR_USER_SCHED */
+#endif	/* CONFIG_USER_SCHED */

-#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
+#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

 static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
 static DEFINE_MUTEX(uids_mutex);
@@ -137,6 +137,7 @@ static inline void uids_mutex_unlock(void)
 }

 /* uid directory attributes */
+#ifdef CONFIG_FAIR_GROUP_SCHED
 static ssize_t cpu_shares_show(struct kobject *kobj,
 			       struct kobj_attribute *attr,
 			       char *buf)
@@ -163,10 +164,45 @@ static ssize_t cpu_shares_store(struct kobject *kobj,

 static struct kobj_attribute cpu_share_attr =
 	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   char *buf)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+
+	return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg));
+}
+
+static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
+	unsigned long rt_runtime;
+	int rc;
+
+	sscanf(buf, "%lu", &rt_runtime);
+
+	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
+
+	return (rc ? rc : size);
+}
+
+static struct kobj_attribute cpu_rt_runtime_attr =
+	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
+#endif

 /* default attributes per uid directory */
 static struct attribute *uids_attributes[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	&cpu_share_attr.attr,
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+	&cpu_rt_runtime_attr.attr,
+#endif
 	NULL
 };
@@ -269,7 +305,7 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
 	schedule_work(&up->work);
 }

-#else	/* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
+#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

 int uids_sysfs_init(void) { return 0; }
 static inline int uids_user_create(struct user_struct *up) { return 0; }
@@ -373,7 +409,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	if (up) {
-		/* This case is not possible when CONFIG_FAIR_USER_SCHED
+		/* This case is not possible when CONFIG_USER_SCHED
 		 * is defined, since we serialize alloc_uid() using
 		 * uids_mutex. Hence no need to call
 		 * sched_destroy_user() or remove_user_sysfs_dir().