mirror of https://github.com/linux-apfs/linux-apfs.git
Merge branch 'linus' into core/rcu
+6 -5
@@ -572,16 +572,17 @@ void audit_send_reply(int pid, int seq, int type, int done, int multi,
     skb = audit_make_reply(pid, seq, type, done, multi, payload, size);
     if (!skb)
-        return;
+        goto out;
 
     reply->pid = pid;
     reply->skb = skb;
 
     tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
-    if (IS_ERR(tsk)) {
-        kfree(reply);
-        kfree_skb(skb);
-    }
+    if (!IS_ERR(tsk))
+        return;
+    kfree_skb(skb);
+out:
+    kfree(reply);
 }
 
 /*
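The hunk above converts audit_send_reply() to a single goto-based unwind so the preallocated reply is freed exactly once on any failure; previously a failure in audit_make_reply() leaked it. For illustration only, a stand-alone C sketch of the same unwind pattern, with malloc() standing in for the kernel allocators and the reply/skb names borrowed from the hunk:

#include <stdlib.h>

struct reply { void *skb; };

/* Two-step setup mirroring audit_send_reply(): if the second
 * allocation fails, the first must still be released, once. */
static int send_reply(void)
{
    struct reply *reply = malloc(sizeof(*reply));
    void *skb;

    if (!reply)
        return -1;

    skb = malloc(64);       /* stands in for audit_make_reply() */
    if (!skb)
        goto out;           /* only 'reply' exists at this point */

    reply->skb = skb;
    /* in the kernel, ownership would now pass to the worker thread;
     * the sketch just releases both to stay leak-free */
    free(skb);
    free(reply);
    return 0;
out:
    free(reply);
    return -1;
}

int main(void) { return send_reply() ? 1 : 0; }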
+2 -3
@@ -172,10 +172,9 @@ static void insert_hash(struct audit_chunk *chunk)
 struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 {
     struct list_head *list = chunk_hash(inode);
-    struct list_head *pos;
+    struct audit_chunk *p;
 
-    list_for_each_rcu(pos, list) {
-        struct audit_chunk *p = container_of(pos, struct audit_chunk, hash);
+    list_for_each_entry_rcu(p, list, hash) {
         if (p->watch.inode == inode) {
             get_inotify_watch(&p->watch);
             return p;
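This lookup is part of the RCU list-API cleanup the merge carries: list_for_each_rcu() plus a hand-rolled container_of() collapses into one list_for_each_entry_rcu(). For illustration, a userspace approximation of the pointer arithmetic the entry-based iterator hides (this is the idea behind the kernel's container_of(), not its exact definition):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };

/* recover the enclosing structure from a pointer to an embedded member */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct chunk {
    int inode;
    struct list_head hash;  /* embedded node, as in struct audit_chunk */
};

int main(void)
{
    struct chunk c = { .inode = 42, .hash = { NULL } };
    struct list_head *pos = &c.hash;

    /* the old loop body did this by hand for every element: */
    struct chunk *p = container_of(pos, struct chunk, hash);
    printf("%d\n", p->inode);   /* prints 42 */
    return 0;
}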
+73 -38
@@ -52,6 +52,69 @@ static void warn_legacy_capability_use(void)
     }
 }
 
+/*
+ * Version 2 capabilities worked fine, but the linux/capability.h file
+ * that accompanied their introduction encouraged their use without
+ * the necessary user-space source code changes. As such, we have
+ * created a version 3 with equivalent functionality to version 2, but
+ * with a header change to protect legacy source code from using
+ * version 2 when it wanted to use version 1. If your system has code
+ * that trips the following warning, it is using version 2 specific
+ * capabilities and may be doing so insecurely.
+ *
+ * The remedy is to either upgrade your version of libcap (to 2.10+,
+ * if the application is linked against it), or recompile your
+ * application with modern kernel headers and this warning will go
+ * away.
+ */
+
+static void warn_deprecated_v2(void)
+{
+    static int warned;
+
+    if (!warned) {
+        char name[sizeof(current->comm)];
+
+        printk(KERN_INFO "warning: `%s' uses deprecated v2"
+               " capabilities in a way that may be insecure.\n",
+               get_task_comm(name, current));
+        warned = 1;
+    }
+}
+
+/*
+ * Version check. Return the number of u32s in each capability flag
+ * array, or a negative value on error.
+ */
+static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
+{
+    __u32 version;
+
+    if (get_user(version, &header->version))
+        return -EFAULT;
+
+    switch (version) {
+    case _LINUX_CAPABILITY_VERSION_1:
+        warn_legacy_capability_use();
+        *tocopy = _LINUX_CAPABILITY_U32S_1;
+        break;
+    case _LINUX_CAPABILITY_VERSION_2:
+        warn_deprecated_v2();
+        /*
+         * fall through - v3 is otherwise equivalent to v2.
+         */
+    case _LINUX_CAPABILITY_VERSION_3:
+        *tocopy = _LINUX_CAPABILITY_U32S_3;
+        break;
+    default:
+        if (put_user((u32)_KERNEL_CAPABILITY_VERSION, &header->version))
+            return -EFAULT;
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
 /*
  * For sys_getproccap() and sys_setproccap(), any of the three
  * capability set pointers may be NULL -- indicating that that set is
@@ -71,27 +134,13 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 {
     int ret = 0;
     pid_t pid;
-    __u32 version;
     struct task_struct *target;
     unsigned tocopy;
     kernel_cap_t pE, pI, pP;
 
-    if (get_user(version, &header->version))
-        return -EFAULT;
-
-    switch (version) {
-    case _LINUX_CAPABILITY_VERSION_1:
-        warn_legacy_capability_use();
-        tocopy = _LINUX_CAPABILITY_U32S_1;
-        break;
-    case _LINUX_CAPABILITY_VERSION_2:
-        tocopy = _LINUX_CAPABILITY_U32S_2;
-        break;
-    default:
-        if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
-            return -EFAULT;
-        return -EINVAL;
-    }
+    ret = cap_validate_magic(header, &tocopy);
+    if (ret != 0)
+        return ret;
 
     if (get_user(pid, &header->pid))
         return -EFAULT;
@@ -118,7 +167,7 @@ out:
     spin_unlock(&task_capability_lock);
 
     if (!ret) {
-        struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+        struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
         unsigned i;
 
         for (i = 0; i < tocopy; i++) {
@@ -128,7 +177,7 @@ out:
         }
 
         /*
-         * Note, in the case, tocopy < _LINUX_CAPABILITY_U32S,
+         * Note, in the case, tocopy < _KERNEL_CAPABILITY_U32S,
          * we silently drop the upper capabilities here. This
          * has the effect of making older libcap
         * implementations implicitly drop upper capability
@@ -240,30 +289,16 @@ static inline int cap_set_all(kernel_cap_t *effective,
  */
 asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 {
-    struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+    struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
     unsigned i, tocopy;
     kernel_cap_t inheritable, permitted, effective;
-    __u32 version;
     struct task_struct *target;
     int ret;
     pid_t pid;
 
-    if (get_user(version, &header->version))
-        return -EFAULT;
-
-    switch (version) {
-    case _LINUX_CAPABILITY_VERSION_1:
-        warn_legacy_capability_use();
-        tocopy = _LINUX_CAPABILITY_U32S_1;
-        break;
-    case _LINUX_CAPABILITY_VERSION_2:
-        tocopy = _LINUX_CAPABILITY_U32S_2;
-        break;
-    default:
-        if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
-            return -EFAULT;
-        return -EINVAL;
-    }
+    ret = cap_validate_magic(header, &tocopy);
+    if (ret != 0)
+        return ret;
 
     if (get_user(pid, &header->pid))
         return -EFAULT;
@@ -281,7 +316,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
         permitted.cap[i] = kdata[i].permitted;
         inheritable.cap[i] = kdata[i].inheritable;
     }
-    while (i < _LINUX_CAPABILITY_U32S) {
+    while (i < _KERNEL_CAPABILITY_U32S) {
         effective.cap[i] = 0;
         permitted.cap[i] = 0;
         inheritable.cap[i] = 0;
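The default: branch writes the kernel's preferred header version back to user space before failing, which is how libcap probes for it. A minimal userspace sketch of that probe, assuming the capget(2) convention (version 0 is never valid, so the call fails with EINVAL after the header has been updated):

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/capability.h>

int main(void)
{
    /* deliberately invalid version: the kernel fills in its own */
    struct __user_cap_header_struct hdr = { .version = 0, .pid = 0 };

    if (syscall(SYS_capget, &hdr, NULL) < 0 && errno == EINVAL)
        printf("kernel prefers capability version 0x%x\n", hdr.version);
    return 0;
}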
+1 -1
@@ -2903,7 +2903,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
     cg = tsk->cgroups;
     parent = task_cgroup(tsk, subsys->subsys_id);
 
-    snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
+    snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid);
 
     /* Pin the hierarchy */
     atomic_inc(&parent->root->sb->s_active);
+7 -3
@@ -797,8 +797,10 @@ static int update_cpumask(struct cpuset *cs, char *buf)
         retval = cpulist_parse(buf, trialcs.cpus_allowed);
         if (retval < 0)
             return retval;
+
+        if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map))
+            return -EINVAL;
     }
-    cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
     retval = validate_change(cs, &trialcs);
     if (retval < 0)
         return retval;
@@ -932,9 +934,11 @@ static int update_nodemask(struct cpuset *cs, char *buf)
         retval = nodelist_parse(buf, trialcs.mems_allowed);
         if (retval < 0)
             goto done;
+
+        if (!nodes_subset(trialcs.mems_allowed,
+                node_states[N_HIGH_MEMORY]))
+            return -EINVAL;
     }
-    nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
-            node_states[N_HIGH_MEMORY]);
     oldmem = cs->mems_allowed;
     if (nodes_equal(oldmem, trialcs.mems_allowed)) {
         retval = 0;     /* Too easy - nothing to do */
+6 -1
@@ -126,6 +126,12 @@ static void __exit_signal(struct task_struct *tsk)
 
     __unhash_process(tsk);
 
+    /*
+     * Do this under ->siglock, we can race with another thread
+     * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+     */
+    flush_sigqueue(&tsk->pending);
+
     tsk->signal = NULL;
     tsk->sighand = NULL;
     spin_unlock(&sighand->siglock);
@@ -133,7 +139,6 @@ static void __exit_signal(struct task_struct *tsk)
 
     __cleanup_sighand(sighand);
     clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-    flush_sigqueue(&tsk->pending);
     if (sig) {
         flush_sigqueue(&sig->shared_pending);
         taskstats_tgid_free(sig);
-130
@@ -660,136 +660,6 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
     return 0;
 }
 
-static int count_open_files(struct fdtable *fdt)
-{
-    int size = fdt->max_fds;
-    int i;
-
-    /* Find the last open fd */
-    for (i = size/(8*sizeof(long)); i > 0; ) {
-        if (fdt->open_fds->fds_bits[--i])
-            break;
-    }
-    i = (i+1) * 8 * sizeof(long);
-    return i;
-}
-
-static struct files_struct *alloc_files(void)
-{
-    struct files_struct *newf;
-    struct fdtable *fdt;
-
-    newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
-    if (!newf)
-        goto out;
-
-    atomic_set(&newf->count, 1);
-
-    spin_lock_init(&newf->file_lock);
-    newf->next_fd = 0;
-    fdt = &newf->fdtab;
-    fdt->max_fds = NR_OPEN_DEFAULT;
-    fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
-    fdt->open_fds = (fd_set *)&newf->open_fds_init;
-    fdt->fd = &newf->fd_array[0];
-    INIT_RCU_HEAD(&fdt->rcu);
-    fdt->next = NULL;
-    rcu_assign_pointer(newf->fdt, fdt);
-out:
-    return newf;
-}
-
-/*
- * Allocate a new files structure and copy contents from the
- * passed in files structure.
- * errorp will be valid only when the returned files_struct is NULL.
- */
-static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
-{
-    struct files_struct *newf;
-    struct file **old_fds, **new_fds;
-    int open_files, size, i;
-    struct fdtable *old_fdt, *new_fdt;
-
-    *errorp = -ENOMEM;
-    newf = alloc_files();
-    if (!newf)
-        goto out;
-
-    spin_lock(&oldf->file_lock);
-    old_fdt = files_fdtable(oldf);
-    new_fdt = files_fdtable(newf);
-    open_files = count_open_files(old_fdt);
-
-    /*
-     * Check whether we need to allocate a larger fd array and fd set.
-     * Note: we're not a clone task, so the open count won't change.
-     */
-    if (open_files > new_fdt->max_fds) {
-        new_fdt->max_fds = 0;
-        spin_unlock(&oldf->file_lock);
-        spin_lock(&newf->file_lock);
-        *errorp = expand_files(newf, open_files-1);
-        spin_unlock(&newf->file_lock);
-        if (*errorp < 0)
-            goto out_release;
-        new_fdt = files_fdtable(newf);
-        /*
-         * Reacquire the oldf lock and a pointer to its fd table
-         * who knows it may have a new bigger fd table. We need
-         * the latest pointer.
-         */
-        spin_lock(&oldf->file_lock);
-        old_fdt = files_fdtable(oldf);
-    }
-
-    old_fds = old_fdt->fd;
-    new_fds = new_fdt->fd;
-
-    memcpy(new_fdt->open_fds->fds_bits,
-        old_fdt->open_fds->fds_bits, open_files/8);
-    memcpy(new_fdt->close_on_exec->fds_bits,
-        old_fdt->close_on_exec->fds_bits, open_files/8);
-
-    for (i = open_files; i != 0; i--) {
-        struct file *f = *old_fds++;
-        if (f) {
-            get_file(f);
-        } else {
-            /*
-             * The fd may be claimed in the fd bitmap but not yet
-             * instantiated in the files array if a sibling thread
-             * is partway through open(). So make sure that this
-             * fd is available to the new process.
-             */
-            FD_CLR(open_files - i, new_fdt->open_fds);
-        }
-        rcu_assign_pointer(*new_fds++, f);
-    }
-    spin_unlock(&oldf->file_lock);
-
-    /* compute the remainder to be cleared */
-    size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
-
-    /* This is long word aligned thus could use a optimized version */
-    memset(new_fds, 0, size);
-
-    if (new_fdt->max_fds > open_files) {
-        int left = (new_fdt->max_fds-open_files)/8;
-        int start = open_files / (8 * sizeof(unsigned long));
-
-        memset(&new_fdt->open_fds->fds_bits[start], 0, left);
-        memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
-    }
-
-    return newf;
-
-out_release:
-    kmem_cache_free(files_cachep, newf);
-out:
-    return NULL;
-}
-
 static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
 {
     struct files_struct *oldf, *newf;
+6 -10
@@ -52,6 +52,7 @@
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
+#include <asm/unaligned.h>
 
 static int kgdb_break_asap;
 
@@ -227,8 +228,6 @@ void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
  * GDB remote protocol parser:
  */
 
-static const char hexchars[] = "0123456789abcdef";
-
 static int hex(char ch)
 {
     if ((ch >= 'a') && (ch <= 'f'))
@@ -316,8 +315,8 @@ static void put_packet(char *buffer)
     }
 
     kgdb_io_ops->write_char('#');
-    kgdb_io_ops->write_char(hexchars[checksum >> 4]);
-    kgdb_io_ops->write_char(hexchars[checksum & 0xf]);
+    kgdb_io_ops->write_char(hex_asc_hi(checksum));
+    kgdb_io_ops->write_char(hex_asc_lo(checksum));
     if (kgdb_io_ops->flush)
         kgdb_io_ops->flush();
 
@@ -478,8 +477,8 @@ static void error_packet(char *pkt, int error)
 {
     error = -error;
     pkt[0] = 'E';
-    pkt[1] = hexchars[(error / 10)];
-    pkt[2] = hexchars[(error % 10)];
+    pkt[1] = hex_asc[(error / 10)];
+    pkt[2] = hex_asc[(error % 10)];
     pkt[3] = '\0';
 }
 
@@ -510,10 +509,7 @@ static void int_to_threadref(unsigned char *id, int value)
     scan = (unsigned char *)id;
     while (i--)
         *scan++ = 0;
-    *scan++ = (value >> 24) & 0xff;
-    *scan++ = (value >> 16) & 0xff;
-    *scan++ = (value >> 8) & 0xff;
-    *scan++ = (value & 0xff);
+    put_unaligned_be32(value, scan);
 }
 
 static struct task_struct *getthread(struct pt_regs *regs, int tid)
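hex_asc[], hex_asc_hi()/hex_asc_lo() and put_unaligned_be32() are generic helpers that replace kgdb's private hexchars[] table and its open-coded big-endian byte stores. A userspace approximation of what they do, for illustration (the kernel versions live in <linux/kernel.h> and <asm/unaligned.h>):

#include <stdint.h>
#include <stdio.h>

static const char hex_asc[] = "0123456789abcdef";
#define hex_asc_lo(x) hex_asc[(x) & 0x0f]
#define hex_asc_hi(x) hex_asc[((x) >> 4) & 0x0f]

/* big-endian store, byte by byte, independent of host alignment */
static void put_unaligned_be32(uint32_t val, unsigned char *p)
{
    p[0] = val >> 24;
    p[1] = val >> 16;
    p[2] = val >> 8;
    p[3] = val;
}

int main(void)
{
    unsigned char id[4];
    unsigned char checksum = 0xa7;

    put_unaligned_be32(0x01020304, id);
    printf("%c%c %02x%02x%02x%02x\n",
           hex_asc_hi(checksum), hex_asc_lo(checksum),
           id[0], id[1], id[2], id[3]);   /* prints: a7 01020304 */
    return 0;
}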
+9 -6
@@ -699,8 +699,9 @@ static int __register_kprobes(struct kprobe **kps, int num,
         return -EINVAL;
     for (i = 0; i < num; i++) {
         ret = __register_kprobe(kps[i], called_from);
-        if (ret < 0 && i > 0) {
-            unregister_kprobes(kps, i);
+        if (ret < 0) {
+            if (i > 0)
+                unregister_kprobes(kps, i);
             break;
         }
     }
@@ -776,8 +777,9 @@ static int __register_jprobes(struct jprobe **jps, int num,
         jp->kp.break_handler = longjmp_break_handler;
         ret = __register_kprobe(&jp->kp, called_from);
     }
-    if (ret < 0 && i > 0) {
-        unregister_jprobes(jps, i);
+    if (ret < 0) {
+        if (i > 0)
+            unregister_jprobes(jps, i);
         break;
     }
 }
@@ -920,8 +922,9 @@ static int __register_kretprobes(struct kretprobe **rps, int num,
         return -EINVAL;
     for (i = 0; i < num; i++) {
         ret = __register_kretprobe(rps[i], called_from);
-        if (ret < 0 && i > 0) {
-            unregister_kretprobes(rps, i);
+        if (ret < 0) {
+            if (i > 0)
+                unregister_kretprobes(rps, i);
             break;
         }
     }
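All three loops get the same fix: the old test `if (ret < 0 && i > 0)` only broke out when at least one probe had already been registered, so a failure on the very first entry kept the loop registering the rest. A stand-alone sketch of the corrected register-or-roll-back pattern; reg() and unreg() are hypothetical stand-ins for __register_kprobe() and unregister_kprobes():

#include <stdio.h>

static int reg(int v) { return v < 0 ? -1 : 0; }
static void unreg(int n) { printf("rolled back %d entries\n", n); }

static int register_many(const int *v, int num)
{
    int i, ret = 0;

    for (i = 0; i < num; i++) {
        ret = reg(v[i]);
        if (ret < 0) {
            if (i > 0)      /* undo only what succeeded */
                unreg(i);
            break;          /* now taken even when i == 0 */
        }
    }
    return ret;
}

int main(void)
{
    const int ok[] = { 1, 2, 3 }, bad[] = { 1, -2, 3 };

    printf("%d\n", register_many(ok, 3));   /* 0 */
    printf("%d\n", register_many(bad, 3));  /* rolls back 1, returns -1 */
    return 0;
}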
+15 -3
@@ -1337,7 +1337,19 @@ out_unreg:
     kobject_put(&mod->mkobj.kobj);
     return err;
 }
-#endif
+
+static void mod_sysfs_fini(struct module *mod)
+{
+    kobject_put(&mod->mkobj.kobj);
+}
+
+#else /* CONFIG_SYSFS */
+
+static void mod_sysfs_fini(struct module *mod)
+{
+}
+
+#endif /* CONFIG_SYSFS */
 
 static void mod_kobject_remove(struct module *mod)
 {
@@ -1345,7 +1357,7 @@ static void mod_kobject_remove(struct module *mod)
     module_param_sysfs_remove(mod);
     kobject_put(mod->mkobj.drivers_dir);
     kobject_put(mod->holders_dir);
-    kobject_put(&mod->mkobj.kobj);
+    mod_sysfs_fini(mod);
 }
 
 /*
@@ -1780,7 +1792,7 @@ static struct module *load_module(void __user *umod,
 
     /* Sanity checks against insmoding binaries or wrong arch,
        weird elf version */
-    if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
+    if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
         || hdr->e_type != ET_REL
         || !elf_check_arch(hdr)
         || hdr->e_shentsize != sizeof(*sechdrs)) {
+1 -1
@@ -1191,7 +1191,7 @@ static ssize_t relay_file_splice_read(struct file *in,
     ret = 0;
     spliced = 0;
 
-    while (len) {
+    while (len && !spliced) {
         ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
         if (ret < 0)
             break;
+56 -415
File diff suppressed because it is too large
+14 -4
@@ -59,22 +59,26 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
     return &per_cpu(sched_clock_data, cpu);
 }
 
+static __read_mostly int sched_clock_running;
+
 void sched_clock_init(void)
 {
     u64 ktime_now = ktime_to_ns(ktime_get());
-    u64 now = 0;
+    unsigned long now_jiffies = jiffies;
     int cpu;
 
     for_each_possible_cpu(cpu) {
         struct sched_clock_data *scd = cpu_sdc(cpu);
 
         scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-        scd->prev_jiffies = jiffies;
-        scd->prev_raw = now;
-        scd->tick_raw = now;
+        scd->prev_jiffies = now_jiffies;
+        scd->prev_raw = 0;
+        scd->tick_raw = 0;
         scd->tick_gtod = ktime_now;
         scd->clock = ktime_now;
     }
+
+    sched_clock_running = 1;
 }
@@ -136,6 +140,9 @@ u64 sched_clock_cpu(int cpu)
     struct sched_clock_data *scd = cpu_sdc(cpu);
     u64 now, clock;
 
+    if (unlikely(!sched_clock_running))
+        return 0ull;
+
     WARN_ON_ONCE(!irqs_disabled());
     now = sched_clock();
 
@@ -174,6 +181,9 @@ void sched_clock_tick(void)
     struct sched_clock_data *scd = this_scd();
     u64 now, now_gtod;
 
+    if (unlikely(!sched_clock_running))
+        return;
+
     WARN_ON_ONCE(!irqs_disabled());
 
     now = sched_clock();
@@ -167,11 +167,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #endif
     SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
             cfs_rq->nr_spread_over);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-#ifdef CONFIG_SMP
-    SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
-#endif
-#endif
 }
 
 static void print_cpu(struct seq_file *m, int cpu)
+104 -176
@@ -333,34 +333,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 }
 #endif
 
-/*
- * delta *= w / rw
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-    for_each_sched_entity(se) {
-        delta = calc_delta_mine(delta,
-                se->load.weight, &cfs_rq_of(se)->load);
-    }
-
-    return delta;
-}
-
-/*
- * delta *= rw / w
- */
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
-{
-    for_each_sched_entity(se) {
-        delta = calc_delta_mine(delta,
-                cfs_rq_of(se)->load.weight, &se->load);
-    }
-
-    return delta;
-}
-
 /*
  * The idea is to set a period in which each task runs once.
 *
@@ -390,54 +362,47 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-    return calc_delta_weight(__sched_period(cfs_rq->nr_running), se);
+    u64 slice = __sched_period(cfs_rq->nr_running);
+
+    for_each_sched_entity(se) {
+        cfs_rq = cfs_rq_of(se);
+
+        slice *= se->load.weight;
+        do_div(slice, cfs_rq->load.weight);
+    }
+
+    return slice;
 }
 
 /*
  * We calculate the vruntime slice of a to be inserted task
  *
- * vs = s*rw/w = p
+ * vs = s/w = p/rw
  */
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
     unsigned long nr_running = cfs_rq->nr_running;
+    unsigned long weight;
+    u64 vslice;
 
     if (!se->on_rq)
         nr_running++;
 
-    return __sched_period(nr_running);
-}
-
-/*
- * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
- * that it favours >=0 over <0.
- *
- *   -20         |
- *               |
- *     0 --------+-------
- *             .'
- *    19     .'
- *
- */
-static unsigned long
-calc_delta_asym(unsigned long delta, struct sched_entity *se)
-{
-    struct load_weight lw = {
-        .weight = NICE_0_LOAD,
-        .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
-    };
+    vslice = __sched_period(nr_running);
 
     for_each_sched_entity(se) {
-        struct load_weight *se_lw = &se->load;
+        cfs_rq = cfs_rq_of(se);
 
-        if (se->load.weight < NICE_0_LOAD)
-            se_lw = &lw;
+        weight = cfs_rq->load.weight;
+        if (!se->on_rq)
+            weight += se->load.weight;
 
-        delta = calc_delta_mine(delta,
-                cfs_rq_of(se)->load.weight, se_lw);
+        vslice *= NICE_0_LOAD;
+        do_div(vslice, weight);
     }
 
-    return delta;
+    return vslice;
 }
 
 /*
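The restored sched_slice() applies slice = slice * w / rw at each level of the entity hierarchy, where w is the entity's weight and rw the total weight of its runqueue. A worked single-level example under assumed numbers (20ms period, one nice-0 task of weight 1024 on a runqueue with total load 3072):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t slice = 20000000;  /* __sched_period(): 20ms in ns (assumed) */
    unsigned long w = 1024;     /* se->load.weight for nice 0 */
    unsigned long rw = 3072;    /* cfs_rq->load.weight */

    slice = slice * w / rw;     /* slice *= w; do_div(slice, rw); */
    printf("%llu ns\n", (unsigned long long)slice); /* 6666666 ns, a third */
    return 0;
}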
@@ -454,7 +419,11 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 
     curr->sum_exec_runtime += delta_exec;
     schedstat_add(cfs_rq, exec_clock, delta_exec);
-    delta_exec_weighted = calc_delta_fair(delta_exec, curr);
+    delta_exec_weighted = delta_exec;
+    if (unlikely(curr->load.weight != NICE_0_LOAD)) {
+        delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
+                            &curr->load);
+    }
     curr->vruntime += delta_exec_weighted;
 }
@@ -541,27 +510,10 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void
-add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
-{
-    cfs_rq->task_weight += weight;
-}
-#else
-static inline void
-add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
-{
-}
-#endif
-
 static void
 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
     update_load_add(&cfs_rq->load, se->load.weight);
-    if (!parent_entity(se))
-        inc_cpu_load(rq_of(cfs_rq), se->load.weight);
-    if (entity_is_task(se))
-        add_cfs_task_weight(cfs_rq, se->load.weight);
     cfs_rq->nr_running++;
     se->on_rq = 1;
     list_add(&se->group_node, &cfs_rq->tasks);
@@ -571,10 +523,6 @@ static void
 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
     update_load_sub(&cfs_rq->load, se->load.weight);
-    if (!parent_entity(se))
-        dec_cpu_load(rq_of(cfs_rq), se->load.weight);
-    if (entity_is_task(se))
-        add_cfs_task_weight(cfs_rq, -se->load.weight);
     cfs_rq->nr_running--;
     se->on_rq = 0;
     list_del_init(&se->group_node);
@@ -661,17 +609,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
     if (!initial) {
         /* sleeps upto a single latency don't count. */
-        if (sched_feat(NEW_FAIR_SLEEPERS)) {
-            unsigned long thresh = sysctl_sched_latency;
-
-            /*
-             * convert the sleeper threshold into virtual time
-             */
-            if (sched_feat(NORMALIZED_SLEEPER))
-                thresh = calc_delta_fair(thresh, se);
-
-            vruntime -= thresh;
-        }
+        if (sched_feat(NEW_FAIR_SLEEPERS))
+            vruntime -= sysctl_sched_latency;
 
         /* ensure we never gain time by being placed backwards. */
         vruntime = max_vruntime(se->vruntime, vruntime);
@@ -1057,24 +996,11 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
     struct task_struct *curr = this_rq->curr;
     unsigned long tl = this_load;
     unsigned long tl_per_task;
+    int balanced;
 
-    if (!(this_sd->flags & SD_WAKE_AFFINE))
+    if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
         return 0;
 
-    /*
-     * If the currently running task will sleep within
-     * a reasonable amount of time then attract this newly
-     * woken task:
-     */
-    if (sync && curr->sched_class == &fair_sched_class) {
-        if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-            p->se.avg_overlap < sysctl_sched_migration_cost)
-            return 1;
-    }
-
-    schedstat_inc(p, se.nr_wakeups_affine_attempts);
-    tl_per_task = cpu_avg_load_per_task(this_cpu);
-
     /*
      * If sync wakeup then subtract the (maximum possible)
      * effect of the currently running task from the load
@@ -1083,8 +1009,24 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
     if (sync)
         tl -= current->se.load.weight;
 
+    balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+
+    /*
+     * If the currently running task will sleep within
+     * a reasonable amount of time then attract this newly
+     * woken task:
+     */
+    if (sync && balanced && curr->sched_class == &fair_sched_class) {
+        if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
+            p->se.avg_overlap < sysctl_sched_migration_cost)
+            return 1;
+    }
+
+    schedstat_inc(p, se.nr_wakeups_affine_attempts);
+    tl_per_task = cpu_avg_load_per_task(this_cpu);
+
     if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-        100*(tl + p->se.load.weight) <= imbalance*load) {
+        balanced) {
         /*
          * This domain has SD_WAKE_AFFINE and
          * p is cache cold in this domain, and
@@ -1169,10 +1111,11 @@ static unsigned long wakeup_gran(struct sched_entity *se)
     unsigned long gran = sysctl_sched_wakeup_granularity;
 
     /*
-     * More easily preempt - nice tasks, while not making it harder for
-     * + nice tasks.
+     * More easily preempt - nice tasks, while not making
+     * it harder for + nice tasks.
      */
-    gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+    if (unlikely(se->load.weight > NICE_0_LOAD))
+        gran = calc_delta_fair(gran, &se->load);
 
     return gran;
 }
@@ -1366,90 +1309,75 @@ static struct task_struct *load_balance_next_fair(void *arg)
     return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
 }
 
-static unsigned long
-__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-        unsigned long max_load_move, struct sched_domain *sd,
-        enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
-        struct cfs_rq *cfs_rq)
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
 {
-    struct rq_iterator cfs_rq_iterator;
+    struct sched_entity *curr;
+    struct task_struct *p;
 
-    cfs_rq_iterator.start = load_balance_start_fair;
-    cfs_rq_iterator.next = load_balance_next_fair;
-    cfs_rq_iterator.arg = cfs_rq;
+    if (!cfs_rq->nr_running || !first_fair(cfs_rq))
+        return MAX_PRIO;
+
+    curr = cfs_rq->curr;
+    if (!curr)
+        curr = __pick_next_entity(cfs_rq);
 
-    return balance_tasks(this_rq, this_cpu, busiest,
-            max_load_move, sd, idle, all_pinned,
-            this_best_prio, &cfs_rq_iterator);
+    p = task_of(curr);
+
+    return p->prio;
 }
+#endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
           unsigned long max_load_move,
           struct sched_domain *sd, enum cpu_idle_type idle,
           int *all_pinned, int *this_best_prio)
 {
+    struct cfs_rq *busy_cfs_rq;
     long rem_load_move = max_load_move;
-    int busiest_cpu = cpu_of(busiest);
-    struct task_group *tg;
+    struct rq_iterator cfs_rq_iterator;
 
-    rcu_read_lock();
-    list_for_each_entry(tg, &task_groups, list) {
-        struct cfs_rq *this_cfs_rq;
-        long imbalance;
-        unsigned long this_weight, busiest_weight;
-        long rem_load, max_load, moved_load;
+    cfs_rq_iterator.start = load_balance_start_fair;
+    cfs_rq_iterator.next = load_balance_next_fair;
 
-        /*
-         * empty group
-         */
-        if (!aggregate(tg, sd)->task_weight)
-            continue;
+    for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        struct cfs_rq *this_cfs_rq;
+        long imbalance;
+        unsigned long maxload;
 
-        rem_load = rem_load_move * aggregate(tg, sd)->rq_weight;
-        rem_load /= aggregate(tg, sd)->load + 1;
+        this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
 
-        this_weight = tg->cfs_rq[this_cpu]->task_weight;
-        busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
+        imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
+        /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
+        if (imbalance <= 0)
+            continue;
 
-        imbalance = (busiest_weight - this_weight) / 2;
+        /* Don't pull more than imbalance/2 */
+        imbalance /= 2;
+        maxload = min(rem_load_move, imbalance);
 
-        if (imbalance < 0)
-            imbalance = busiest_weight;
+        *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+#else
+# define maxload rem_load_move
+#endif
+        /*
+         * pass busy_cfs_rq argument into
+         * load_balance_[start|next]_fair iterators
+         */
+        cfs_rq_iterator.arg = busy_cfs_rq;
+        rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+                           maxload, sd, idle, all_pinned,
+                           this_best_prio,
+                           &cfs_rq_iterator);
 
-        max_load = max(rem_load, imbalance);
-        moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
-                max_load, sd, idle, all_pinned, this_best_prio,
-                tg->cfs_rq[busiest_cpu]);
-
-        if (!moved_load)
-            continue;
-
-        move_group_shares(tg, sd, busiest_cpu, this_cpu);
-
-        moved_load *= aggregate(tg, sd)->load;
-        moved_load /= aggregate(tg, sd)->rq_weight + 1;
-
-        rem_load_move -= moved_load;
-        if (rem_load_move < 0)
+        if (rem_load_move <= 0)
             break;
     }
-    rcu_read_unlock();
 
     return max_load_move - rem_load_move;
 }
-#else
-static unsigned long
-load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-          unsigned long max_load_move,
-          struct sched_domain *sd, enum cpu_idle_type idle,
-          int *all_pinned, int *this_best_prio)
-{
-    return __load_balance_fair(this_rq, this_cpu, busiest,
-            max_load_move, sd, idle, all_pinned,
-            this_best_prio, &busiest->cfs);
-}
-#endif
 
 static int
 move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
@@ -513,8 +513,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
      */
     for_each_sched_rt_entity(rt_se)
         enqueue_rt_entity(rt_se);
-
-    inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -534,8 +532,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
         if (rt_rq && rt_rq->rt_nr_running)
             enqueue_rt_entity(rt_se);
     }
-
-    dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
@@ -67,6 +67,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
         preempt_enable();
 #endif
     }
+    kfree(mask_str);
     return 0;
 }
+45 -6
@@ -231,6 +231,40 @@ void flush_signals(struct task_struct *t)
     spin_unlock_irqrestore(&t->sighand->siglock, flags);
 }
 
+static void __flush_itimer_signals(struct sigpending *pending)
+{
+    sigset_t signal, retain;
+    struct sigqueue *q, *n;
+
+    signal = pending->signal;
+    sigemptyset(&retain);
+
+    list_for_each_entry_safe(q, n, &pending->list, list) {
+        int sig = q->info.si_signo;
+
+        if (likely(q->info.si_code != SI_TIMER)) {
+            sigaddset(&retain, sig);
+        } else {
+            sigdelset(&signal, sig);
+            list_del_init(&q->list);
+            __sigqueue_free(q);
+        }
+    }
+
+    sigorsets(&pending->signal, &signal, &retain);
+}
+
+void flush_itimer_signals(void)
+{
+    struct task_struct *tsk = current;
+    unsigned long flags;
+
+    spin_lock_irqsave(&tsk->sighand->siglock, flags);
+    __flush_itimer_signals(&tsk->pending);
+    __flush_itimer_signals(&tsk->signal->shared_pending);
+    spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+}
+
 void ignore_signals(struct task_struct *t)
 {
     int i;
@@ -1240,17 +1274,22 @@ void sigqueue_free(struct sigqueue *q)
 
     BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
     /*
-     * If the signal is still pending remove it from the
-     * pending queue. We must hold ->siglock while testing
-     * q->list to serialize with collect_signal().
+     * We must hold ->siglock while testing q->list
+     * to serialize with collect_signal() or with
+     * __exit_signal()->flush_sigqueue().
      */
     spin_lock_irqsave(lock, flags);
+    q->flags &= ~SIGQUEUE_PREALLOC;
+    /*
+     * If it is queued it will be freed when dequeued,
+     * like the "regular" sigqueue.
+     */
     if (!list_empty(&q->list))
-        list_del_init(&q->list);
+        q = NULL;
     spin_unlock_irqrestore(lock, flags);
 
-    q->flags &= ~SIGQUEUE_PREALLOC;
-    __sigqueue_free(q);
+    if (q)
+        __sigqueue_free(q);
 }
 
 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
@@ -62,8 +62,7 @@ static int stopmachine(void *cpu)
          * help our sisters onto their CPUs. */
         if (!prepared && !irqs_disabled)
             yield();
-        else
-            cpu_relax();
+        cpu_relax();
     }
 
     /* Ack: we are exiting. */
@@ -106,8 +105,10 @@ static int stop_machine(void)
     }
 
     /* Wait for them all to come to life. */
-    while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
+    while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
         yield();
+        cpu_relax();
+    }
 
     /* If some failed, kill them all. */
     if (ret < 0) {
+2 -4
@@ -1652,7 +1652,7 @@ asmlinkage long sys_umask(int mask)
 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
               unsigned long arg4, unsigned long arg5)
 {
-    long uninitialized_var(error);
+    long error = 0;
 
     if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
         return error;
@@ -1701,9 +1701,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
             error = PR_TIMING_STATISTICAL;
             break;
         case PR_SET_TIMING:
-            if (arg2 == PR_TIMING_STATISTICAL)
-                error = 0;
-            else
+            if (arg2 != PR_TIMING_STATISTICAL)
                 error = -EINVAL;
             break;
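After this change PR_SET_TIMING accepts only PR_TIMING_STATISTICAL and rejects anything else with EINVAL; the accepted case behaves exactly as before. A minimal userspace check, for illustration:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
    /* the only mode the kernel accepts; others now fail with EINVAL */
    if (prctl(PR_SET_TIMING, PR_TIMING_STATISTICAL, 0, 0, 0) == 0)
        printf("timing mode: %d\n", prctl(PR_GET_TIMING, 0, 0, 0, 0));
    return 0;
}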
Some files were not shown because too many files have changed in this diff.