Merge branch 'linus' into core/locking

Reason: Pull in the semaphore-related changes

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner committed on 2010-10-12 17:27:20 +02:00
1433 changed files with 20337 additions and 16359 deletions

View File

@@ -1791,19 +1791,20 @@ out:
}
/**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
* @tsk: the task to be attached
*/
- int cgroup_attach_task_current_cg(struct task_struct *tsk)
+ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
struct cgroupfs_root *root;
- struct cgroup *cur_cg;
int retval = 0;
cgroup_lock();
for_each_active_root(root) {
- cur_cg = task_cgroup_from_root(current, root);
- retval = cgroup_attach_task(cur_cg, tsk);
+ struct cgroup *from_cg = task_cgroup_from_root(from, root);
+ retval = cgroup_attach_task(from_cg, tsk);
if (retval)
break;
}
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
return retval;
}
- EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+ EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
/*
* Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
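
Note: the hunk above generalizes cgroup_attach_task_current_cg() into
cgroup_attach_task_all(); the old behaviour is the special case where
'from' is current. A minimal sketch of a compatibility wrapper with the
old semantics (whether such a wrapper is kept elsewhere, e.g. in
cgroup.h, is not shown in this diff):

    static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
    {
            return cgroup_attach_task_all(current, tsk);
    }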

View File

@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
return 0;
}
+ /*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+ */
+ void __user *compat_alloc_user_space(unsigned long len)
+ {
+ void __user *ptr;
+ /* If len would occupy more than half of the entire compat space... */
+ if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+ return NULL;
+ ptr = arch_compat_alloc_user_space(len);
+ if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+ return NULL;
+ return ptr;
+ }
+ EXPORT_SYMBOL_GPL(compat_alloc_user_space);
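
Note: compat_alloc_user_space() is intended for compat syscall wrappers
that must re-marshal a 32-bit user structure into its 64-bit layout
before calling the native implementation. A hedged sketch, assuming
made-up foo32/foo64 structures and a sys_foo() native handler (none of
which appear in this diff):

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    struct foo32 { compat_uptr_t addr; compat_size_t len; };
    struct foo64 { void __user *addr; size_t len; };

    asmlinkage long compat_sys_foo(struct foo32 __user *arg32)
    {
            struct foo32 f32;
            struct foo64 __user *arg64;

            if (copy_from_user(&f32, arg32, sizeof(f32)))
                    return -EFAULT;
            /* Build a 64-bit copy of the arguments in user space. */
            arg64 = compat_alloc_user_space(sizeof(*arg64));
            if (!arg64 ||
                put_user(compat_ptr(f32.addr), &arg64->addr) ||
                put_user(f32.len, &arg64->len))
                    return -EFAULT;
            return sys_foo(arg64);
    }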

View File

@@ -741,7 +741,7 @@ static struct console kgdbcons = {
};
#ifdef CONFIG_MAGIC_SYSRQ
- static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+ static void sysrq_handle_dbg(int key)
{
if (!dbg_io_ops) {
printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
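
Note: this hunk is part of a tree-wide change dropping the unused
struct tty_struct * argument from sysrq handlers. A handler written
against the new signature (handler name, key letter and messages are
illustrative, not from this diff):

    #include <linux/kernel.h>
    #include <linux/sysrq.h>

    static void sysrq_handle_example(int key)
    {
            printk(KERN_INFO "sysrq: example handler, key '%c'\n", key);
    }

    static struct sysrq_key_op example_sysrq_op = {
            .handler        = sysrq_handle_example,
            .help_msg       = "example(y)",
            .action_msg     = "Example action",
    };

    /* registered with: register_sysrq_key('y', &example_sysrq_op); */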

View File

@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
int i, bpno;
kdb_bp_t *bp, *bp_check;
int diag;
- int free;
char *symname = NULL;
long offset = 0ul;
int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
/*
* Find an empty bp structure to allocate
*/
- free = KDB_MAXBPT;
for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
if (bp->bp_free)
break;

View File

@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
if (argc != 1)
return KDB_ARGCOUNT;
kdb_trap_printk++;
- __handle_sysrq(*argv[1], NULL, 0);
+ __handle_sysrq(*argv[1], false);
kdb_trap_printk--;
return 0;

View File

@@ -255,7 +255,14 @@ extern void kdb_ps1(const struct task_struct *p);
extern void kdb_print_nameval(const char *name, unsigned long val);
extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
extern void kdb_meminfo_proc_show(void);
+ #ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
+ #else /* ! CONFIG_KALLSYMS */
+ static inline const char *kdb_walk_kallsyms(loff_t *pos)
+ {
+ return NULL;
+ }
+ #endif /* ! CONFIG_KALLSYMS */
extern char *kdb_getstr(char *, size_t, char *);
/* Defines for kdb_symbol_print */

View File

@@ -82,8 +82,8 @@ static char *kdb_name_table[100]; /* arbitrary size */
int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
{
int ret = 0;
- unsigned long symbolsize;
- unsigned long offset;
+ unsigned long symbolsize = 0;
+ unsigned long offset = 0;
#define knt1_size 128 /* must be >= kallsyms table size */
char *knt1 = NULL;

View File

@@ -1386,8 +1386,7 @@ static int wait_task_stopped(struct wait_opts *wo,
if (!unlikely(wo->wo_flags & WNOWAIT))
*p_code = 0;
- /* don't need the RCU readlock here as we're holding a spinlock */
- uid = __task_cred(p)->uid;
+ uid = task_uid(p);
unlock_sig:
spin_unlock_irq(&p->sighand->siglock);
if (!exit_code)
@@ -1460,7 +1459,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
}
if (!unlikely(wo->wo_flags & WNOWAIT))
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
- uid = __task_cred(p)->uid;
+ uid = task_uid(p);
spin_unlock_irq(&p->sighand->siglock);
pid = task_pid_vnr(p);

View File

@@ -300,7 +300,7 @@ out:
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
- struct vm_area_struct *mpnt, *tmp, **pprev;
+ struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
+ prev = NULL;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
struct file *file;
@@ -355,11 +356,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (IS_ERR(pol))
goto fail_nomem_policy;
vma_set_policy(tmp, pol);
+ tmp->vm_mm = mm;
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~VM_LOCKED;
- tmp->vm_mm = mm;
- tmp->vm_next = NULL;
+ tmp->vm_next = tmp->vm_prev = NULL;
file = tmp->vm_file;
if (file) {
struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
*/
*pprev = tmp;
pprev = &tmp->vm_next;
+ tmp->vm_prev = prev;
+ prev = tmp;
__vma_link_rb(mm, tmp, rb_link, rb_parent);
rb_link = &tmp->vm_rb.rb_right;
@@ -752,13 +755,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
- write_lock(&fs->lock);
+ spin_lock(&fs->lock);
if (fs->in_exec) {
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
return -EAGAIN;
}
fs->users++;
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
return 0;
}
tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1679,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
if (new_fs) {
fs = current->fs;
- write_lock(&fs->lock);
+ spin_lock(&fs->lock);
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
- write_unlock(&fs->lock);
+ spin_unlock(&fs->lock);
}
if (new_mm) {
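
Note: the dup_mmap() hunks above thread a new vm_prev back-pointer while
the old singly linked VMA chain is copied; the fs->lock hunks belong to a
separate conversion of fs_struct's rwlock to a spinlock. The list pattern
in isolation, as a sketch with illustrative types (not the real
vm_area_struct):

    struct node { struct node *next, *prev; };

    /* Wire up back-pointers while building a forward-linked chain,
     * mirroring the prev/vm_prev bookkeeping added above. */
    static void link_chain(struct node **head, struct node *items, int n)
    {
            struct node *prev = NULL, **pnext = head;
            int i;

            for (i = 0; i < n; i++) {
                    struct node *tmp = &items[i];

                    tmp->next = tmp->prev = NULL;
                    tmp->prev = prev;
                    *pnext = tmp;
                    pnext = &tmp->next;
                    prev = tmp;
            }
    }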

View File

@@ -33,10 +33,11 @@
* @children: child nodes
* @all: list head for list of all nodes
* @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- * copy of the profiling data here to allow collecting coverage data
- * for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ * files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ * object files. Used only when gcov_persist=1.
* @dentry: main debugfs entry, either a directory or data file
* @links: associated symbolic links
* @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
struct list_head children;
struct list_head all;
struct gcov_node *parent;
- struct gcov_info *info;
- struct gcov_info *ghost;
+ struct gcov_info **loaded_info;
+ struct gcov_info *unloaded_info;
struct dentry *dentry;
struct dentry **links;
+ int num_loaded;
char name[0];
};
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
};
/*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
*/
static struct gcov_info *get_node_info(struct gcov_node *node)
{
- if (node->info)
- return node->info;
+ if (node->num_loaded > 0)
+ return node->loaded_info[0];
- return node->ghost;
+ return node->unloaded_info;
}
+ /*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+ static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+ {
+ struct gcov_info *info;
+ int i = 0;
+ if (node->unloaded_info)
+ info = gcov_info_dup(node->unloaded_info);
+ else
+ info = gcov_info_dup(node->loaded_info[i++]);
+ if (!info)
+ return NULL;
+ for (; i < node->num_loaded; i++)
+ gcov_info_add(info, node->loaded_info[i]);
+ return info;
+ }
/*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
mutex_lock(&node_lock);
/*
* Read from a profiling data copy to minimize reference tracking
- * complexity and concurrent access.
+ * complexity and concurrent access and to keep accumulating multiple
+ * profiling data sets associated with one node simple.
*/
- info = gcov_info_dup(get_node_info(node));
+ info = get_accumulated_info(node);
if (!info)
goto out_unlock;
iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
return NULL;
}
+ /*
+ * Reset all profiling data associated with the specified node.
+ */
+ static void reset_node(struct gcov_node *node)
+ {
+ int i;
+ if (node->unloaded_info)
+ gcov_info_reset(node->unloaded_info);
+ for (i = 0; i < node->num_loaded; i++)
+ gcov_info_reset(node->loaded_info[i]);
+ }
static void remove_node(struct gcov_node *node);
/*
* write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
*/
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
node = get_node_by_name(info->filename);
if (node) {
/* Reset counts or remove node for unloaded modules. */
- if (node->ghost)
+ if (node->num_loaded == 0)
remove_node(node);
else
- gcov_info_reset(node->info);
+ reset_node(node);
}
/* Reset counts for open file. */
gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->all);
- node->info = info;
+ if (node->loaded_info) {
+ node->loaded_info[0] = info;
+ node->num_loaded = 1;
+ }
node->parent = parent;
if (name)
strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
struct gcov_node *node;
node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
- if (!node) {
- pr_warning("out of memory\n");
- return NULL;
- }
+ if (!node)
+ goto err_nomem;
+ if (info) {
+ node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+ GFP_KERNEL);
+ if (!node->loaded_info)
+ goto err_nomem;
+ }
init_node(node, info, name, parent);
/* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
list_add(&node->all, &all_head);
return node;
+ err_nomem:
+ kfree(node);
+ pr_warning("out of memory\n");
+ return NULL;
}
/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
list_del(&node->all);
debugfs_remove(node->dentry);
remove_links(node);
- if (node->ghost)
- gcov_info_free(node->ghost);
+ kfree(node->loaded_info);
+ if (node->unloaded_info)
+ gcov_info_free(node->unloaded_info);
kfree(node);
}
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
/*
* write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
*/
static ssize_t reset_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
mutex_lock(&node_lock);
restart:
list_for_each_entry(node, &all_head, all) {
- if (node->info)
- gcov_info_reset(node->info);
+ if (node->num_loaded > 0)
+ reset_node(node);
else if (list_empty(&node->children)) {
remove_node(node);
/* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
}
/*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
*/
- static int ghost_node(struct gcov_node *node)
+ static void add_info(struct gcov_node *node, struct gcov_info *info)
{
- node->ghost = gcov_info_dup(node->info);
- if (!node->ghost) {
- pr_warning("could not save data for '%s' (out of memory)\n",
- node->info->filename);
- return -ENOMEM;
- }
- node->info = NULL;
+ struct gcov_info **loaded_info;
+ int num = node->num_loaded;
- return 0;
+ /*
+ * Prepare new array. This is done first to simplify cleanup in
+ * case the new data set is incompatible, the node only contains
+ * unloaded data sets and there's not enough memory for the array.
+ */
+ loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+ if (!loaded_info) {
+ pr_warning("could not add '%s' (out of memory)\n",
+ info->filename);
+ return;
+ }
+ memcpy(loaded_info, node->loaded_info,
+ num * sizeof(struct gcov_info *));
+ loaded_info[num] = info;
+ /* Check if the new data set is compatible. */
+ if (num == 0) {
+ /*
+ * A module was unloaded, modified and reloaded. The new
+ * data set replaces the copy of the last one.
+ */
+ if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+ pr_warning("discarding saved data for %s "
+ "(incompatible version)\n", info->filename);
+ gcov_info_free(node->unloaded_info);
+ node->unloaded_info = NULL;
+ }
+ } else {
+ /*
+ * Two different versions of the same object file are loaded.
+ * The initial one takes precedence.
+ */
+ if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+ pr_warning("could not add '%s' (incompatible "
+ "version)\n", info->filename);
+ kfree(loaded_info);
+ return;
+ }
+ }
+ /* Overwrite previous array. */
+ kfree(node->loaded_info);
+ node->loaded_info = loaded_info;
+ node->num_loaded = num + 1;
}
/*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Return the index of a profiling data set associated with a node.
*/
- static void revive_node(struct gcov_node *node, struct gcov_info *info)
+ static int get_info_index(struct gcov_node *node, struct gcov_info *info)
{
- if (gcov_info_is_compatible(node->ghost, info))
- gcov_info_add(info, node->ghost);
- else {
- pr_warning("discarding saved data for '%s' (version changed)\n",
- info->filename);
+ int i;
+ for (i = 0; i < node->num_loaded; i++) {
+ if (node->loaded_info[i] == info)
+ return i;
}
- gcov_info_free(node->ghost);
- node->ghost = NULL;
- node->info = info;
+ return -ENOENT;
}
+ /*
+ * Save the data of a profiling data set which is being unloaded.
+ */
+ static void save_info(struct gcov_node *node, struct gcov_info *info)
+ {
+ if (node->unloaded_info)
+ gcov_info_add(node->unloaded_info, info);
+ else {
+ node->unloaded_info = gcov_info_dup(info);
+ if (!node->unloaded_info) {
+ pr_warning("could not save data for '%s' "
+ "(out of memory)\n", info->filename);
+ }
+ }
+ }
+ /*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+ static void remove_info(struct gcov_node *node, struct gcov_info *info)
+ {
+ int i;
+ i = get_info_index(node, info);
+ if (i < 0) {
+ pr_warning("could not remove '%s' (not found)\n",
+ info->filename);
+ return;
+ }
+ if (gcov_persist)
+ save_info(node, info);
+ /* Shrink array. */
+ node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+ node->num_loaded--;
+ if (node->num_loaded > 0)
+ return;
+ /* Last loaded data set was removed. */
+ kfree(node->loaded_info);
+ node->loaded_info = NULL;
+ node->num_loaded = 0;
+ if (!node->unloaded_info)
+ remove_node(node);
+ }
/*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
node = get_node_by_name(info->filename);
switch (action) {
case GCOV_ADD:
- /* Add new node or revive ghost. */
- if (!node) {
+ if (node)
+ add_info(node, info);
+ else
add_node(info);
break;
- }
- if (gcov_persist)
- revive_node(node, info);
- else {
- pr_warning("could not add '%s' (already exists)\n",
- info->filename);
- }
- break;
case GCOV_REMOVE:
- /* Remove node or turn into ghost. */
- if (!node) {
+ if (node)
+ remove_info(node, info);
+ else {
pr_warning("could not remove '%s' (not found)\n",
info->filename);
- break;
}
- if (gcov_persist) {
- if (!ghost_node(node))
- break;
- }
- remove_node(node);
break;
}
mutex_unlock(&node_lock);
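
Note: add_info() above grows the loaded_info array by allocating the
larger array before touching the node, so a failed allocation leaves the
old state intact. The same pattern as a self-contained userspace sketch
(function and parameter names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    /* Append item to *arr, an array of *num pointers; on allocation
     * failure the original array remains valid, as in add_info(). */
    static int append_ptr(void ***arr, int *num, void *item)
    {
            void **grown = calloc(*num + 1, sizeof(void *));

            if (!grown)
                    return -1;
            if (*num)
                    memcpy(grown, *arr, *num * sizeof(void *));
            grown[*num] = item;
            free(*arr);
            *arr = grown;
            (*num)++;
            return 0;
    }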

View File

@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
- int cmp = grp - GROUP_AT(group_info, mid);
- if (cmp > 0)
+ if (grp > GROUP_AT(group_info, mid))
left = mid + 1;
- else if (cmp < 0)
+ else if (grp < GROUP_AT(group_info, mid))
right = mid;
else
return 1;
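
Note: the fix above matters for large gids: gid_t is unsigned, so
computing the comparison as a subtraction narrowed to int can flip its
sign and steer the binary search the wrong way. A standalone userspace
demonstration of the failure mode (not taken from the diff):

    #include <stdio.h>

    int main(void)
    {
            unsigned int grp = 0x80000001u;  /* large gid */
            unsigned int mid = 1u;           /* small gid */
            int cmp = grp - mid;  /* 0x80000000 narrows to a negative int */

            printf("cmp = %d, yet grp > mid is %d\n", cmp, grp > mid);
            return 0;
    }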

View File

@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
*/
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
- struct hrtimer_clock_base *base;
unsigned long flags;
ktime_t rem;
- base = lock_hrtimer_base(timer, &flags);
+ lock_hrtimer_base(timer, &flags);
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);

View File

@@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
struct task_struct *tsk)
{
- return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+ return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
+ triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

View File

@@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
- if (n)
- sg_mark_end(sgl + n - 1);
return n;
}
@@ -503,6 +501,15 @@ unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
}
EXPORT_SYMBOL(__kfifo_out_r);
+ void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+ {
+ unsigned int n;
+ n = __kfifo_peek_n(fifo, recsize);
+ fifo->out += n + recsize;
+ }
+ EXPORT_SYMBOL(__kfifo_skip_r);
int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
unsigned long len, unsigned int *copied, size_t recsize)
{
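
Note: __kfifo_skip_r() advances the out index past one record: the
recsize-byte length header plus the stored record length n. Through the
typed API it is reached via kfifo_skip() on a record fifo, roughly as
follows (fifo size and payload are illustrative):

    #include <linux/kfifo.h>

    static int kfifo_skip_example(void)
    {
            STRUCT_KFIFO_REC_1(128) fifo;       /* 1-byte record headers */
            const char rec[] = "hello";

            INIT_KFIFO(fifo);
            kfifo_in(&fifo, rec, sizeof(rec));  /* enqueue one record */
            kfifo_skip(&fifo);                  /* discard it unread */
            return kfifo_is_empty(&fifo);       /* 1: fifo drained */
    }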

View File

@@ -153,7 +153,9 @@ static int ____call_usermodehelper(void *data)
goto fail;
}
- retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
+ retval = kernel_execve(sub_info->path,
+ (const char *const *)sub_info->argv,
+ (const char *const *)sub_info->envp);
/* Exec failed? */
fail:

View File

@@ -36,15 +36,6 @@
# include <asm/mutex.h>
#endif
- /***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
- /***
+ /**
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
*
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
- /***
+ /**
* mutex_unlock - release the mutex
* @lock: the mutex to be released
*
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
- /***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+ /**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
* @lock: the mutex to be acquired
*
* Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
return prev == 1;
}
- /***
- * mutex_trylock - try acquire the mutex, without waiting
+ /**
+ * mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired
*
* Try to acquire the mutex atomically. Returns 1 if the mutex
* has been acquired successfully, and 0 on contention.
*
* NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
* about this when converting semaphore users to mutexes.
*
* This function must not be used in interrupt context. The
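
Note: the last hunk corrects the wording of the convention it documents:
mutex_trylock() returns 1 on success and 0 on contention, the opposite
polarity of down_trylock(). A minimal sketch of a correct caller (lock
and function names are illustrative):

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    static int example_try_work(void)
    {
            if (!mutex_trylock(&example_lock))
                    return -EBUSY;  /* 0 means "not acquired" here */
            /* ... critical section ... */
            mutex_unlock(&example_lock);
            return 0;
    }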

View File

@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
}
}
+ static inline int
+ event_filter_match(struct perf_event *event)
+ {
+ return event->cpu == -1 || event->cpu == smp_processor_id();
+ }
static void
event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
+ u64 delta;
+ /*
+ * An event which could not be activated because of
+ * filter mismatch still needs to have its timings
+ * maintained, otherwise bogus information is return
+ * via read() for time_enabled, time_running:
+ */
+ if (event->state == PERF_EVENT_STATE_INACTIVE
+ && !event_filter_match(event)) {
+ delta = ctx->time - event->tstamp_stopped;
+ event->tstamp_running += delta;
+ event->tstamp_stopped = ctx->time;
+ }
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
struct perf_event_context *ctx)
{
struct perf_event *event;
- if (group_event->state != PERF_EVENT_STATE_ACTIVE)
- return;
+ int state = group_event->state;
event_sched_out(group_event, cpuctx, ctx);
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
- if (group_event->attr.exclusive)
+ if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
@@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
+ case CPU_DOWN_FAILED:
perf_event_init_cpu(cpu);
break;
+ case CPU_UP_CANCELED:
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
perf_event_exit_cpu(cpu);
break;

View File

@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
/**
* pm_qos_add_request - inserts new qos request into the list
- * @pm_qos_class: identifies which list of qos request to us
+ * @dep: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
* @value: defines the qos request
*
* This function inserts a new entry in the pm_qos_class list of requested qos
* performance characteristics. It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters, and returns the pm_qos_request list
- * element as a handle for use in updating and removal. Call needs to save
- * this handle for later use.
+ * for the pm_qos_class of parameters and initializes the pm_qos_request_list
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
*/
void pm_qos_add_request(struct pm_qos_request_list *dep,
int pm_qos_class, s32 value)
{
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= 0) {
- struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
+ struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -387,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
} else if (count == 11) { /* len('0x12345678/0') */
if (copy_from_user(ascii_value, buf, 11))
return -EFAULT;
+ if (strlen(ascii_value) != 10)
+ return -EINVAL;
x = sscanf(ascii_value, "%x", &value);
if (x != 1)
return -EINVAL;
- pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
+ pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
} else
return -EINVAL;
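
Note: besides the swapped kzalloc() arguments (the signature is
kzalloc(size, flags)) and the stricter input parsing, the reworked
kerneldoc above spells out the caller-owned handle model. Typical usage
per that description (request name and latency value are illustrative):

    #include <linux/pm_qos_params.h>

    static struct pm_qos_request_list example_req;

    static void example_enter_latency_sensitive(void)
    {
            /* Caller keeps the handle for later update/removal. */
            pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 100);
    }

    static void example_leave_latency_sensitive(void)
    {
            pm_qos_remove_request(&example_req);
    }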

View File

@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
goto Close;
suspend_console();
- hibernation_freeze_swap();
saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
error = dpm_suspend_start(PMSG_FREEZE);
if (error)

View File

@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)
static DECLARE_WORK(poweroff_work, do_poweroff);
- static void handle_poweroff(int key, struct tty_struct *tty)
+ static void handle_poweroff(int key)
{
/* run sysrq poweroff on boot cpu */
schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);

Some files were not shown because too many files have changed in this diff.