Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Fix loss of notification with multi-event
  perf, x86: Force IBS LVT offset assignment for family 10h
  perf, x86: Disable PEBS on SandyBridge chips
  trace_events_filter: Use rcu_assign_pointer() when setting ftrace_event_call->filter
  perf session: Fix crash with invalid CPU list
  perf python: Fix undefined symbol problem
  perf/x86: Enable raw event access to Intel offcore events
  perf: Don't use -ENOSPC for out of PMU resources
  perf: Do not set task_ctx pointer in cpuctx if there are no events in the context
  perf/x86: Fix PEBS instruction unwind
  oprofile, x86: Fix crash when unloading module (nmi timer mode)
  oprofile: Fix crash when unloading module (hr timer mode)
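
The first entry, "perf: Fix loss of notification with multi-event", is what most of the diff below implements: when several events are redirected into one ring buffer with PERF_EVENT_IOC_SET_OUTPUT, a wakeup used to reach only the waitqueue of the event that raised it, so a poll() on another fd sharing the buffer could sleep forever. A minimal userspace sketch of that usage pattern (not part of this commit; attribute values and buffer size are illustrative, error handling elided):

#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/perf_event.h>

/* perf_event_open() has no glibc wrapper; the usual raw-syscall shim. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE); /* 1 meta + 2^3 data pages */

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP;
	attr.wakeup_events = 1;

	int fd1 = perf_event_open(&attr, 0, -1, -1, 0);
	int fd2 = perf_event_open(&attr, 0, -1, -1, 0);

	/* fd1 owns the mmap'ed ring buffer ... */
	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0);

	/* ... and fd2's samples are redirected into that same buffer. */
	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);

	/* Before this fix, a wakeup raised by one event woke only that
	 * event's own waitqueue, so this poll() could block forever even
	 * while samples landed in the shared buffer; afterwards,
	 * ring_buffer_wakeup() wakes every event attached to the buffer. */
	struct pollfd pfd = { .fd = fd2, .events = POLLIN };
	poll(&pfd, 1, -1);

	munmap(base, len);
	close(fd1);
	close(fd2);
	return 0;
}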
kernel/events/core.c (+86 −3)
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
+static void ring_buffer_attach(struct perf_event *event,
+			       struct ring_buffer *rb);
+
 void __weak perf_event_print_debug(void) { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2173,7 +2176,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 
 	perf_event_sched_in(cpuctx, ctx, task);
 
-	cpuctx->task_ctx = ctx;
+	if (ctx->nr_events)
+		cpuctx->task_ctx = ctx;
 
 	perf_pmu_enable(ctx->pmu);
 	perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 	struct ring_buffer *rb;
 	unsigned int events = POLL_HUP;
 
+	/*
+	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
+	 * grabs the rb reference but perf_event_set_output() overrides it.
+	 * Here is the timeline for two threads T1, T2:
+	 * t0: T1, rb = rcu_dereference(event->rb)
+	 * t1: T2, old_rb = event->rb
+	 * t2: T2, event->rb = new rb
+	 * t3: T2, ring_buffer_detach(old_rb)
+	 * t4: T1, ring_buffer_attach(rb1)
+	 * t5: T1, poll_wait(event->waitq)
+	 *
+	 * To avoid this problem, we grab mmap_mutex in perf_poll()
+	 * thereby ensuring that the assignment of the new ring buffer
+	 * and the detachment of the old buffer appear atomic to perf_poll()
+	 */
+	mutex_lock(&event->mmap_mutex);
+
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
-	if (rb)
+	if (rb) {
+		ring_buffer_attach(event, rb);
 		events = atomic_xchg(&rb->poll, 0);
+	}
 	rcu_read_unlock();
 
+	mutex_unlock(&event->mmap_mutex);
+
 	poll_wait(file, &event->waitq, wait);
 
 	return events;
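
The timeline in the new comment carries the whole argument for taking mmap_mutex here: perf_event_set_output() swaps event->rb and detaches the old buffer under that same mutex, so a poller can never observe the half-swapped state between t2 and t3. A toy pthread model of that reasoning (illustrative names only, not kernel API):

#include <pthread.h>

struct buffer { int dummy; };

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct buffer *current_rb;

static void set_output(struct buffer *new_rb)	/* T2 in the timeline */
{
	pthread_mutex_lock(&mmap_mutex);
	struct buffer *old_rb = current_rb;
	current_rb = new_rb;		/* t2: event->rb = new rb   */
	(void)old_rb;			/* t3: ring_buffer_detach() */
	pthread_mutex_unlock(&mmap_mutex);
}

static struct buffer *poll_side(void)		/* T1 in the timeline */
{
	pthread_mutex_lock(&mmap_mutex);
	struct buffer *rb = current_rb;	/* t0: rcu_dereference()    */
					/* t4: ring_buffer_attach() */
	pthread_mutex_unlock(&mmap_mutex);
	return rb;			/* t5: poll_wait() follows  */
}

int main(void)
{
	static struct buffer b1, b2;
	set_output(&b1);
	set_output(&b2);	/* swap and detach happen atomically for T1 */
	return poll_side() == &b2 ? 0 : 1;
}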
@@ -3496,6 +3521,49 @@ unlock:
 	return ret;
 }
 
+static void ring_buffer_attach(struct perf_event *event,
+			       struct ring_buffer *rb)
+{
+	unsigned long flags;
+
+	if (!list_empty(&event->rb_entry))
+		return;
+
+	spin_lock_irqsave(&rb->event_lock, flags);
+	if (!list_empty(&event->rb_entry))
+		goto unlock;
+
+	list_add(&event->rb_entry, &rb->event_list);
+unlock:
+	spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+			       struct ring_buffer *rb)
+{
+	unsigned long flags;
+
+	if (list_empty(&event->rb_entry))
+		return;
+
+	spin_lock_irqsave(&rb->event_lock, flags);
+	list_del_init(&event->rb_entry);
+	wake_up_all(&event->waitq);
+	spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+	struct ring_buffer *rb;
+
+	rcu_read_lock();
+	rb = rcu_dereference(event->rb);
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+		wake_up_all(&event->waitq);
+	}
+	rcu_read_unlock();
+}
+
 static void rb_free_rcu(struct rcu_head *rcu_head)
 {
 	struct ring_buffer *rb;
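
ring_buffer_attach() and ring_buffer_detach() use list_empty(&event->rb_entry) as an "already attached?" flag, with an unlocked fast path that is re-checked under rb->event_lock. That only works because detach uses list_del_init(), which leaves the node pointing at itself rather than dangling. A standalone sketch of that property, using a minimal userspace clone of the kernel's list_head (helper names mirror the kernel's but this is not kernel code):

#include <assert.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);	/* node points at itself again */
}

int main(void)
{
	struct list_head list, entry;
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&entry);

	assert(list_empty(&entry));	/* "not attached" is observable */
	list_add(&entry, &list);
	assert(!list_empty(&entry));	/* attached */
	list_del_init(&entry);
	assert(list_empty(&entry));	/* detached, reusable as a flag */
	return 0;
}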
@@ -3521,9 +3589,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
+	struct perf_event *event, *n;
+	unsigned long flags;
+
 	if (!atomic_dec_and_test(&rb->refcount))
 		return;
 
+	spin_lock_irqsave(&rb->event_lock, flags);
+	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+		list_del_init(&event->rb_entry);
+		wake_up_all(&event->waitq);
+	}
+	spin_unlock_irqrestore(&rb->event_lock, flags);
+
 	call_rcu(&rb->rcu_head, rb_free_rcu);
 }
@@ -3546,6 +3624,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
 		vma->vm_mm->pinned_vm -= event->mmap_locked;
 		rcu_assign_pointer(event->rb, NULL);
+		ring_buffer_detach(event, rb);
 		mutex_unlock(&event->mmap_mutex);
 
 		ring_buffer_put(rb);
@@ -3700,7 +3779,7 @@ static const struct file_operations perf_fops = {
 
 void perf_event_wakeup(struct perf_event *event)
 {
-	wake_up_all(&event->waitq);
+	ring_buffer_wakeup(event);
 
 	if (event->pending_kill) {
 		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5901,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	INIT_LIST_HEAD(&event->group_entry);
 	INIT_LIST_HEAD(&event->event_entry);
 	INIT_LIST_HEAD(&event->sibling_list);
+	INIT_LIST_HEAD(&event->rb_entry);
+
 	init_waitqueue_head(&event->waitq);
 	init_irq_work(&event->pending, perf_pending_event);
@@ -6028,6 +6109,8 @@ set:
 
 	old_rb = event->rb;
 	rcu_assign_pointer(event->rb, rb);
+	if (old_rb)
+		ring_buffer_detach(event, old_rb);
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
kernel/events/internal.h (+3 −0)

@@ -22,6 +22,9 @@ struct ring_buffer {
 	local_t				lost;		/* nr records lost   */
 
 	long				watermark;	/* wakeup watermark  */
+	/* poll crap */
+	spinlock_t			event_lock;
+	struct list_head		event_list;
 
 	struct perf_event_mmap_page	*user_page;
 	void				*data_pages[0];
kernel/events/ring_buffer.c (+3 −0)

@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 		rb->writable = 1;
 
 	atomic_set(&rb->refcount, 1);
+
+	INIT_LIST_HEAD(&rb->event_list);
+	spin_lock_init(&rb->event_lock);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
kernel/trace/trace_events_filter.c (+3 −3)

@@ -1686,7 +1686,7 @@ static int replace_system_preds(struct event_subsystem *system,
 		 * replace the filter for the call.
 		 */
 		filter = call->filter;
-		call->filter = filter_item->filter;
+		rcu_assign_pointer(call->filter, filter_item->filter);
 		filter_item->filter = filter;
 
 		fail = false;
@@ -1741,7 +1741,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 		filter = call->filter;
 		if (!filter)
 			goto out_unlock;
-		call->filter = NULL;
+		RCU_INIT_POINTER(call->filter, NULL);
 		/* Make sure the filter is not being used */
 		synchronize_sched();
 		__free_filter(filter);
@@ -1782,7 +1782,7 @@ out:
 	 * string
 	 */
 	tmp = call->filter;
-	call->filter = filter;
+	rcu_assign_pointer(call->filter, filter);
 	if (tmp) {
 		/* Make sure the call is done with the filter */
 		synchronize_sched();
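
The three one-line trace_events_filter.c changes all enforce the same publish rule: a filter must be fully initialised before its pointer becomes visible to readers, which rcu_assign_pointer()'s release semantics (and RCU_INIT_POINTER() for NULL) guarantee and a plain assignment does not. A rough userspace model of the publish side in C11 atomics (illustrative only; kernel RCU also covers the reclaim side handled by the synchronize_sched() calls above, and rcu_dereference() is a dependent load, for which acquire is a conservative stand-in):

#include <stdatomic.h>
#include <stdlib.h>

struct event_filter { int n_preds; /* ... */ };

static _Atomic(struct event_filter *) filter_ptr;

/* writer: initialise fully, then publish (rcu_assign_pointer analogue) */
static void publish_filter(int n)
{
	struct event_filter *f = malloc(sizeof(*f));
	f->n_preds = n;		/* all stores ordered before the publish */
	atomic_store_explicit(&filter_ptr, f, memory_order_release);
}

/* reader: never sees the pointer before the filter's contents */
static int read_filter(void)
{
	struct event_filter *f =
		atomic_load_explicit(&filter_ptr, memory_order_acquire);
	return f ? f->n_preds : 0;
}

int main(void)
{
	publish_filter(3);	/* leak is fine for a sketch */
	return read_filter() == 3 ? 0 : 1;
}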