You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
perf counters: add support for group counters
Impact: add group counters. This patch adds the "counter groups" abstraction. Groups of counters behave much like normal 'single' counters, with a few semantic and behavioral extensions on top of that. A counter group is created by creating a new counter with the open() syscall's group-leader group_fd file descriptor parameter pointing to another, already existing counter. Groups of counters are scheduled in and out in one atomic group, and they are also round-robin scheduled atomically. Counters that are members of a group can also record events with an (atomic) extended timestamp that extends to all members of the group, if the record type is set to PERF_RECORD_GROUP. Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
@@ -346,18 +346,22 @@ static void perf_save_and_restart(struct perf_counter *counter)
|
||||
}
|
||||
|
||||
static void
|
||||
perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
|
||||
perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
|
||||
{
|
||||
struct perf_counter_context *ctx = leader->ctx;
|
||||
struct perf_counter *counter;
|
||||
struct perf_counter *counter, *group_leader = sibling->group_leader;
|
||||
int bit;
|
||||
|
||||
list_for_each_entry(counter, &ctx->counters, list) {
|
||||
if (counter->hw_event.record_type != PERF_RECORD_SIMPLE ||
|
||||
counter == leader)
|
||||
continue;
|
||||
/*
|
||||
* Store the counter's own timestamp first:
|
||||
*/
|
||||
perf_store_irq_data(sibling, sibling->hw_event.type);
|
||||
perf_store_irq_data(sibling, atomic64_counter_read(sibling));
|
||||
|
||||
if (counter->active) {
|
||||
/*
|
||||
* Then store sibling timestamps (if any):
|
||||
*/
|
||||
list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
|
||||
if (!counter->active) {
|
||||
/*
|
||||
* When counter was not in the overflow mask, we have to
|
||||
* read it from hardware. We read it as well, when it
|
||||
@@ -371,8 +375,8 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
|
||||
perf_save_and_restart(counter);
|
||||
}
|
||||
}
|
||||
perf_store_irq_data(leader, counter->hw_event.type);
|
||||
perf_store_irq_data(leader, atomic64_counter_read(counter));
|
||||
perf_store_irq_data(sibling, counter->hw_event.type);
|
||||
perf_store_irq_data(sibling, atomic64_counter_read(counter));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -416,10 +420,6 @@ again:
|
||||
perf_store_irq_data(counter, instruction_pointer(regs));
|
||||
break;
|
||||
case PERF_RECORD_GROUP:
|
||||
perf_store_irq_data(counter,
|
||||
counter->hw_event.type);
|
||||
perf_store_irq_data(counter,
|
||||
atomic64_counter_read(counter));
|
||||
perf_handle_group(counter, &status, &ack);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -117,7 +117,10 @@ struct perf_data {
|
||||
* struct perf_counter - performance counter kernel representation:
|
||||
*/
|
||||
struct perf_counter {
|
||||
struct list_head list;
|
||||
struct list_head list_entry;
|
||||
struct list_head sibling_list;
|
||||
struct perf_counter *group_leader;
|
||||
|
||||
int active;
|
||||
#if BITS_PER_LONG == 64
|
||||
atomic64_t count;
|
||||
@@ -158,7 +161,8 @@ struct perf_counter_context {
|
||||
* Protect the list of counters:
|
||||
*/
|
||||
spinlock_t lock;
|
||||
struct list_head counters;
|
||||
|
||||
struct list_head counter_list;
|
||||
int nr_counters;
|
||||
int nr_active;
|
||||
struct task_struct *task;
|
||||
|
||||
+216
-66
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user