/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */
#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/ring_buffer.h>
struct task_struct;
int alloc_cpu_buffers(void);
void free_cpu_buffers(void);
void start_cpu_work(void);
void end_cpu_work(void);
/* CPU buffer is composed of such entries (which are
* also used for context switch notes)
*/
/*
 * A single sample entry: the instruction pointer and an event word,
 * followed by a variable number of extra data words (filled in via the
 * op_cpu_buffer_add_data()/op_cpu_buffer_get_data() helpers below).
 */
struct op_sample {
	unsigned long eip;
	unsigned long event;
	unsigned long data[];	/* C99 flexible array member (was data[0]) */
};
struct op_entry;
struct oprofile_cpu_buffer {
unsigned long buffer_size;
2008-09-05 17:12:36 +02:00
struct task_struct *last_task;
2005-04-16 15:20:36 -07:00
int last_is_kernel;
int tracing;
unsigned long sample_received;
unsigned long sample_lost_overflow;
unsigned long backtrace_aborted;
unsigned long sample_invalid_eip;
2005-04-16 15:20:36 -07:00
int cpu;
2006-11-22 14:57:56 +00:00
struct delayed_work work;
};
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
/*
* Resets the cpu buffer to a sane state.
*
* reset these to invalid values; the next sample collected will
* populate the buffer with proper values to initialize the buffer
*/
static inline void op_cpu_buffer_reset(int cpu)
{
struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
cpu_buf->last_is_kernel = -1;
cpu_buf->last_task = NULL;
}
/*
* op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
* called only if op_cpu_buffer_write_reserve() did not return NULL or
* entry->event != NULL, otherwise entry->size or entry->event will be
* used uninitialized.
*/
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
int op_cpu_buffer_write_commit(struct op_entry *entry);
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu);
unsigned long op_cpu_buffer_entries(int cpu);
/* returns the remaining free size of data in the entry */
/* append one payload word to the entry; returns how much room is left */
static inline
int op_cpu_buffer_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->size)
		return 0;
	/* store the word, advance the cursor, shrink the free count */
	*entry->data++ = val;
	return --entry->size;
}
/* how many data words the entry still holds */
static inline
int op_cpu_buffer_get_size(struct op_entry *entry)
{
	int remaining = entry->size;

	return remaining;
}
/*
 * Fetch the next data word into *val.  Returns 0 when the entry is
 * exhausted, otherwise the word count including the one just fetched.
 */
static inline
int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val)
{
	int avail = entry->size;

	if (avail) {
		/* consume one word and advance past it */
		*val = *entry->data++;
		entry->size--;
	}
	return avail;
}
/* extra data flags */
#define KERNEL_CTX_SWITCH (1UL << 0)
#define IS_KERNEL (1UL << 1)
#define TRACE_BEGIN (1UL << 2)
#define USER_CTX_SWITCH (1UL << 3)
#endif /* OPROFILE_CPU_BUFFER_H */