Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
Author: Linus Torvalds
Date: 2005-04-16 15:20:36 -07:00
commit 1da177e4c3
17291 changed files with 6718755 additions and 0 deletions
buffer_sync.h
@@ -0,0 +1,22 @@
/**
* @file buffer_sync.h
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#ifndef OPROFILE_BUFFER_SYNC_H
#define OPROFILE_BUFFER_SYNC_H
/* add the necessary profiling hooks */
int sync_start(void);
/* remove the hooks */
void sync_stop(void);
/* sync the given CPU's buffer */
void sync_buffer(int cpu);
#endif /* OPROFILE_BUFFER_SYNC_H */
cpu_buffer.c
@@ -0,0 +1,307 @@
/**
* @file cpu_buffer.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*
* Each CPU has a local buffer that stores PC value/event
* pairs. We also log context switches when we notice them.
* Eventually each CPU's buffer is processed into the global
* event buffer by sync_buffer().
*
* We use a local buffer for two reasons: an NMI or similar
* interrupt cannot synchronise, and high sampling rates
* would lead to catastrophic global synchronisation if
* a global buffer was used.
*/
#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"
struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
static void wq_sync_buffer(void *);
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;
void free_cpu_buffers(void)
{
int i;
for_each_online_cpu(i) {
vfree(cpu_buffer[i].buffer);
}
}
int alloc_cpu_buffers(void)
{
int i;
unsigned long buffer_size = fs_cpu_buffer_size;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer * b = &cpu_buffer[i];
b->buffer = vmalloc(sizeof(struct op_sample) * buffer_size);
if (!b->buffer)
goto fail;
b->last_task = NULL;
b->last_is_kernel = -1;
b->tracing = 0;
b->buffer_size = buffer_size;
b->tail_pos = 0;
b->head_pos = 0;
b->sample_received = 0;
b->sample_lost_overflow = 0;
b->cpu = i;
INIT_WORK(&b->work, wq_sync_buffer, b);
}
return 0;
fail:
free_cpu_buffers();
return -ENOMEM;
}
void start_cpu_work(void)
{
int i;
work_enabled = 1;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer * b = &cpu_buffer[i];
/*
* Spread the work by 1 jiffy per cpu so they dont all
* fire at once.
*/
schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
}
}
void end_cpu_work(void)
{
int i;
work_enabled = 0;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer * b = &cpu_buffer[i];
cancel_delayed_work(&b->work);
}
flush_scheduled_work();
}
/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
{
/* reset these to invalid values; the next sample
* collected will populate the buffer with proper
* values to initialize the buffer
*/
cpu_buf->last_is_kernel = -1;
cpu_buf->last_task = NULL;
}
/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
{
unsigned long head = b->head_pos;
unsigned long tail = b->tail_pos;
if (tail > head)
return (tail - head) - 1;
return tail + (b->buffer_size - head) - 1;
}
static void increment_head(struct oprofile_cpu_buffer * b)
{
unsigned long new_head = b->head_pos + 1;
/* Ensure anything written to the slot before we
* increment is visible */
wmb();
if (new_head < b->buffer_size)
b->head_pos = new_head;
else
b->head_pos = 0;
}
inline static void
add_sample(struct oprofile_cpu_buffer * cpu_buf,
unsigned long pc, unsigned long event)
{
struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
entry->eip = pc;
entry->event = event;
increment_head(cpu_buf);
}
inline static void
add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
{
add_sample(buffer, ESCAPE_CODE, value);
}
/* This must be safe from any context. It's safe writing here
* because of the head/tail separation of the writer and reader
* of the CPU buffer.
*
* is_kernel is needed because on some architectures you cannot
* tell if you are in kernel or user space simply by looking at
* pc. We tag this in the buffer by generating kernel enter/exit
* events whenever is_kernel changes
*/
static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
int is_kernel, unsigned long event)
{
struct task_struct * task;
cpu_buf->sample_received++;
if (nr_available_slots(cpu_buf) < 3) {
cpu_buf->sample_lost_overflow++;
return 0;
}
is_kernel = !!is_kernel;
task = current;
/* notice a switch from user->kernel or vice versa */
if (cpu_buf->last_is_kernel != is_kernel) {
cpu_buf->last_is_kernel = is_kernel;
add_code(cpu_buf, is_kernel);
}
/* notice a task switch */
if (cpu_buf->last_task != task) {
cpu_buf->last_task = task;
add_code(cpu_buf, (unsigned long)task);
}
add_sample(cpu_buf, pc, event);
return 1;
}
static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
{
if (nr_available_slots(cpu_buf) < 4) {
cpu_buf->sample_lost_overflow++;
return 0;
}
add_code(cpu_buf, CPU_TRACE_BEGIN);
cpu_buf->tracing = 1;
return 1;
}
static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
{
cpu_buf->tracing = 0;
}
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
unsigned long pc = profile_pc(regs);
int is_kernel = !user_mode(regs);
if (!backtrace_depth) {
log_sample(cpu_buf, pc, is_kernel, event);
return;
}
if (!oprofile_begin_trace(cpu_buf))
return;
/* if log_sample() fail we can't backtrace since we lost the source
* of this event */
if (log_sample(cpu_buf, pc, is_kernel, event))
oprofile_ops.backtrace(regs, backtrace_depth);
oprofile_end_trace(cpu_buf);
}
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
log_sample(cpu_buf, pc, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
if (!cpu_buf->tracing)
return;
if (nr_available_slots(cpu_buf) < 1) {
cpu_buf->tracing = 0;
cpu_buf->sample_lost_overflow++;
return;
}
/* broken frame can give an eip with the same value as an escape code,
* abort the trace if we get it */
if (pc == ESCAPE_CODE) {
cpu_buf->tracing = 0;
cpu_buf->backtrace_aborted++;
return;
}
add_sample(cpu_buf, pc, 0);
}
/*
* This serves to avoid cpu buffer overflow, and makes sure
* the task mortuary progresses
*
* By using schedule_delayed_work_on and then schedule_delayed_work
* we guarantee this will stay on the correct cpu
*/
static void wq_sync_buffer(void * data)
{
struct oprofile_cpu_buffer * b = data;
if (b->cpu != smp_processor_id()) {
printk("WQ on CPU%d, prefer CPU%d\n",
smp_processor_id(), b->cpu);
}
sync_buffer(b->cpu);
/* don't re-add the work if we're shutting down */
if (work_enabled)
schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
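The head/tail bookkeeping in nr_available_slots() and increment_head() above is a single-producer/single-consumer ring buffer that deliberately keeps one slot unused, so head == tail always means "empty" and never "full". The standalone sketch below (plain C, invented names, not part of this commit) shows the same accounting without the kernel plumbing; like log_sample(), it drops and counts what does not fit instead of blocking.

#include <stdio.h>

#define RB_SIZE 8                       /* any size >= 2 works */

struct ring {
	volatile unsigned long head;    /* next slot the writer fills */
	volatile unsigned long tail;    /* next slot the reader consumes */
	unsigned long slot[RB_SIZE];
};

/* Slots the writer may still use; one slot is sacrificed so that
 * head == tail unambiguously means "empty". */
static unsigned long rb_free(const struct ring *r)
{
	if (r->tail > r->head)
		return r->tail - r->head - 1;
	return r->tail + (RB_SIZE - r->head) - 1;
}

/* Writer side: returns 0 and drops the value when the buffer is full. */
static int rb_put(struct ring *r, unsigned long v)
{
	if (rb_free(r) < 1)
		return 0;
	r->slot[r->head] = v;
	/* On SMP a write barrier belongs here, as in increment_head(). */
	r->head = (r->head + 1 == RB_SIZE) ? 0 : r->head + 1;
	return 1;
}

/* Reader side: returns 0 when the buffer is empty. */
static int rb_get(struct ring *r, unsigned long *v)
{
	if (r->tail == r->head)
		return 0;
	*v = r->slot[r->tail];
	r->tail = (r->tail + 1 == RB_SIZE) ? 0 : r->tail + 1;
	return 1;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	unsigned long i, v;

	for (i = 0; i < 10; i++)        /* only RB_SIZE - 1 of these fit */
		if (!rb_put(&r, i))
			printf("dropped %lu\n", i);
	while (rb_get(&r, &v))
		printf("read %lu\n", v);
	return 0;
}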
cpu_buffer.h
@@ -0,0 +1,57 @@
/**
* @file cpu_buffer.h
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
struct task_struct;
int alloc_cpu_buffers(void);
void free_cpu_buffers(void);
void start_cpu_work(void);
void end_cpu_work(void);
/* CPU buffer is composed of such entries (which are
* also used for context switch notes)
*/
struct op_sample {
unsigned long eip;
unsigned long event;
};
struct oprofile_cpu_buffer {
volatile unsigned long head_pos;
volatile unsigned long tail_pos;
unsigned long buffer_size;
struct task_struct * last_task;
int last_is_kernel;
int tracing;
struct op_sample * buffer;
unsigned long sample_received;
unsigned long sample_lost_overflow;
unsigned long backtrace_aborted;
int cpu;
struct work_struct work;
} ____cacheline_aligned;
extern struct oprofile_cpu_buffer cpu_buffer[];
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL 1
#define CPU_TRACE_BEGIN 2
#endif /* OPROFILE_CPU_BUFFER_H */
event_buffer.c
@@ -0,0 +1,187 @@
/**
* @file event_buffer.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*
* This is the global event buffer that the user-space
* daemon reads from. The event buffer is an untyped array
* of unsigned longs. Entries are prefixed by the
* escape value ESCAPE_CODE followed by an identifying code.
*/
#include <linux/vmalloc.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/dcookies.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include "oprof.h"
#include "event_buffer.h"
#include "oprofile_stats.h"
DECLARE_MUTEX(buffer_sem);
static unsigned long buffer_opened;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
static unsigned long * event_buffer;
static unsigned long buffer_size;
static unsigned long buffer_watershed;
static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_sem */
static atomic_t buffer_ready = ATOMIC_INIT(0);
/* Add an entry to the event buffer. When we
* get near to the end we wake up the process
* sleeping on the read() of the file.
*/
void add_event_entry(unsigned long value)
{
if (buffer_pos == buffer_size) {
atomic_inc(&oprofile_stats.event_lost_overflow);
return;
}
event_buffer[buffer_pos] = value;
if (++buffer_pos == buffer_size - buffer_watershed) {
atomic_set(&buffer_ready, 1);
wake_up(&buffer_wait);
}
}
/* Wake up the waiting process if any. This happens
* on "echo 0 >/dev/oprofile/enable" so the daemon
* processes the data remaining in the event buffer.
*/
void wake_up_buffer_waiter(void)
{
down(&buffer_sem);
atomic_set(&buffer_ready, 1);
wake_up(&buffer_wait);
up(&buffer_sem);
}
int alloc_event_buffer(void)
{
int err = -ENOMEM;
spin_lock(&oprofilefs_lock);
buffer_size = fs_buffer_size;
buffer_watershed = fs_buffer_watershed;
spin_unlock(&oprofilefs_lock);
if (buffer_watershed >= buffer_size)
return -EINVAL;
event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
if (!event_buffer)
goto out;
err = 0;
out:
return err;
}
void free_event_buffer(void)
{
vfree(event_buffer);
}
static int event_buffer_open(struct inode * inode, struct file * file)
{
int err = -EPERM;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (test_and_set_bit(0, &buffer_opened))
return -EBUSY;
/* Register as a user of dcookies
* to ensure they persist for the lifetime of
* the open event file
*/
err = -EINVAL;
file->private_data = dcookie_register();
if (!file->private_data)
goto out;
if ((err = oprofile_setup()))
goto fail;
/* NB: the actual start happens from userspace
* echo 1 >/dev/oprofile/enable
*/
return 0;
fail:
dcookie_unregister(file->private_data);
out:
clear_bit(0, &buffer_opened);
return err;
}
static int event_buffer_release(struct inode * inode, struct file * file)
{
oprofile_stop();
oprofile_shutdown();
dcookie_unregister(file->private_data);
buffer_pos = 0;
atomic_set(&buffer_ready, 0);
clear_bit(0, &buffer_opened);
return 0;
}
static ssize_t event_buffer_read(struct file * file, char __user * buf,
size_t count, loff_t * offset)
{
int retval = -EINVAL;
size_t const max = buffer_size * sizeof(unsigned long);
/* handling partial reads is more trouble than it's worth */
if (count != max || *offset)
return -EINVAL;
wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));
if (signal_pending(current))
return -EINTR;
/* can't currently happen */
if (!atomic_read(&buffer_ready))
return -EAGAIN;
down(&buffer_sem);
atomic_set(&buffer_ready, 0);
retval = -EFAULT;
count = buffer_pos * sizeof(unsigned long);
if (copy_to_user(buf, event_buffer, count))
goto out;
retval = count;
buffer_pos = 0;
out:
up(&buffer_sem);
return retval;
}
struct file_operations event_buffer_fops = {
.open = event_buffer_open,
.release = event_buffer_release,
.read = event_buffer_read,
};
event_buffer.h
@@ -0,0 +1,48 @@
/**
* @file event_buffer.h
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#ifndef EVENT_BUFFER_H
#define EVENT_BUFFER_H
#include <linux/types.h>
#include <asm/semaphore.h>
int alloc_event_buffer(void);
void free_event_buffer(void);
/* wake up the process sleeping on the event file */
void wake_up_buffer_waiter(void);
/* Each escaped entry is prefixed by ESCAPE_CODE
* then one of the following codes, then the
* relevant data.
*/
#define ESCAPE_CODE ~0UL
#define CTX_SWITCH_CODE 1
#define CPU_SWITCH_CODE 2
#define COOKIE_SWITCH_CODE 3
#define KERNEL_ENTER_SWITCH_CODE 4
#define KERNEL_EXIT_SWITCH_CODE 5
#define MODULE_LOADED_CODE 6
#define CTX_TGID_CODE 7
#define TRACE_BEGIN_CODE 8
#define TRACE_END_CODE 9
/* add data to the event buffer */
void add_event_entry(unsigned long data);
extern struct file_operations event_buffer_fops;
/* mutex between sync_cpu_buffers() and the
* file reading code.
*/
extern struct semaphore buffer_sem;
#endif /* EVENT_BUFFER_H */
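As the comment above says, control records in the event stream are announced by ESCAPE_CODE followed by one of the codes listed here and then code-specific data. The reader-side sketch below illustrates just that framing; the per-code payload handling is elided on purpose, since the actual record layouts are produced by buffer_sync.c and interpreted by the user-space oprofile daemon, and the sample values are made up.

#include <stdio.h>

#define ESCAPE_CODE              (~0UL)
#define CTX_SWITCH_CODE          1
#define KERNEL_ENTER_SWITCH_CODE 4
#define KERNEL_EXIT_SWITCH_CODE  5
#define TRACE_BEGIN_CODE         8

/* Walk a chunk of unsigned longs as read() on the event file would
 * return it.  Ordinary words are sample data; a word equal to
 * ESCAPE_CODE means the next word is one of the *_CODE values above,
 * possibly followed by code-specific payload (not handled here). */
static void dump_stream(const unsigned long *buf, size_t nwords)
{
	size_t i = 0;

	while (i < nwords) {
		if (buf[i] == ESCAPE_CODE && i + 1 < nwords) {
			printf("escape: code %lu\n", buf[i + 1]);
			i += 2;
		} else {
			printf("data:   %#lx\n", buf[i]);
			i += 1;
		}
	}
}

int main(void)
{
	unsigned long stream[] = {
		0xc0100123UL, 0x0UL,            /* arbitrary data words */
		ESCAPE_CODE, KERNEL_ENTER_SWITCH_CODE,
		0xc01ffabcUL, 0x0UL,
	};

	dump_stream(stream, sizeof(stream) / sizeof(stream[0]));
	return 0;
}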
oprof.c
@@ -0,0 +1,188 @@
/**
* @file oprof.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/moduleparam.h>
#include <asm/semaphore.h>
#include "oprof.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprofile_stats.h"
struct oprofile_operations oprofile_ops;
unsigned long oprofile_started;
unsigned long backtrace_depth;
static unsigned long is_setup;
static DECLARE_MUTEX(start_sem);
/* timer
0 - use performance monitoring hardware if available
1 - use the timer int mechanism regardless
*/
static int timer = 0;
int oprofile_setup(void)
{
int err;
down(&start_sem);
if ((err = alloc_cpu_buffers()))
goto out;
if ((err = alloc_event_buffer()))
goto out1;
if (oprofile_ops.setup && (err = oprofile_ops.setup()))
goto out2;
/* Note even though this starts part of the
* profiling overhead, it's necessary to prevent
* us missing task deaths and eventually oopsing
* when trying to process the event buffer.
*/
if ((err = sync_start()))
goto out3;
is_setup = 1;
up(&start_sem);
return 0;
out3:
if (oprofile_ops.shutdown)
oprofile_ops.shutdown();
out2:
free_event_buffer();
out1:
free_cpu_buffers();
out:
up(&start_sem);
return err;
}
/* Actually start profiling (echo 1>/dev/oprofile/enable) */
int oprofile_start(void)
{
int err = -EINVAL;
down(&start_sem);
if (!is_setup)
goto out;
err = 0;
if (oprofile_started)
goto out;
oprofile_reset_stats();
if ((err = oprofile_ops.start()))
goto out;
oprofile_started = 1;
out:
up(&start_sem);
return err;
}
/* echo 0>/dev/oprofile/enable */
void oprofile_stop(void)
{
down(&start_sem);
if (!oprofile_started)
goto out;
oprofile_ops.stop();
oprofile_started = 0;
/* wake up the daemon to read what remains */
wake_up_buffer_waiter();
out:
up(&start_sem);
}
void oprofile_shutdown(void)
{
down(&start_sem);
sync_stop();
if (oprofile_ops.shutdown)
oprofile_ops.shutdown();
is_setup = 0;
free_event_buffer();
free_cpu_buffers();
up(&start_sem);
}
int oprofile_set_backtrace(unsigned long val)
{
int err = 0;
down(&start_sem);
if (oprofile_started) {
err = -EBUSY;
goto out;
}
if (!oprofile_ops.backtrace) {
err = -EINVAL;
goto out;
}
backtrace_depth = val;
out:
up(&start_sem);
return err;
}
static int __init oprofile_init(void)
{
int err;
err = oprofile_arch_init(&oprofile_ops);
if (err < 0 || timer) {
printk(KERN_INFO "oprofile: using timer interrupt.\n");
oprofile_timer_init(&oprofile_ops);
}
err = oprofilefs_register();
if (err)
oprofile_arch_exit();
return err;
}
static void __exit oprofile_exit(void)
{
oprofilefs_unregister();
oprofile_arch_exit();
}
module_init(oprofile_init);
module_exit(oprofile_exit);
module_param_named(timer, timer, int, 0644);
MODULE_PARM_DESC(timer, "force use of timer interrupt");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Levon <levon@movementarian.org>");
MODULE_DESCRIPTION("OProfile system profiler");
oprof.h
@@ -0,0 +1,39 @@
/**
* @file oprof.h
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#ifndef OPROF_H
#define OPROF_H
int oprofile_setup(void);
void oprofile_shutdown(void);
int oprofilefs_register(void);
void oprofilefs_unregister(void);
int oprofile_start(void);
void oprofile_stop(void);
struct oprofile_operations;
extern unsigned long fs_buffer_size;
extern unsigned long fs_cpu_buffer_size;
extern unsigned long fs_buffer_watershed;
extern struct oprofile_operations oprofile_ops;
extern unsigned long oprofile_started;
extern unsigned long backtrace_depth;
struct super_block;
struct dentry;
void oprofile_create_files(struct super_block * sb, struct dentry * root);
void oprofile_timer_init(struct oprofile_operations * ops);
int oprofile_set_backtrace(unsigned long depth);
#endif /* OPROF_H */
oprofile_files.c
@@ -0,0 +1,135 @@
/**
* @file oprofile_files.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/fs.h>
#include <linux/oprofile.h>
#include "event_buffer.h"
#include "oprofile_stats.h"
#include "oprof.h"
unsigned long fs_buffer_size = 131072;
unsigned long fs_cpu_buffer_size = 8192;
unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
static ssize_t depth_read(struct file * file, char * buf, size_t count, loff_t * offset)
{
return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
}
static ssize_t depth_write(struct file * file, char const * buf, size_t count, loff_t * offset)
{
unsigned long val;
int retval;
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval)
return retval;
retval = oprofile_set_backtrace(val);
if (retval)
return retval;
return count;
}
static struct file_operations depth_fops = {
.read = depth_read,
.write = depth_write
};
static ssize_t pointer_size_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
{
return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
}
static struct file_operations pointer_size_fops = {
.read = pointer_size_read,
};
static ssize_t cpu_type_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
{
return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
}
static struct file_operations cpu_type_fops = {
.read = cpu_type_read,
};
static ssize_t enable_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
{
return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
}
static ssize_t enable_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
{
unsigned long val;
int retval;
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval)
return retval;
if (val)
retval = oprofile_start();
else
oprofile_stop();
if (retval)
return retval;
return count;
}
static struct file_operations enable_fops = {
.read = enable_read,
.write = enable_write,
};
static ssize_t dump_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
{
wake_up_buffer_waiter();
return count;
}
static struct file_operations dump_fops = {
.write = dump_write,
};
void oprofile_create_files(struct super_block * sb, struct dentry * root)
{
oprofilefs_create_file(sb, root, "enable", &enable_fops);
oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
oprofile_create_stats_files(sb, root);
if (oprofile_ops.create_files)
oprofile_ops.create_files(sb, root);
}
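The files created above are the user-visible control surface; the in-tree comments drive them with shell redirections such as "echo 1 >/dev/oprofile/enable". A minimal C equivalent is sketched below. It assumes oprofilefs is already mounted at /dev/oprofile (the path the comments use); the helper name and the trimmed error handling are illustrative, not part of the commit.

#include <fcntl.h>
#include <unistd.h>

/* Write a short string to one of the oprofilefs control files. */
static int write_str(const char *path, const char *s, size_t len)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, s, len);
	close(fd);
	return n == (ssize_t)len ? 0 : -1;
}

int main(void)
{
	/* echo 1 >/dev/oprofile/enable : ends up in oprofile_start() */
	if (write_str("/dev/oprofile/enable", "1\n", 2))
		return 1;

	sleep(10);      /* let samples accumulate */

	/* any write to "dump" wakes the daemon reading the buffer file */
	write_str("/dev/oprofile/dump", "1\n", 2);

	/* echo 0 >/dev/oprofile/enable : oprofile_stop(), flushes the rest */
	write_str("/dev/oprofile/enable", "0\n", 2);
	return 0;
}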
oprofile_stats.c
@@ -0,0 +1,74 @@
/**
* @file oprofile_stats.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon
*/
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/threads.h>
#include "oprofile_stats.h"
#include "cpu_buffer.h"
struct oprofile_stat_struct oprofile_stats;
void oprofile_reset_stats(void)
{
struct oprofile_cpu_buffer * cpu_buf;
int i;
for_each_cpu(i) {
cpu_buf = &cpu_buffer[i];
cpu_buf->sample_received = 0;
cpu_buf->sample_lost_overflow = 0;
}
atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
atomic_set(&oprofile_stats.event_lost_overflow, 0);
}
void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
{
struct oprofile_cpu_buffer * cpu_buf;
struct dentry * cpudir;
struct dentry * dir;
char buf[10];
int i;
dir = oprofilefs_mkdir(sb, root, "stats");
if (!dir)
return;
for_each_cpu(i) {
cpu_buf = &cpu_buffer[i];
snprintf(buf, 10, "cpu%d", i);
cpudir = oprofilefs_mkdir(sb, dir, buf);
/* Strictly speaking access to these ulongs is racy,
* but we can't simply lock them, and they are
* informational only.
*/
oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
&cpu_buf->sample_received);
oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
&cpu_buf->sample_lost_overflow);
oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
&cpu_buf->backtrace_aborted);
}
oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
&oprofile_stats.sample_lost_no_mm);
oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
&oprofile_stats.sample_lost_no_mapping);
oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
&oprofile_stats.event_lost_overflow);
oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
&oprofile_stats.bt_lost_no_mapping);
}
oprofile_stats.h
@@ -0,0 +1,33 @@
/**
* @file oprofile_stats.h
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon
*/
#ifndef OPROFILE_STATS_H
#define OPROFILE_STATS_H
#include <asm/atomic.h>
struct oprofile_stat_struct {
atomic_t sample_lost_no_mm;
atomic_t sample_lost_no_mapping;
atomic_t bt_lost_no_mapping;
atomic_t event_lost_overflow;
};
extern struct oprofile_stat_struct oprofile_stats;
/* reset all stats to zero */
void oprofile_reset_stats(void);
struct super_block;
struct dentry;
/* create the stats/ dir */
void oprofile_create_stats_files(struct super_block * sb, struct dentry * root);
#endif /* OPROFILE_STATS_H */
oprofilefs.c
@@ -0,0 +1,299 @@
/**
* @file oprofilefs.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon
*
* A simple filesystem for configuration and
* access of oprofile.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/oprofile.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include "oprof.h"
#define OPROFILEFS_MAGIC 0x6f70726f
DEFINE_SPINLOCK(oprofilefs_lock);
static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode)
{
struct inode * inode = new_inode(sb);
if (inode) {
inode->i_mode = mode;
inode->i_uid = 0;
inode->i_gid = 0;
inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
}
return inode;
}
static struct super_operations s_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
};
ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset)
{
return simple_read_from_buffer(buf, count, offset, str, strlen(str));
}
#define TMPBUFSIZE 50
ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset)
{
char tmpbuf[TMPBUFSIZE];
size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
if (maxlen > TMPBUFSIZE)
maxlen = TMPBUFSIZE;
return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
}
int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count)
{
char tmpbuf[TMPBUFSIZE];
if (!count)
return 0;
if (count > TMPBUFSIZE - 1)
return -EINVAL;
memset(tmpbuf, 0x0, TMPBUFSIZE);
if (copy_from_user(tmpbuf, buf, count))
return -EFAULT;
spin_lock(&oprofilefs_lock);
*val = simple_strtoul(tmpbuf, NULL, 0);
spin_unlock(&oprofilefs_lock);
return 0;
}
static ssize_t ulong_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
{
unsigned long * val = file->private_data;
return oprofilefs_ulong_to_user(*val, buf, count, offset);
}
static ssize_t ulong_write_file(struct file * file, char const __user * buf, size_t count, loff_t * offset)
{
unsigned long * value = file->private_data;
int retval;
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(value, buf, count);
if (retval)
return retval;
return count;
}
static int default_open(struct inode * inode, struct file * filp)
{
if (inode->u.generic_ip)
filp->private_data = inode->u.generic_ip;
return 0;
}
static struct file_operations ulong_fops = {
.read = ulong_read_file,
.write = ulong_write_file,
.open = default_open,
};
static struct file_operations ulong_ro_fops = {
.read = ulong_read_file,
.open = default_open,
};
static struct dentry * __oprofilefs_create_file(struct super_block * sb,
struct dentry * root, char const * name, struct file_operations * fops,
int perm)
{
struct dentry * dentry;
struct inode * inode;
dentry = d_alloc_name(root, name);
if (!dentry)
return NULL;
inode = oprofilefs_get_inode(sb, S_IFREG | perm);
if (!inode) {
dput(dentry);
return NULL;
}
inode->i_fop = fops;
d_add(dentry, inode);
return dentry;
}
int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
char const * name, unsigned long * val)
{
struct dentry * d = __oprofilefs_create_file(sb, root, name,
&ulong_fops, 0644);
if (!d)
return -EFAULT;
d->d_inode->u.generic_ip = val;
return 0;
}
int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
char const * name, unsigned long * val)
{
struct dentry * d = __oprofilefs_create_file(sb, root, name,
&ulong_ro_fops, 0444);
if (!d)
return -EFAULT;
d->d_inode->u.generic_ip = val;
return 0;
}
static ssize_t atomic_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
{
atomic_t * val = file->private_data;
return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
}
static struct file_operations atomic_ro_fops = {
.read = atomic_read_file,
.open = default_open,
};
int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
char const * name, atomic_t * val)
{
struct dentry * d = __oprofilefs_create_file(sb, root, name,
&atomic_ro_fops, 0444);
if (!d)
return -EFAULT;
d->d_inode->u.generic_ip = val;
return 0;
}
int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
char const * name, struct file_operations * fops)
{
if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
return -EFAULT;
return 0;
}
int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
char const * name, struct file_operations * fops, int perm)
{
if (!__oprofilefs_create_file(sb, root, name, fops, perm))
return -EFAULT;
return 0;
}
struct dentry * oprofilefs_mkdir(struct super_block * sb,
struct dentry * root, char const * name)
{
struct dentry * dentry;
struct inode * inode;
dentry = d_alloc_name(root, name);
if (!dentry)
return NULL;
inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
if (!inode) {
dput(dentry);
return NULL;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_add(dentry, inode);
return dentry;
}
static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent)
{
struct inode * root_inode;
struct dentry * root_dentry;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = OPROFILEFS_MAGIC;
sb->s_op = &s_ops;
sb->s_time_gran = 1;
root_inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
if (!root_inode)
return -ENOMEM;
root_inode->i_op = &simple_dir_inode_operations;
root_inode->i_fop = &simple_dir_operations;
root_dentry = d_alloc_root(root_inode);
if (!root_dentry) {
iput(root_inode);
return -ENOMEM;
}
sb->s_root = root_dentry;
oprofile_create_files(sb, root_dentry);
// FIXME: verify kill_litter_super removes our dentries
return 0;
}
static struct super_block *oprofilefs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return get_sb_single(fs_type, flags, data, oprofilefs_fill_super);
}
static struct file_system_type oprofilefs_type = {
.owner = THIS_MODULE,
.name = "oprofilefs",
.get_sb = oprofilefs_get_sb,
.kill_sb = kill_litter_super,
};
int __init oprofilefs_register(void)
{
return register_filesystem(&oprofilefs_type);
}
void __exit oprofilefs_unregister(void)
{
unregister_filesystem(&oprofilefs_type);
}
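To show how the helpers above are meant to be consumed, here is a hypothetical create_files callback for a profiler back end. The function name, the "example" directory and the "threshold" tunable are invented for illustration, and the callback signature is assumed to match the one oprofile_create_files() invokes; only the oprofilefs helpers themselves come from this commit.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/oprofile.h>

static unsigned long example_threshold = 100000;

/* Hypothetical oprofile_operations.create_files implementation: make a
 * subdirectory and expose one unsigned long through ulong_fops, so user
 * space can read and write it without any extra file_operations. */
static int example_create_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir = oprofilefs_mkdir(sb, root, "example");

	if (!dir)
		return -ENOMEM;

	return oprofilefs_create_ulong(sb, dir, "threshold",
					&example_threshold);
}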
timer_int.c
@@ -0,0 +1,46 @@
/**
* @file timer_int.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/profile.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include "oprof.h"
static int timer_notify(struct pt_regs *regs)
{
oprofile_add_sample(regs, 0);
return 0;
}
static int timer_start(void)
{
return register_timer_hook(timer_notify);
}
static void timer_stop(void)
{
unregister_timer_hook(timer_notify);
}
void __init oprofile_timer_init(struct oprofile_operations * ops)
{
ops->create_files = NULL;
ops->setup = NULL;
ops->shutdown = NULL;
ops->start = timer_start;
ops->stop = timer_stop;
ops->cpu_type = "timer";
}