enum perf_counter_record_format {
PERF_RECORD_IP = 1U << 0,
PERF_RECORD_TID = 1U << 1,
- PERF_RECORD_GROUP = 1U << 2,
- PERF_RECORD_CALLCHAIN = 1U << 3,
+ PERF_RECORD_TIME = 1U << 2,
+ PERF_RECORD_ADDR = 1U << 3,
+ PERF_RECORD_GROUP = 1U << 4,
+ PERF_RECORD_CALLCHAIN = 1U << 5,
+ PERF_RECORD_CONFIG = 1U << 6,
+ PERF_RECORD_CPU = 1U << 7,
};
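/*
 * Illustrative sketch, not part of the patch: the bits above are OR-ed
 * together to pick what each overflow record carries, e.g.:
 */
__u32 sample_bits = PERF_RECORD_IP | PERF_RECORD_TID | PERF_RECORD_TIME;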
/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
__u64 config;
- __u64 irq_period;
+ union {
+ __u64 irq_period;
+ __u64 irq_freq;
+ };
+
__u32 record_type;
__u32 read_format;
exclude_idle : 1, /* don't count when idle */
mmap : 1, /* include mmap data */
munmap : 1, /* include munmap data */
+ comm : 1, /* include comm data */
+ freq : 1, /* use freq, not period */
- __reserved_1 : 53;
+ __reserved_1 : 51;
- __u32 extra_config_len;
__u32 wakeup_events; /* wakeup every n events */
+ __u32 __reserved_2;
- __u64 __reserved_2;
__u64 __reserved_3;
+ __u64 __reserved_4;
};
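/*
 * Sketch of filling in the struct above (not part of the patch;
 * PERF_COUNT_CPU_CYCLES is assumed as the cycle event id, the exact
 * config encoding and <string.h> are elided).  With the freq bit set,
 * the union member is read as irq_freq: a target sample rate that the
 * kernel meets by auto-adjusting the real period.
 */
static void setup_cycles_sampling(struct perf_counter_hw_event *hw_event)
{
	memset(hw_event, 0, sizeof(*hw_event));
	hw_event->config      = PERF_COUNT_CPU_CYCLES;	/* assumed id */
	hw_event->freq        = 1;		/* union carries irq_freq  */
	hw_event->irq_freq    = 1000;		/* aim for ~1000 samples/s */
	hw_event->record_type = PERF_RECORD_IP | PERF_RECORD_TID;
}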
/*
* Ioctls that can be done on a perf counter fd:
*/
-#define PERF_COUNTER_IOC_ENABLE _IO('$', 0)
-#define PERF_COUNTER_IOC_DISABLE _IO('$', 1)
+#define PERF_COUNTER_IOC_ENABLE _IOW('$', 0, u32)
+#define PERF_COUNTER_IOC_DISABLE _IOW('$', 1, u32)
+#define PERF_COUNTER_IOC_REFRESH _IOW('$', 2, u32)
+#define PERF_COUNTER_IOC_RESET _IOW('$', 3, u32)
+
+enum perf_counter_ioc_flags {
+ PERF_IOC_FLAG_GROUP = 1U << 0,
+};
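/*
 * Usage sketch (assumes <sys/ioctl.h> and an fd returned by the perf
 * counter syscall): passing PERF_IOC_FLAG_GROUP as the ioctl argument
 * applies the operation to every counter in the group led by fd.
 */
static void restart_group(int group_fd)
{
	ioctl(group_fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
	ioctl(group_fd, PERF_COUNTER_IOC_RESET,   PERF_IOC_FLAG_GROUP);
	ioctl(group_fd, PERF_COUNTER_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
}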
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
__u32 data_head; /* head in the data section */
};
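/*
 * Reader-side sketch (not part of the patch): user space polls data_head
 * to learn how far the kernel has written.  rmb() is assumed to be a
 * suitable read barrier so records are never read past data_head.
 */
static __u32 read_data_head(const volatile struct perf_counter_mmap_page *pc)
{
	__u32 head = pc->data_head;

	rmb();				/* assumed arch read barrier */
	return head;
}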
+#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
+#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_EVENT_MISC_KERNEL (1 << 0)
+#define PERF_EVENT_MISC_USER (2 << 0)
+#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
+#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
+
struct perf_event_header {
__u32 type;
- __u32 size;
+ __u16 misc;
+ __u16 size;
};
enum perf_event_type {
+ /*
+ * The MMAP events record the PROT_EXEC mappings so that we can
+ * correlate userspace IPs to code. They have the following structure:
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * char filename[];
+ * };
+ */
PERF_EVENT_MMAP = 1,
PERF_EVENT_MUNMAP = 2,
/*
- * Half the event type space is reserved for the counter overflow
- * bitfields, as found in hw_event.record_type.
+ * struct {
+ * struct perf_event_header header;
*
- * These events will have types of the form:
- * PERF_EVENT_COUNTER_OVERFLOW { | __PERF_EVENT_* } *
+ * u32 pid, tid;
+ * char comm[];
+ * };
+ */
+ PERF_EVENT_COMM = 3,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 irq_period;
+ * };
+ */
+ PERF_EVENT_PERIOD = 4,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * };
+ */
+ PERF_EVENT_THROTTLE = 5,
+ PERF_EVENT_UNTHROTTLE = 6,
+
+ /*
+ * When header.misc & PERF_EVENT_MISC_OVERFLOW is set, the event_type
+ * field holds the PERF_RECORD_* bits that describe the sample layout:
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * { u64 ip; } && PERF_RECORD_IP
+ * { u32 pid, tid; } && PERF_RECORD_TID
+ * { u64 time; } && PERF_RECORD_TIME
+ * { u64 addr; } && PERF_RECORD_ADDR
+ * { u64 config; } && PERF_RECORD_CONFIG
+ * { u32 cpu, res; } && PERF_RECORD_CPU
+ *
+ * { u64 nr;
+ * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP
+ *
+ * { u16 nr,
+ * hv,
+ * kernel,
+ * user;
+ * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
+ * };
*/
- PERF_EVENT_COUNTER_OVERFLOW = 1UL << 31,
- __PERF_EVENT_IP = PERF_RECORD_IP,
- __PERF_EVENT_TID = PERF_RECORD_TID,
- __PERF_EVENT_GROUP = PERF_RECORD_GROUP,
- __PERF_EVENT_CALLCHAIN = PERF_RECORD_CALLCHAIN,
};
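/*
 * Parsing sketch (not part of the patch), assuming the counter was
 * opened with record_type = PERF_RECORD_IP | PERF_RECORD_TID: fields
 * appear in the sample in the order listed in the comment above, and
 * header.size advances to the next record.  <stdio.h> is assumed.
 */
struct sample {
	struct perf_event_header	header;
	__u64				ip;		/* PERF_RECORD_IP  */
	__u32				pid, tid;	/* PERF_RECORD_TID */
};

static void handle_record(struct perf_event_header *hdr)
{
	if (hdr->misc & PERF_EVENT_MISC_OVERFLOW) {
		struct sample *s = (struct sample *)hdr;

		printf("ip %#llx pid %u tid %u\n",
		       (unsigned long long)s->ip, s->pid, s->tid);
	}
	/* next record starts at (char *)hdr + hdr->size */
}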
#ifdef __KERNEL__
unsigned long config_base;
unsigned long counter_base;
int nmi;
- unsigned int idx;
+ int idx;
};
union { /* software */
atomic64_t count;
atomic64_t prev_count;
u64 irq_period;
atomic64_t period_left;
+ u64 interrupts;
#endif
};
struct perf_counter;
/**
- * struct hw_perf_counter_ops - performance counter hw ops
+ * struct pmu - generic performance monitoring unit
*/
-struct hw_perf_counter_ops {
+struct pmu {
int (*enable) (struct perf_counter *counter);
void (*disable) (struct perf_counter *counter);
void (*read) (struct perf_counter *counter);
+ void (*unthrottle) (struct perf_counter *counter);
};
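/*
 * Sketch (illustrative stubs, not from the patch): an architecture
 * returns one of these from hw_perf_counter_init().  unthrottle is
 * left NULL here on the assumption that callers treat it as optional.
 */
static int  dummy_enable(struct perf_counter *counter)	{ return 0; }
static void dummy_disable(struct perf_counter *counter)	{ }
static void dummy_read(struct perf_counter *counter)	{ }

static const struct pmu dummy_pmu = {
	.enable		= dummy_enable,
	.disable	= dummy_disable,
	.read		= dummy_read,
};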
struct perf_mmap_data {
struct rcu_head rcu_head;
- int nr_pages;
- atomic_t wakeup;
- atomic_t head;
- atomic_t events;
+ int nr_pages; /* nr of data pages */
+ int nr_locked; /* nr pages mlocked */
+
+ atomic_t poll; /* POLL_ for wakeups */
+ atomic_t head; /* write position */
+ atomic_t events; /* event limit */
+
+ atomic_t done_head; /* completed head */
+ atomic_t lock; /* concurrent writes */
+
+ atomic_t wakeup; /* needs a wakeup */
+
struct perf_counter_mmap_page *user_page;
void *data_pages[0];
};
struct list_head sibling_list;
int nr_siblings;
struct perf_counter *group_leader;
- const struct hw_perf_counter_ops *hw_ops;
+ const struct pmu *pmu;
enum perf_counter_active_state state;
enum perf_counter_active_state prev_state;
struct hw_perf_counter hw;
struct perf_counter_context *ctx;
- struct task_struct *task;
struct file *filp;
- struct perf_counter *parent;
- struct list_head child_list;
-
/*
* These accumulate total time (in nanoseconds) that children
* counters have been enabled and running, respectively.
/*
* Protect attach/detach and child_list:
*/
- struct mutex mutex;
+ struct mutex child_mutex;
+ struct list_head child_list;
+ struct perf_counter *parent;
int oncpu;
int cpu;
+ struct list_head owner_entry;
+ struct task_struct *owner;
+
/* mmap bits */
struct mutex mmap_mutex;
atomic_t mmap_count;
/* poll related */
wait_queue_head_t waitq;
struct fasync_struct *fasync;
- /* optional: for NMIs */
+
+ /* delayed work for NMIs and such */
+ int pending_wakeup;
+ int pending_kill;
+ int pending_disable;
struct perf_pending_entry pending;
+ atomic_t event_limit;
+
void (*destroy)(struct perf_counter *);
struct rcu_head rcu_head;
#endif
/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
*/
struct perf_counter_context {
-#ifdef CONFIG_PERF_COUNTERS
/*
* Protect the states of the counters in the list,
* nr_active, and the list:
int nr_counters;
int nr_active;
int is_active;
+ atomic_t refcount;
struct task_struct *task;
/*
- * time_now is the current time in nanoseconds since an arbitrary
- * point in the past. For per-task counters, this is based on the
- * task clock, and for per-cpu counters it is based on the cpu clock.
- * time_lost is an offset from the task/cpu clock, used to make it
- * appear that time only passes while the context is scheduled in.
+ * Context clock, runs when context enabled.
*/
- u64 time_now;
- u64 time_lost;
-#endif
+ u64 time;
+ u64 timestamp;
+
+ /*
+ * These fields let us detect when two contexts have both
+ * been cloned (inherited) from a common ancestor.
+ */
+ struct perf_counter_context *parent_ctx;
+ u32 parent_gen;
+ u32 generation;
};
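/*
 * Sketch (not part of the patch): roughly how the clone-detection
 * fields can be used; two contexts are equivalent when they were
 * inherited from the same parent and neither has been modified since,
 * which the generation counts track.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
		ctx1->parent_gen == ctx2->parent_gen;
}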
int recursion[4];
};
+#ifdef CONFIG_PERF_COUNTERS
+
/*
* Set by architecture code:
*/
extern int perf_max_counters;
-#ifdef CONFIG_PERF_COUNTERS
-extern const struct hw_perf_counter_ops *
-hw_perf_counter_init(struct perf_counter *counter);
+extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
+extern void perf_counter_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern void perf_counter_init_task(struct task_struct *child);
+extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
-extern void perf_counter_unthrottle(void);
-extern u64 hw_perf_save_disable(void);
-extern void hw_perf_restore(u64 ctrl);
+extern void __perf_disable(void);
+extern bool __perf_enable(void);
+extern void perf_disable(void);
+extern void perf_enable(void);
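/*
 * Usage sketch: unlike the old hw_perf_save_disable()/hw_perf_restore()
 * pair, perf_disable()/perf_enable() carry their own nesting state, so
 * callers simply bracket PMU updates:
 */
static void reprogram_pmu_example(void)
{
	perf_disable();			/* stop the PMU            */
	/* ... touch counter configuration safely ... */
	perf_enable();			/* restart when outermost  */
}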
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
extern void perf_counter_update_userpage(struct perf_counter *counter);
extern int perf_counter_overflow(struct perf_counter *counter,
- int nmi, struct pt_regs *regs);
+ int nmi, struct pt_regs *regs, u64 addr);
/*
* Return 1 for a software counter, 0 for a hardware counter
*/
perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
}
-extern void perf_swcounter_event(u32, u64, int, struct pt_regs *);
+extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
extern void perf_counter_mmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file);
extern void perf_counter_munmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file);
+extern void perf_counter_comm(struct task_struct *tsk);
+
#define MAX_STACK_DEPTH 255
struct perf_callchain_entry {
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern int sysctl_perf_counter_priv;
+extern int sysctl_perf_counter_mlock;
+extern int sysctl_perf_counter_limit;
+
+extern void perf_counter_init(void);
+
+#ifndef perf_misc_flags
+#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
+ PERF_EVENT_MISC_KERNEL)
+#define perf_instruction_pointer(regs) instruction_pointer(regs)
+#endif
+
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
static inline void
-perf_counter_task_sched_out(struct task_struct *task, int cpu) { }
+perf_counter_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu) { }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu) { }
-static inline void perf_counter_init_task(struct task_struct *child) { }
+static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
static inline void perf_counter_exit_task(struct task_struct *child) { }
static inline void perf_counter_do_pending(void) { }
static inline void perf_counter_print_debug(void) { }
-static inline void perf_counter_unthrottle(void) { }
-static inline void hw_perf_restore(u64 ctrl) { }
-static inline u64 hw_perf_save_disable(void) { return 0; }
+static inline void perf_disable(void) { }
+static inline void perf_enable(void) { }
static inline int perf_counter_task_disable(void) { return -EINVAL; }
static inline int perf_counter_task_enable(void) { return -EINVAL; }
static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs) { }
-
+perf_swcounter_event(u32 event, u64 nr, int nmi,
+ struct pt_regs *regs, u64 addr) { }
static inline void
perf_counter_mmap(unsigned long addr, unsigned long len,
static inline void
perf_counter_munmap(unsigned long addr, unsigned long len,
- unsigned long pgoff, struct file *file) { }
+ unsigned long pgoff, struct file *file) { }
+static inline void perf_counter_comm(struct task_struct *tsk) { }
+static inline void perf_counter_init(void) { }
#endif
#endif /* __KERNEL__ */