__reserved_1 : 51;
- __u32 extra_config_len;
__u32 wakeup_events; /* wakeup every n events */
+ __u32 __reserved_2;
- __u64 __reserved_2;
__u64 __reserved_3;
+ __u64 __reserved_4;
};
/*
*/
PERF_EVENT_PERIOD = 4,
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * };
+ */
+ PERF_EVENT_THROTTLE = 5,
+ PERF_EVENT_UNTHROTTLE = 6,
+
/*
* When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
* will be PERF_RECORD_*
int (*enable) (struct perf_counter *counter);
void (*disable) (struct perf_counter *counter);
void (*read) (struct perf_counter *counter);
+ void (*unthrottle) (struct perf_counter *counter);
};
/**
struct perf_counter_context *ctx;
struct file *filp;
- struct perf_counter *parent;
- struct list_head child_list;
-
/*
* These accumulate total time (in nanoseconds) that children
* counters have been enabled and running, respectively.
/*
* Protect attach/detach and child_list:
*/
- struct mutex mutex;
+ struct mutex child_mutex;
+ struct list_head child_list;
+ struct perf_counter *parent;
int oncpu;
int cpu;
+ struct list_head owner_entry;
+ struct task_struct *owner;
+
/* mmap bits */
struct mutex mmap_mutex;
atomic_t mmap_count;
struct list_head event_list;
int nr_counters;
int nr_active;
- int nr_enabled;
int is_active;
atomic_t refcount;
struct task_struct *task;
extern void perf_counter_task_sched_out(struct task_struct *task,
struct task_struct *next, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern void perf_counter_init_task(struct task_struct *child);
+extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
-extern void perf_counter_unthrottle(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern int sysctl_perf_counter_priv;
extern int sysctl_perf_counter_mlock;
+extern int sysctl_perf_counter_limit;
extern void perf_counter_init(void);
struct task_struct *next, int cpu) { }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu) { }
-static inline void perf_counter_init_task(struct task_struct *child) { }
+static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
static inline void perf_counter_exit_task(struct task_struct *child) { }
static inline void perf_counter_do_pending(void) { }
static inline void perf_counter_print_debug(void) { }
-static inline void perf_counter_unthrottle(void) { }
static inline void perf_disable(void) { }
static inline void perf_enable(void) { }
static inline int perf_counter_task_disable(void) { return -EINVAL; }