#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
-#include <linux/ptrace.h>
+#include <linux/dcache.h>
#include <linux/percpu.h>
+#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
-#include <linux/dcache.h>
#include <asm/irq_regs.h>
void __weak hw_perf_enable(void) { barrier(); }
void __weak hw_perf_counter_setup(int cpu) { barrier(); }
-int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
+
+int __weak
+hw_perf_group_sched_in(struct perf_counter *group_leader,
struct perf_cpu_context *cpuctx,
struct perf_counter_context *ctx, int cpu)
{
* This has to cope with the fact that until it is locked,
* the context could get moved to another task.
*/
-static struct perf_counter_context *perf_lock_task_context(
- struct task_struct *task, unsigned long *flags)
+static struct perf_counter_context *
+perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
struct perf_counter_context *ctx;
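
The comment above refers to a lock-and-recheck dance that is not visible in this hunk. As a point of reference, here is a minimal sketch of the retry pattern perf_lock_task_context relies on; it assumes the task's context pointer lives in task->perf_counter_ctxp and that perf_counter_context carries a spinlock named lock, as in the perf_counter code of this era, and is an illustration rather than the exact upstream body:

static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);	/* assumed field name */
	if (ctx) {
		/*
		 * Lock the context, then re-check that it is still this
		 * task's context: it may have been moved to another task
		 * before we got the lock. If so, drop the lock and retry.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;
}
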
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
- struct perf_cpu_context *cpuctx;
- struct perf_counter_context *ctx;
struct perf_counter_context *parent_ctx;
+ struct perf_counter_context *ctx;
+ struct perf_cpu_context *cpuctx;
struct task_struct *task;
unsigned long flags;
int err;
*/
void perf_counter_update_userpage(struct perf_counter *counter)
{
- struct perf_mmap_data *data;
struct perf_counter_mmap_page *userpg;
+ struct perf_mmap_data *data;
rcu_read_lock();
data = rcu_dereference(counter->data);
static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
- struct perf_mmap_data *data = container_of(rcu_head,
- struct perf_mmap_data, rcu_head);
+ struct perf_mmap_data *data;
int i;
+ data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
+
free_page((unsigned long)data->user_page);
for (i = 0; i < data->nr_pages; i++)
free_page((unsigned long)data->data_pages[i]);
struct perf_counter *counter = vma->vm_file->private_data;
WARN_ON_ONCE(counter->ctx->parent_ctx);
- if (atomic_dec_and_mutex_lock(&counter->mmap_count,
- &counter->mmap_mutex)) {
+ if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
struct user_struct *user = current_user();
atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_counter *counter = file->private_data;
+ unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
+ unsigned long locked, lock_limit;
unsigned long vma_size;
unsigned long nr_pages;
- unsigned long user_locked, user_lock_limit;
- unsigned long locked, lock_limit;
long user_extra, extra;
int ret = 0;
static int perf_fasync(int fd, struct file *filp, int on)
{
- struct perf_counter *counter = filp->private_data;
struct inode *inode = filp->f_path.dentry->d_inode;
+ struct perf_counter *counter = filp->private_data;
int retval;
mutex_lock(&inode->i_mutex);
*/
struct perf_comm_event {
- struct task_struct *task;
- char *comm;
+ struct task_struct *task;
+ char *comm;
int comm_size;
struct {
int nmi, struct pt_regs *regs, u64 addr)
{
int neg = atomic64_add_negative(nr, &counter->hw.count);
+
if (counter->hw.irq_period && !neg)
perf_swcounter_overflow(counter, nmi, regs, addr);
}
/*
* Make the child state follow the state of the parent counter,
* not its hw_event.disabled bit. We hold the parent's mutex,
- * so we won't race with perf_counter_{en,dis}able_family.
+ * so we won't race with perf_counter_{en, dis}able_family.
*/
if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
child_counter->state = PERF_COUNTER_STATE_INACTIVE;
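
The hunk ends before the disabled branch of the state-following logic described in the comment above. For completeness, a minimal sketch of that step is shown below; the PERF_COUNTER_STATE_OFF counterpart for the disabled case is assumed here rather than taken from this hunk:

	/*
	 * Follow the parent counter's enabled/disabled state rather than
	 * the child's own hw_event.disabled setting.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;	/* assumed */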