perf_counter: Add fork event

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 3f11a2bc6c799268ba01fb20538e0fffc292ff00..78c58623a0dd207ee6856c2378c73913b446efc1 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -40,14 +40,16 @@ static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
 static atomic_t nr_counters __read_mostly;
-static atomic_t nr_mmap_tracking __read_mostly;
-static atomic_t nr_munmap_tracking __read_mostly;
-static atomic_t nr_comm_tracking __read_mostly;
+static atomic_t nr_mmap_counters __read_mostly;
+static atomic_t nr_munmap_counters __read_mostly;
+static atomic_t nr_comm_counters __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
 
+static atomic64_t perf_counter_id;
+
 /*
  * Lock for (sysadmin-configurable) counter reservations:
  */
@@ -260,7 +262,7 @@ counter_sched_out(struct perf_counter *counter,
        if (!is_software_counter(counter))
                cpuctx->active_oncpu--;
        ctx->nr_active--;
-       if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
+       if (counter->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
 }
 
@@ -282,7 +284,7 @@ group_sched_out(struct perf_counter *group_counter,
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
                counter_sched_out(counter, cpuctx, ctx);
 
-       if (group_counter->hw_event.exclusive)
+       if (group_counter->attr.exclusive)
                cpuctx->exclusive = 0;
 }
 
@@ -550,7 +552,7 @@ counter_sched_in(struct perf_counter *counter,
                cpuctx->active_oncpu++;
        ctx->nr_active++;
 
-       if (counter->hw_event.exclusive)
+       if (counter->attr.exclusive)
                cpuctx->exclusive = 1;
 
        return 0;
@@ -642,7 +644,7 @@ static int group_can_go_on(struct perf_counter *counter,
         * If this group is exclusive and there are already
         * counters on the CPU, it can't go on.
         */
-       if (counter->hw_event.exclusive && cpuctx->active_oncpu)
+       if (counter->attr.exclusive && cpuctx->active_oncpu)
                return 0;
        /*
         * Otherwise, try to add it if all previous groups were able
@@ -725,7 +727,7 @@ static void __perf_install_in_context(void *info)
                 */
                if (leader != counter)
                        group_sched_out(leader, cpuctx, ctx);
-               if (leader->hw_event.pinned) {
+               if (leader->attr.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_COUNTER_STATE_ERROR;
                }
@@ -849,7 +851,7 @@ static void __perf_counter_enable(void *info)
                 */
                if (leader != counter)
                        group_sched_out(leader, cpuctx, ctx);
-               if (leader->hw_event.pinned) {
+               if (leader->attr.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_COUNTER_STATE_ERROR;
                }
@@ -927,7 +929,7 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh)
        /*
         * not supported on inherited counters
         */
-       if (counter->hw_event.inherit)
+       if (counter->attr.inherit)
                return -EINVAL;
 
        atomic_add(refresh, &counter->event_limit);
@@ -1094,7 +1096,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
         */
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (counter->state <= PERF_COUNTER_STATE_OFF ||
-                   !counter->hw_event.pinned)
+                   !counter->attr.pinned)
                        continue;
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;
@@ -1122,7 +1124,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
                 * ignore pinned counters since we did them already.
                 */
                if (counter->state <= PERF_COUNTER_STATE_OFF ||
-                   counter->hw_event.pinned)
+                   counter->attr.pinned)
                        continue;
 
                /*
@@ -1204,11 +1206,11 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
                        interrupts = 2*sysctl_perf_counter_limit/HZ;
                }
 
-               if (!counter->hw_event.freq || !counter->hw_event.sample_freq)
+               if (!counter->attr.freq || !counter->attr.sample_freq)
                        continue;
 
                events = HZ * interrupts * counter->hw.sample_period;
-               period = div64_u64(events, counter->hw_event.sample_freq);
+               period = div64_u64(events, counter->attr.sample_freq);
 
                delta = (s64)(1 + period - counter->hw.sample_period);
                delta >>= 1;
@@ -1444,12 +1446,12 @@ static void free_counter(struct perf_counter *counter)
        perf_pending_sync(counter);
 
        atomic_dec(&nr_counters);
-       if (counter->hw_event.mmap)
-               atomic_dec(&nr_mmap_tracking);
-       if (counter->hw_event.munmap)
-               atomic_dec(&nr_munmap_tracking);
-       if (counter->hw_event.comm)
-               atomic_dec(&nr_comm_tracking);
+       if (counter->attr.mmap)
+               atomic_dec(&nr_mmap_counters);
+       if (counter->attr.munmap)
+               atomic_dec(&nr_munmap_counters);
+       if (counter->attr.comm)
+               atomic_dec(&nr_comm_counters);
 
        if (counter->destroy)
                counter->destroy(counter);
@@ -1504,13 +1506,13 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
        mutex_lock(&counter->child_mutex);
        values[0] = perf_counter_read(counter);
        n = 1;
-       if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+       if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                values[n++] = counter->total_time_enabled +
                        atomic64_read(&counter->child_total_time_enabled);
-       if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+       if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                values[n++] = counter->total_time_running +
                        atomic64_read(&counter->child_total_time_running);
-       if (counter->hw_event.read_format & PERF_FORMAT_ID)
+       if (counter->attr.read_format & PERF_FORMAT_ID)
                values[n++] = counter->id;
        mutex_unlock(&counter->child_mutex);
 
@@ -1604,6 +1606,43 @@ static void perf_counter_for_each(struct perf_counter *counter,
        mutex_unlock(&counter->child_mutex);
 }
 
+static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
+{
+       struct perf_counter_context *ctx = counter->ctx;
+       unsigned long size;
+       int ret = 0;
+       u64 value;
+
+       if (!counter->attr.sample_period)
+               return -EINVAL;
+
+       size = copy_from_user(&value, arg, sizeof(value));
+       if (size != sizeof(value))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       spin_lock_irq(&ctx->lock);
+       if (counter->attr.freq) {
+               if (value > sysctl_perf_counter_limit) {
+                       ret = -EINVAL;
+                       goto unlock;
+               }
+
+               counter->attr.sample_freq = value;
+       } else {
+               counter->attr.sample_period = value;
+               counter->hw.sample_period = value;
+
+               perf_log_period(counter, value);
+       }
+unlock:
+       spin_unlock_irq(&ctx->lock);
+
+       return ret;
+}
+
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct perf_counter *counter = file->private_data;
@@ -1623,6 +1662,10 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        case PERF_COUNTER_IOC_REFRESH:
                return perf_counter_refresh(counter, arg);
+
+       case PERF_COUNTER_IOC_PERIOD:
+               return perf_counter_period(counter, (u64 __user *)arg);
+
        default:
                return -ENOTTY;
        }
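
For context, the new PERF_COUNTER_IOC_PERIOD command is driven from user space with a plain ioctl() on the counter file descriptor: perf_counter_period() above copies a u64 from the user pointer and rewrites either attr.sample_freq or attr.sample_period, rejecting a value of 0, a counter opened without a sample period, or a frequency above sysctl_perf_counter_limit. A minimal caller-side sketch, assuming fd was returned by sys_perf_counter_open() and that the ioctl constant is picked up from this tree's <linux/perf_counter.h> (the helper itself is not part of the patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Ask the kernel to retune an already-open counter. Whether the value is
 * interpreted as a period or a frequency depends on attr.freq set at open time. */
static int set_sample_period(int fd, uint64_t value)
{
	return ioctl(fd, PERF_COUNTER_IOC_PERIOD, &value);
}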
@@ -2258,7 +2301,7 @@ static void perf_output_end(struct perf_output_handle *handle)
        struct perf_counter *counter = handle->counter;
        struct perf_mmap_data *data = handle->data;
 
-       int wakeup_events = counter->hw_event.wakeup_events;
+       int wakeup_events = counter->attr.wakeup_events;
 
        if (handle->overflow && wakeup_events) {
                int events = atomic_inc_return(&data->events);
@@ -2298,7 +2341,7 @@ static void perf_counter_output(struct perf_counter *counter,
                                int nmi, struct pt_regs *regs, u64 addr)
 {
        int ret;
-       u64 sample_type = counter->hw_event.sample_type;
+       u64 sample_type = counter->attr.sample_type;
        struct perf_output_handle handle;
        struct perf_event_header header;
        u64 ip;
@@ -2400,7 +2443,7 @@ static void perf_counter_output(struct perf_counter *counter,
                perf_output_put(&handle, addr);
 
        if (sample_type & PERF_SAMPLE_CONFIG)
-               perf_output_put(&handle, counter->hw_event.config);
+               perf_output_put(&handle, counter->attr.config);
 
        if (sample_type & PERF_SAMPLE_CPU)
                perf_output_put(&handle, cpu_entry);
@@ -2432,6 +2475,105 @@ static void perf_counter_output(struct perf_counter *counter,
        perf_output_end(&handle);
 }
 
+/*
+ * fork tracking
+ */
+
+struct perf_fork_event {
+       struct task_struct      *task;
+
+       struct {
+               struct perf_event_header        header;
+
+               u32                             pid;
+               u32                             ppid;
+       } event;
+};
+
+static void perf_counter_fork_output(struct perf_counter *counter,
+                                    struct perf_fork_event *fork_event)
+{
+       struct perf_output_handle handle;
+       int size = fork_event->event.header.size;
+       struct task_struct *task = fork_event->task;
+       int ret = perf_output_begin(&handle, counter, size, 0, 0);
+
+       if (ret)
+               return;
+
+       fork_event->event.pid = perf_counter_pid(counter, task);
+       fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+
+       perf_output_put(&handle, fork_event->event);
+       perf_output_end(&handle);
+}
+
+static int perf_counter_fork_match(struct perf_counter *counter)
+{
+       if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap)
+               return 1;
+
+       return 0;
+}
+
+static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
+                                 struct perf_fork_event *fork_event)
+{
+       struct perf_counter *counter;
+
+       if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+               return;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+               if (perf_counter_fork_match(counter))
+                       perf_counter_fork_output(counter, fork_event);
+       }
+       rcu_read_unlock();
+}
+
+static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+{
+       struct perf_cpu_context *cpuctx;
+       struct perf_counter_context *ctx;
+
+       cpuctx = &get_cpu_var(perf_cpu_context);
+       perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+       put_cpu_var(perf_cpu_context);
+
+       rcu_read_lock();
+       /*
+        * It doesn't really matter which of the child contexts the
+        * fork event ends up in.
+        */
+       ctx = rcu_dereference(current->perf_counter_ctxp);
+       if (ctx)
+               perf_counter_fork_ctx(ctx, fork_event);
+       rcu_read_unlock();
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+       struct perf_fork_event fork_event;
+
+       if (!atomic_read(&nr_comm_counters) &&
+           !atomic_read(&nr_mmap_counters) &&
+           !atomic_read(&nr_munmap_counters))
+               return;
+
+       fork_event = (struct perf_fork_event){
+               .task   = task,
+               .event  = {
+                       .header = {
+                               .type = PERF_EVENT_FORK,
+                               .size = sizeof(fork_event.event),
+                       },
+               },
+       };
+
+       perf_counter_fork_event(&fork_event);
+}
+
 /*
  * comm tracking
  */
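
The record that perf_counter_fork_output() writes into the mmap ring buffer is just the anonymous struct embedded in struct perf_fork_event above: a perf_event_header followed by the child pid and the real parent's pid, both translated via perf_counter_pid(). perf_counter_fork() itself is expected to be called from the fork path; the hook is not part of this file's diff. A hedged consumer-side sketch (struct and handler names are illustrative, only the field layout comes from the code above), assuming the reader walks perf_event_header records out of the counter's mmap area:

#include <stdio.h>
#include <linux/types.h>
#include <linux/perf_counter.h>	/* struct perf_event_header, PERF_EVENT_FORK */

/* Mirror of the record emitted by perf_counter_fork_output(). */
struct fork_record {
	struct perf_event_header	header;	/* header.type == PERF_EVENT_FORK */
	__u32				pid;	/* the forked child */
	__u32				ppid;	/* its real parent */
};

static void handle_event(const struct perf_event_header *hdr)
{
	if (hdr->type == PERF_EVENT_FORK) {
		const struct fork_record *f = (const void *)hdr;
		printf("fork: pid %u, parent %u\n", f->pid, f->ppid);
	}
}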
@@ -2468,11 +2610,9 @@ static void perf_counter_comm_output(struct perf_counter *counter,
        perf_output_end(&handle);
 }
 
-static int perf_counter_comm_match(struct perf_counter *counter,
-                                  struct perf_comm_event *comm_event)
+static int perf_counter_comm_match(struct perf_counter *counter)
 {
-       if (counter->hw_event.comm &&
-           comm_event->event.header.type == PERF_EVENT_COMM)
+       if (counter->attr.comm)
                return 1;
 
        return 0;
@@ -2488,7 +2628,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
 
        rcu_read_lock();
        list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-               if (perf_counter_comm_match(counter, comm_event))
+               if (perf_counter_comm_match(counter))
                        perf_counter_comm_output(counter, comm_event);
        }
        rcu_read_unlock();
@@ -2527,7 +2667,7 @@ void perf_counter_comm(struct task_struct *task)
 {
        struct perf_comm_event comm_event;
 
-       if (!atomic_read(&nr_comm_tracking))
+       if (!atomic_read(&nr_comm_counters))
                return;
 
        comm_event = (struct perf_comm_event){
@@ -2582,11 +2722,11 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 static int perf_counter_mmap_match(struct perf_counter *counter,
                                   struct perf_mmap_event *mmap_event)
 {
-       if (counter->hw_event.mmap &&
+       if (counter->attr.mmap &&
            mmap_event->event.header.type == PERF_EVENT_MMAP)
                return 1;
 
-       if (counter->hw_event.munmap &&
+       if (counter->attr.munmap &&
            mmap_event->event.header.type == PERF_EVENT_MUNMAP)
                return 1;
 
@@ -2665,7 +2805,7 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
 {
        struct perf_mmap_event mmap_event;
 
-       if (!atomic_read(&nr_mmap_tracking))
+       if (!atomic_read(&nr_mmap_counters))
                return;
 
        mmap_event = (struct perf_mmap_event){
@@ -2686,7 +2826,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 {
        struct perf_mmap_event mmap_event;
 
-       if (!atomic_read(&nr_munmap_tracking))
+       if (!atomic_read(&nr_munmap_counters))
                return;
 
        mmap_event = (struct perf_mmap_event){
@@ -2779,11 +2919,20 @@ int perf_counter_overflow(struct perf_counter *counter,
 
        if (!throttle) {
                counter->hw.interrupts++;
-       } else if (counter->hw.interrupts != MAX_INTERRUPTS) {
-               counter->hw.interrupts++;
-               if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
-                       counter->hw.interrupts = MAX_INTERRUPTS;
-                       perf_log_throttle(counter, 0);
+       } else {
+               if (counter->hw.interrupts != MAX_INTERRUPTS) {
+                       counter->hw.interrupts++;
+                       if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
+                               counter->hw.interrupts = MAX_INTERRUPTS;
+                               perf_log_throttle(counter, 0);
+                               ret = 1;
+                       }
+               } else {
+                       /*
+                        * Keep re-disabling the counter even though we
+                        * disabled it on the previous pass - just in case we
+                        * raced with a sched-in and it got enabled again:
+                        */
                        ret = 1;
                }
        }
@@ -2866,8 +3015,8 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
         * In case we exclude kernel IPs or are somehow not in interrupt
         * context, provide the next best thing, the user IP.
         */
-       if ((counter->hw_event.exclude_kernel || !regs) &&
-                       !counter->hw_event.exclude_user)
+       if ((counter->attr.exclude_kernel || !regs) &&
+                       !counter->attr.exclude_user)
                regs = task_pt_regs(current);
 
        if (regs) {
@@ -2941,14 +3090,14 @@ static int perf_swcounter_match(struct perf_counter *counter,
        if (!perf_swcounter_is_counting(counter))
                return 0;
 
-       if (counter->hw_event.config != event_config)
+       if (counter->attr.config != event_config)
                return 0;
 
        if (regs) {
-               if (counter->hw_event.exclude_user && user_mode(regs))
+               if (counter->attr.exclude_user && user_mode(regs))
                        return 0;
 
-               if (counter->hw_event.exclude_kernel && !user_mode(regs))
+               if (counter->attr.exclude_kernel && !user_mode(regs))
                        return 0;
        }
 
@@ -3211,12 +3360,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-       ftrace_profile_disable(perf_event_id(&counter->hw_event));
+       ftrace_profile_disable(perf_event_id(&counter->attr));
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-       int event_id = perf_event_id(&counter->hw_event);
+       int event_id = perf_event_id(&counter->attr);
        int ret;
 
        ret = ftrace_profile_enable(event_id);
@@ -3224,7 +3373,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
                return NULL;
 
        counter->destroy = tp_perf_counter_destroy;
-       counter->hw.sample_period = counter->hw_event.sample_period;
+       counter->hw.sample_period = counter->attr.sample_period;
 
        return &perf_ops_generic;
 }
@@ -3246,7 +3395,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
         * to be kernel events, and page faults are never hypervisor
         * events.
         */
-       switch (perf_event_id(&counter->hw_event)) {
+       switch (perf_event_id(&counter->attr)) {
        case PERF_COUNT_CPU_CLOCK:
                pmu = &perf_ops_cpu_clock;
 
@@ -3278,7 +3427,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
  * Allocate and initialize a counter structure
  */
 static struct perf_counter *
-perf_counter_alloc(struct perf_counter_hw_event *hw_event,
+perf_counter_alloc(struct perf_counter_attr *attr,
                   int cpu,
                   struct perf_counter_context *ctx,
                   struct perf_counter *group_leader,
@@ -3310,37 +3459,41 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 
        mutex_init(&counter->mmap_mutex);
 
-       counter->cpu                    = cpu;
-       counter->hw_event               = *hw_event;
-       counter->group_leader           = group_leader;
-       counter->pmu                    = NULL;
-       counter->ctx                    = ctx;
-       counter->oncpu                  = -1;
+       counter->cpu            = cpu;
+       counter->attr           = *attr;
+       counter->group_leader   = group_leader;
+       counter->pmu            = NULL;
+       counter->ctx            = ctx;
+       counter->oncpu          = -1;
 
-       counter->state = PERF_COUNTER_STATE_INACTIVE;
-       if (hw_event->disabled)
+       counter->ns             = get_pid_ns(current->nsproxy->pid_ns);
+       counter->id             = atomic64_inc_return(&perf_counter_id);
+
+       counter->state          = PERF_COUNTER_STATE_INACTIVE;
+
+       if (attr->disabled)
                counter->state = PERF_COUNTER_STATE_OFF;
 
        pmu = NULL;
 
        hwc = &counter->hw;
-       if (hw_event->freq && hw_event->sample_freq)
-               hwc->sample_period = div64_u64(TICK_NSEC, hw_event->sample_freq);
+       if (attr->freq && attr->sample_freq)
+               hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
        else
-               hwc->sample_period = hw_event->sample_period;
+               hwc->sample_period = attr->sample_period;
 
        /*
         * we currently do not support PERF_SAMPLE_GROUP on inherited counters
         */
-       if (hw_event->inherit && (hw_event->sample_type & PERF_SAMPLE_GROUP))
+       if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
                goto done;
 
-       if (perf_event_raw(hw_event)) {
+       if (perf_event_raw(attr)) {
                pmu = hw_perf_counter_init(counter);
                goto done;
        }
 
-       switch (perf_event_type(hw_event)) {
+       switch (perf_event_type(attr)) {
        case PERF_TYPE_HARDWARE:
                pmu = hw_perf_counter_init(counter);
                break;
@@ -3361,6 +3514,8 @@ done:
                err = PTR_ERR(pmu);
 
        if (err) {
+               if (counter->ns)
+                       put_pid_ns(counter->ns);
                kfree(counter);
                return ERR_PTR(err);
        }
@@ -3368,32 +3523,30 @@ done:
        counter->pmu = pmu;
 
        atomic_inc(&nr_counters);
-       if (counter->hw_event.mmap)
-               atomic_inc(&nr_mmap_tracking);
-       if (counter->hw_event.munmap)
-               atomic_inc(&nr_munmap_tracking);
-       if (counter->hw_event.comm)
-               atomic_inc(&nr_comm_tracking);
+       if (counter->attr.mmap)
+               atomic_inc(&nr_mmap_counters);
+       if (counter->attr.munmap)
+               atomic_inc(&nr_munmap_counters);
+       if (counter->attr.comm)
+               atomic_inc(&nr_comm_counters);
 
        return counter;
 }
 
-static atomic64_t perf_counter_id;
-
 /**
  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
  *
- * @hw_event_uptr:     event type attributes for monitoring/sampling
+ * @attr_uptr: event type attributes for monitoring/sampling
  * @pid:               target pid
  * @cpu:               target cpu
  * @group_fd:          group leader counter fd
  */
 SYSCALL_DEFINE5(perf_counter_open,
-               const struct perf_counter_hw_event __user *, hw_event_uptr,
+               const struct perf_counter_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
        struct perf_counter *counter, *group_leader;
-       struct perf_counter_hw_event hw_event;
+       struct perf_counter_attr attr;
        struct perf_counter_context *ctx;
        struct file *counter_file = NULL;
        struct file *group_file = NULL;
@@ -3405,7 +3558,7 @@ SYSCALL_DEFINE5(perf_counter_open,
        if (flags)
                return -EINVAL;
 
-       if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
+       if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
                return -EFAULT;
 
        /*
@@ -3443,11 +3596,11 @@ SYSCALL_DEFINE5(perf_counter_open,
                /*
                 * Only a group leader can be exclusive or pinned
                 */
-               if (hw_event.exclusive || hw_event.pinned)
+               if (attr.exclusive || attr.pinned)
                        goto err_put_context;
        }
 
-       counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
+       counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
                                     GFP_KERNEL);
        ret = PTR_ERR(counter);
        if (IS_ERR(counter))
@@ -3474,9 +3627,6 @@ SYSCALL_DEFINE5(perf_counter_open,
        list_add_tail(&counter->owner_entry, &current->perf_counter_list);
        mutex_unlock(&current->perf_counter_mutex);
 
-       counter->ns = get_pid_ns(current->nsproxy->pid_ns);
-       counter->id = atomic64_inc_return(&perf_counter_id);
-
        fput_light(counter_file, fput_needed2);
 
 out_fput:
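
With the rename, user space now passes a struct perf_counter_attr to the syscall. A rough caller-side sketch, assuming __NR_perf_counter_open and the attr layout come from this tree's headers; the field names used here (config, sample_period, disabled) and the requirement that flags be 0 all appear in the code above, while group_fd == -1 meaning "own group leader" is an assumption, and the helper itself is illustrative:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static int open_counter(uint64_t config, uint64_t period, pid_t pid, int cpu)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.config        = config;	/* event selector */
	attr.sample_period = period;	/* adjustable later via PERF_COUNTER_IOC_PERIOD */
	attr.disabled      = 1;		/* create the counter stopped */

	/* group_fd == -1: no group leader (assumption); flags must be 0 per the code above */
	return syscall(__NR_perf_counter_open, &attr, pid, cpu, -1, 0UL);
}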
@@ -3515,7 +3665,7 @@ inherit_counter(struct perf_counter *parent_counter,
        if (parent_counter->parent)
                parent_counter = parent_counter->parent;
 
-       child_counter = perf_counter_alloc(&parent_counter->hw_event,
+       child_counter = perf_counter_alloc(&parent_counter->attr,
                                           parent_counter->cpu, child_ctx,
                                           group_leader, GFP_KERNEL);
        if (IS_ERR(child_counter))
@@ -3524,7 +3674,7 @@ inherit_counter(struct perf_counter *parent_counter,
 
        /*
         * Make the child state follow the state of the parent counter,
-        * not its hw_event.disabled bit.  We hold the parent's mutex,
+        * not its attr.disabled bit.  We hold the parent's mutex,
         * so we won't race with perf_counter_{en, dis}able_family.
         */
        if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
@@ -3541,7 +3691,7 @@ inherit_counter(struct perf_counter *parent_counter,
        /*
         * inherit into child's child as well:
         */
-       child_counter->hw_event.inherit = 1;
+       child_counter->attr.inherit = 1;
 
        /*
         * Get a reference to the parent filp - we will fput it
@@ -3797,7 +3947,7 @@ int perf_counter_init_task(struct task_struct *child)
                if (counter != counter->group_leader)
                        continue;
 
-               if (!counter->hw_event.inherit) {
+               if (!counter->attr.inherit) {
                        inherited_all = 0;
                        continue;
                }