Merge branch 'perf/urgent' into perf/core
author Ingo Molnar <mingo@kernel.org>
Sun, 21 Apr 2013 08:57:33 +0000 (10:57 +0200)
committer Ingo Molnar <mingo@kernel.org>
Sun, 21 Apr 2013 08:57:33 +0000 (10:57 +0200)
Conflicts:
arch/x86/kernel/cpu/perf_event_intel.c

Merge in the latest fixes before applying new patches, resolve the conflict.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
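
As a rough sketch of the workflow this commit message describes (pull the latest perf/urgent fixes into perf/core, then hand-resolve the clashing file), the steps would look roughly like this; the branch and file names are taken from this commit, and the conflict notice shown is only indicative of git's usual output:

  git checkout perf/core
  git merge perf/urgent
  # git stops and reports something like:
  #   CONFLICT (content): Merge conflict in arch/x86/kernel/cpu/perf_event_intel.c
  # edit the file to combine both sides, then mark it resolved and record the merge
  git add arch/x86/kernel/cpu/perf_event_intel.c
  git commit -s    # -s adds the Signed-off-by trailer

In the combined diff below, each changed line carries two prefix columns: the first compares the merge result against the previous perf/core tip (parent 1), the second against perf/urgent (parent 2); a '+' in a column means the line is not present in that parent.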
Makefile
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
kernel/events/core.c

diff --combined Makefile
index 70fd2748388722418ad00a683f5b9bfb8382fe8d,9cf6783c2ec37a09f7eab3d771243e4219e616d5..57fda9ce0419fa57a66aaddb20646d426c53101d
--- a/Makefile
+++ b/Makefile
@@@ -1,7 -1,7 +1,7 @@@
  VERSION = 3
  PATCHLEVEL = 9
  SUBLEVEL = 0
- EXTRAVERSION = -rc5
+ EXTRAVERSION = -rc7
  NAME = Unicycling Gorilla
  
  # *DOCUMENTATION*
@@@ -1331,11 -1331,11 +1331,11 @@@ kernelversion
  # Clear a bunch of variables before executing the submake
  tools/: FORCE
        $(Q)mkdir -p $(objtree)/tools
 -      $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/
 +      $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/
  
  tools/%: FORCE
        $(Q)mkdir -p $(objtree)/tools
 -      $(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/ $*
 +      $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/ $*
  
  # Single targets
  # ---------------------------------------------------------------------------
diff --combined arch/x86/kernel/cpu/perf_event_intel.c
index 2ad2374d53d4f3ee26e0815f59b081e6229f38c9,cc45deb791b01d103c01cbc527f8bfd023d51879..ffd6050a1de44dd69ec419e01c3dc8d15363dfcd
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@@ -81,7 -81,6 +81,7 @@@ static struct event_constraint intel_ne
  static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
  {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 +      INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
        EVENT_EXTRA_END
  };
  
@@@ -109,8 -108,6 +109,8 @@@ static struct event_constraint intel_sn
        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 +      INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 +      INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
        EVENT_CONSTRAINT_END
  };
  
@@@ -139,7 -136,6 +139,7 @@@ static struct extra_reg intel_westmere_
  {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
 +      INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
        EVENT_EXTRA_END
  };
  
@@@ -157,27 -153,17 +157,34 @@@ static struct event_constraint intel_ge
  };
  
  static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
-       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
-       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 +      INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
++      INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+       EVENT_EXTRA_END
+ };
+ static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
+       INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+       INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
        EVENT_EXTRA_END
  };
  
 +EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 +EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
 +EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
 +
 +struct attribute *nhm_events_attrs[] = {
 +      EVENT_PTR(mem_ld_nhm),
 +      NULL,
 +};
 +
 +struct attribute *snb_events_attrs[] = {
 +      EVENT_PTR(mem_ld_snb),
 +      EVENT_PTR(mem_st_snb),
 +      NULL,
 +};
 +
  static u64 intel_pmu_event_map(int hw_event)
  {
        return intel_perfmon_event_map[hw_event];
@@@ -1412,11 -1398,8 +1419,11 @@@ x86_get_event_constraints(struct cpu_hw
  
        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
 -                      if ((event->hw.config & c->cmask) == c->code)
 +                      if ((event->hw.config & c->cmask) == c->code) {
 +                              /* hw.flags zeroed at initialization */
 +                              event->hw.flags |= c->flags;
                                return c;
 +                      }
                }
        }
  
@@@ -1461,7 -1444,6 +1468,7 @@@ intel_put_shared_regs_event_constraints
  static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
  {
 +      event->hw.flags = 0;
        intel_put_shared_regs_event_constraints(cpuc, event);
  }
  
@@@ -1785,8 -1767,6 +1792,8 @@@ static void intel_pmu_flush_branch_stac
  
  PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
  
 +PMU_FORMAT_ATTR(ldlat, "config1:0-15");
 +
  static struct attribute *intel_arch3_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_cmask.attr,
  
        &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
 +      &format_attr_ldlat.attr, /* PEBS load latency */
        NULL,
  };
  
@@@ -2058,8 -2037,6 +2065,8 @@@ __init int intel_pmu_init(void
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.extra_regs = intel_nehalem_extra_regs;
  
 +              x86_pmu.cpu_events = nhm_events_attrs;
 +
                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                x86_pmu.extra_regs = intel_westmere_extra_regs;
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
  
 +              x86_pmu.cpu_events = nhm_events_attrs;
 +
                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               x86_pmu.extra_regs = intel_snb_extra_regs;
+               if (boot_cpu_data.x86_model == 45)
+                       x86_pmu.extra_regs = intel_snbep_extra_regs;
+               else
+                       x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;
  
 +              x86_pmu.cpu_events = snb_events_attrs;
 +
                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                x86_pmu.event_constraints = intel_ivb_event_constraints;
                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               x86_pmu.extra_regs = intel_snb_extra_regs;
+               if (boot_cpu_data.x86_model == 62)
+                       x86_pmu.extra_regs = intel_snbep_extra_regs;
+               else
+                       x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;
  
 +              x86_pmu.cpu_events = snb_events_attrs;
 +
                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
diff --combined arch/x86/kernel/cpu/perf_event_intel_ds.c
index d467561c805fd7da302dbf6824e0c57df8b69106,26830f3af0df70eebb438ddc82f74240b49d618e..60250f68705291d539cf562b4c5f507b7d0e5a43
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@@ -24,130 -24,6 +24,130 @@@ struct pebs_record_32 
  
   */
  
 +union intel_x86_pebs_dse {
 +      u64 val;
 +      struct {
 +              unsigned int ld_dse:4;
 +              unsigned int ld_stlb_miss:1;
 +              unsigned int ld_locked:1;
 +              unsigned int ld_reserved:26;
 +      };
 +      struct {
 +              unsigned int st_l1d_hit:1;
 +              unsigned int st_reserved1:3;
 +              unsigned int st_stlb_miss:1;
 +              unsigned int st_locked:1;
 +              unsigned int st_reserved2:26;
 +      };
 +};
 +
 +
 +/*
 + * Map PEBS Load Latency Data Source encodings to generic
 + * memory data source information
 + */
 +#define P(a, b) PERF_MEM_S(a, b)
 +#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
 +#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
 +
 +static const u64 pebs_data_source[] = {
 +      P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
 +      OP_LH | P(LVL, L1)  | P(SNOOP, NONE),   /* 0x01: L1 local */
 +      OP_LH | P(LVL, LFB) | P(SNOOP, NONE),   /* 0x02: LFB hit */
 +      OP_LH | P(LVL, L2)  | P(SNOOP, NONE),   /* 0x03: L2 hit */
 +      OP_LH | P(LVL, L3)  | P(SNOOP, NONE),   /* 0x04: L3 hit */
 +      OP_LH | P(LVL, L3)  | P(SNOOP, MISS),   /* 0x05: L3 hit, snoop miss */
 +      OP_LH | P(LVL, L3)  | P(SNOOP, HIT),    /* 0x06: L3 hit, snoop hit */
 +      OP_LH | P(LVL, L3)  | P(SNOOP, HITM),   /* 0x07: L3 hit, snoop hitm */
 +      OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
 +      OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
 +      OP_LH | P(LVL, LOC_RAM)  | P(SNOOP, HIT),  /* 0x0a: L3 miss, shared */
 +      OP_LH | P(LVL, REM_RAM1) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
 +      OP_LH | P(LVL, LOC_RAM)  | SNOOP_NONE_MISS,/* 0x0c: L3 miss, excl */
 +      OP_LH | P(LVL, REM_RAM1) | SNOOP_NONE_MISS,/* 0x0d: L3 miss, excl */
 +      OP_LH | P(LVL, IO)  | P(SNOOP, NONE), /* 0x0e: I/O */
 +      OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
 +};
 +
 +static u64 precise_store_data(u64 status)
 +{
 +      union intel_x86_pebs_dse dse;
 +      u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);
 +
 +      dse.val = status;
 +
 +      /*
 +       * bit 4: TLB access
 +       * 1 = stored missed 2nd level TLB
 +       *
 +       * so it either hit the walker or the OS
 +       * otherwise hit 2nd level TLB
 +       */
 +      if (dse.st_stlb_miss)
 +              val |= P(TLB, MISS);
 +      else
 +              val |= P(TLB, HIT);
 +
 +      /*
 +       * bit 0: hit L1 data cache
 +       * if not set, then all we know is that
 +       * it missed L1D
 +       */
 +      if (dse.st_l1d_hit)
 +              val |= P(LVL, HIT);
 +      else
 +              val |= P(LVL, MISS);
 +
 +      /*
 +       * bit 5: Locked prefix
 +       */
 +      if (dse.st_locked)
 +              val |= P(LOCK, LOCKED);
 +
 +      return val;
 +}
 +
 +static u64 load_latency_data(u64 status)
 +{
 +      union intel_x86_pebs_dse dse;
 +      u64 val;
 +      int model = boot_cpu_data.x86_model;
 +      int fam = boot_cpu_data.x86;
 +
 +      dse.val = status;
 +
 +      /*
 +       * use the mapping table for bit 0-3
 +       */
 +      val = pebs_data_source[dse.ld_dse];
 +
 +      /*
 +       * Nehalem models do not support TLB, Lock infos
 +       */
 +      if (fam == 0x6 && (model == 26 || model == 30
 +          || model == 31 || model == 46)) {
 +              val |= P(TLB, NA) | P(LOCK, NA);
 +              return val;
 +      }
 +      /*
 +       * bit 4: TLB access
 +       * 0 = did not miss 2nd level TLB
 +       * 1 = missed 2nd level TLB
 +       */
 +      if (dse.ld_stlb_miss)
 +              val |= P(TLB, MISS) | P(TLB, L2);
 +      else
 +              val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
 +
 +      /*
 +       * bit 5: locked prefix
 +       */
 +      if (dse.ld_locked)
 +              val |= P(LOCK, LOCKED);
 +
 +      return val;
 +}
 +
  struct pebs_record_core {
        u64 flags, ip;
        u64 ax, bx, cx, dx;
@@@ -438,10 -314,11 +438,11 @@@ int intel_pmu_drain_bts_buffer(void
        if (top <= at)
                return 0;
  
+       memset(&regs, 0, sizeof(regs));
        ds->bts_index = ds->bts_buffer_base;
  
        perf_sample_data_init(&data, 0, event->hw.last_period);
-       regs.ip     = 0;
  
        /*
         * Prepare a generic sample, i.e. fill in the invariant fields.
@@@ -488,7 -365,7 +489,7 @@@ struct event_constraint intel_atom_pebs
  };
  
  struct event_constraint intel_nehalem_pebs_event_constraints[] = {
 -      INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
 +      INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
        INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
  };
  
  struct event_constraint intel_westmere_pebs_event_constraints[] = {
 -      INTEL_EVENT_CONSTRAINT(0x0b, 0xf),    /* MEM_INST_RETIRED.* */
 +      INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
        INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
        INTEL_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
@@@ -523,8 -400,7 +524,8 @@@ struct event_constraint intel_snb_pebs_
        INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
        INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
 -      INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
 +      INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 +      INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@@ -538,8 -414,7 +539,8 @@@ struct event_constraint intel_ivb_pebs_
          INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
          INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
          INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
 -        INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
 +        INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 +      INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
          INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
          INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
          INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@@ -556,10 -431,8 +557,10 @@@ struct event_constraint *intel_pebs_con
  
        if (x86_pmu.pebs_constraints) {
                for_each_event_constraint(c, x86_pmu.pebs_constraints) {
 -                      if ((event->hw.config & c->cmask) == c->code)
 +                      if ((event->hw.config & c->cmask) == c->code) {
 +                              event->hw.flags |= c->flags;
                                return c;
 +                      }
                }
        }
  
@@@ -574,11 -447,6 +575,11 @@@ void intel_pmu_pebs_enable(struct perf_
        hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
  
        cpuc->pebs_enabled |= 1ULL << hwc->idx;
 +
 +      if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
 +              cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
 +      else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
 +              cpuc->pebs_enabled |= 1ULL << 63;
  }
  
  void intel_pmu_pebs_disable(struct perf_event *event)
@@@ -691,51 -559,20 +692,51 @@@ static void __intel_pmu_pebs_event(stru
                                   struct pt_regs *iregs, void *__pebs)
  {
        /*
 -       * We cast to pebs_record_core since that is a subset of
 -       * both formats and we don't use the other fields in this
 -       * routine.
 +       * We cast to pebs_record_nhm to get the load latency data
 +       * if extra_reg MSR_PEBS_LD_LAT_THRESHOLD used
         */
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 -      struct pebs_record_core *pebs = __pebs;
 +      struct pebs_record_nhm *pebs = __pebs;
        struct perf_sample_data data;
        struct pt_regs regs;
 +      u64 sample_type;
 +      int fll, fst;
  
        if (!intel_pmu_save_and_restart(event))
                return;
  
 +      fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
 +      fst = event->hw.flags & PERF_X86_EVENT_PEBS_ST;
 +
        perf_sample_data_init(&data, 0, event->hw.last_period);
  
 +      data.period = event->hw.last_period;
 +      sample_type = event->attr.sample_type;
 +
 +      /*
 +       * if PEBS-LL or PreciseStore
 +       */
 +      if (fll || fst) {
 +              if (sample_type & PERF_SAMPLE_ADDR)
 +                      data.addr = pebs->dla;
 +
 +              /*
 +               * Use latency for weight (only avail with PEBS-LL)
 +               */
 +              if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
 +                      data.weight = pebs->lat;
 +
 +              /*
 +               * data.data_src encodes the data source
 +               */
 +              if (sample_type & PERF_SAMPLE_DATA_SRC) {
 +                      if (fll)
 +                              data.data_src.val = load_latency_data(pebs->dse);
 +                      else
 +                              data.data_src.val = precise_store_data(pebs->dse);
 +              }
 +      }
 +
        /*
         * We use the interrupt regs as a base because the PEBS record
         * does not contain a full regs set, specifically it seems to
diff --combined kernel/events/core.c
index 98c0845fcd208fe676159e24085fa1c82fadcf51,4d3124b392774322ae1b33f167a10c1046d886d9..8097df340eedb25f85fe574f172b56f1ad4b29e6
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@@ -37,7 -37,6 +37,7 @@@
  #include <linux/ftrace_event.h>
  #include <linux/hw_breakpoint.h>
  #include <linux/mm_types.h>
 +#include <linux/cgroup.h>
  
  #include "internal.h"
  
@@@ -234,20 -233,6 +234,20 @@@ static void perf_ctx_unlock(struct perf
  
  #ifdef CONFIG_CGROUP_PERF
  
 +/*
 + * perf_cgroup_info keeps track of time_enabled for a cgroup.
 + * This is a per-cpu dynamically allocated data structure.
 + */
 +struct perf_cgroup_info {
 +      u64                             time;
 +      u64                             timestamp;
 +};
 +
 +struct perf_cgroup {
 +      struct cgroup_subsys_state      css;
 +      struct perf_cgroup_info __percpu *info;
 +};
 +
  /*
   * Must ensure cgroup is pinned (css_get) before calling
   * this function. In other words, we cannot call this function
@@@ -976,15 -961,9 +976,15 @@@ static void perf_event__header_size(str
        if (sample_type & PERF_SAMPLE_PERIOD)
                size += sizeof(data->period);
  
 +      if (sample_type & PERF_SAMPLE_WEIGHT)
 +              size += sizeof(data->weight);
 +
        if (sample_type & PERF_SAMPLE_READ)
                size += event->read_size;
  
 +      if (sample_type & PERF_SAMPLE_DATA_SRC)
 +              size += sizeof(data->data_src.val);
 +
        event->header_size = size;
  }
  
@@@ -4199,12 -4178,6 +4199,12 @@@ void perf_output_sample(struct perf_out
                perf_output_sample_ustack(handle,
                                          data->stack_user_size,
                                          data->regs_user.regs);
 +
 +      if (sample_type & PERF_SAMPLE_WEIGHT)
 +              perf_output_put(handle, data->weight);
 +
 +      if (sample_type & PERF_SAMPLE_DATA_SRC)
 +              perf_output_put(handle, data->data_src.val);
  }
  
  void perf_prepare_sample(struct perf_event_header *header,
@@@ -4764,7 -4737,8 +4764,8 @@@ static void perf_event_mmap_event(struc
        } else {
                if (arch_vma_name(mmap_event->vma)) {
                        name = strncpy(tmp, arch_vma_name(mmap_event->vma),
-                                      sizeof(tmp));
+                                      sizeof(tmp) - 1);
+                       tmp[sizeof(tmp) - 1] = '\0';
                        goto got_name;
                }
  
@@@ -4791,9 -4765,6 +4792,9 @@@ got_name
        mmap_event->file_name = name;
        mmap_event->file_size = size;
  
 +      if (!(vma->vm_flags & VM_EXEC))
 +              mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
 +
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
  
        rcu_read_lock();
@@@ -5360,7 -5331,7 +5361,7 @@@ static void sw_perf_event_destroy(struc
  
  static int perf_swevent_init(struct perf_event *event)
  {
-       int event_id = event->attr.config;
+       u64 event_id = event->attr.config;
  
        if (event->attr.type != PERF_TYPE_SOFTWARE)
                return -ENOENT;
@@@ -6016,6 -5987,7 +6017,7 @@@ skip_type
        if (pmu->pmu_cpu_context)
                goto got_cpu_context;
  
+       ret = -ENOMEM;
        pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
        if (!pmu->pmu_cpu_context)
                goto free_dev;