/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events and use them from tools without special MSR access
 * code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *      MSR_CORE_C1_RES: CORE C1 Residency Counter
 *                       perf code: 0x00
 *                       Available model: SLM,AMT,GLM
 *                       Scope: Core (each processor core has a MSR)
 *      MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *                             perf code: 0x01
 *                             Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM
 *                             Scope: Core
 *      MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *                             perf code: 0x02
 *                             Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
 *                                              SKL,KNL,GLM
 *                             Scope: Core
 *      MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *                             perf code: 0x03
 *                             Available model: SNB,IVB,HSW,BDW,SKL
 *                             Scope: Core
 *      MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *                             perf code: 0x00
 *                             Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM
 *                             Scope: Package (physical package)
 *      MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *                             perf code: 0x01
 *                             Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
 *                                              GLM
 *                             Scope: Package (physical package)
 *      MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *                             perf code: 0x02
 *                             Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
 *                                              SKL,KNL,GLM
 *                             Scope: Package (physical package)
 *      MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *                             perf code: 0x03
 *                             Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *                             Scope: Package (physical package)
 *      MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *                             perf code: 0x04
 *                             Available model: HSW ULT only
 *                             Scope: Package (physical package)
 *      MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *                             perf code: 0x05
 *                             Available model: HSW ULT only
 *                             Scope: Package (physical package)
 *      MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *                             perf code: 0x06
 *                             Available model: HSW ULT, GLM
 *                             Scope: Package (physical package)
 *
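 * Example usage with the perf tool (illustrative; which events are
 * actually available depends on the CPU model listed above, and only
 * system-wide counting is supported):
 *
 *   perf stat -e cstate_core/c6-residency/ -a sleep 1
 *   perf stat -e cstate_pkg/c2-residency/ -a sleep 1
 *
 * For raw perf_event_open() users, the dynamic PMU type can be read
 * from /sys/bus/event_source/devices/cstate_core/type (likewise for
 * cstate_pkg), and the perf code from the table above goes into
 * attr.config.
 *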
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

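/*
 * Define a sysfs "format" attribute describing how the event code is laid
 * out in perf_event_attr::config (exposed under <pmu>/format/event).
 */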
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)         \
static ssize_t __cstate_##_var##_show(struct kobject *kobj,     \
                                struct kobj_attribute *attr,    \
                                char *page)                     \
{                                                               \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
        return sprintf(page, _format "\n");                     \
}                                                               \
static struct kobj_attribute format_attr_##_var =               \
        __ATTR(_name, 0444, __cstate_##_var##_show, NULL)

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf);

/* Model -> events mapping */
struct cstate_model {
        unsigned long           core_events;
        unsigned long           pkg_events;
        unsigned long           quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR   (1UL << 0)
#define KNL_CORE_C6_MSR         (1UL << 1)

struct perf_cstate_msr {
        u64     msr;
        struct  perf_pmu_events_attr *attr;
};


/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
        PERF_CSTATE_CORE_C1_RES = 0,
        PERF_CSTATE_CORE_C3_RES,
        PERF_CSTATE_CORE_C6_RES,
        PERF_CSTATE_CORE_C7_RES,

        PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

static struct perf_cstate_msr core_msr[] = {
        [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,          &evattr_cstate_core_c1 },
        [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,    &evattr_cstate_core_c3 },
        [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,    &evattr_cstate_core_c6 },
        [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,    &evattr_cstate_core_c7 },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
        NULL,
};

static struct attribute_group core_events_attr_group = {
        .name = "events",
        .attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
        &format_attr_core_event.attr,
        NULL,
};

static struct attribute_group core_format_attr_group = {
        .name = "format",
        .attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group cpumask_attr_group = {
        .attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
        &core_events_attr_group,
        &core_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
        PERF_CSTATE_PKG_C2_RES = 0,
        PERF_CSTATE_PKG_C3_RES,
        PERF_CSTATE_PKG_C6_RES,
        PERF_CSTATE_PKG_C7_RES,
        PERF_CSTATE_PKG_C8_RES,
        PERF_CSTATE_PKG_C9_RES,
        PERF_CSTATE_PKG_C10_RES,

        PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
        [PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,      &evattr_cstate_pkg_c2 },
        [PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,      &evattr_cstate_pkg_c3 },
        [PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,      &evattr_cstate_pkg_c6 },
        [PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,      &evattr_cstate_pkg_c7 },
        [PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,      &evattr_cstate_pkg_c8 },
        [PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,      &evattr_cstate_pkg_c9 },
        [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,    &evattr_cstate_pkg_c10 },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
        NULL,
};

static struct attribute_group pkg_events_attr_group = {
        .name = "events",
        .attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
        &format_attr_pkg_event.attr,
        NULL,
};
static struct attribute_group pkg_format_attr_group = {
        .name = "format",
        .attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
        &pkg_events_attr_group,
        &pkg_format_attr_group,
        &cpumask_attr_group,
        NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct pmu *pmu = dev_get_drvdata(dev);

        if (pmu == &cstate_core_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
        else if (pmu == &cstate_pkg_pmu)
                return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
        else
                return 0;
}

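/*
 * Validate the requested event and redirect it to the designated reader
 * CPU of the target core/package, since the residency MSRs are shared at
 * core/package scope.
 */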
static int cstate_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config;
        int cpu;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        if (event->pmu == &cstate_core_pmu) {
                if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
                        return -EINVAL;
                if (!core_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = core_msr[cfg].msr;
                cpu = cpumask_any_and(&cstate_core_cpu_mask,
                                      topology_sibling_cpumask(event->cpu));
        } else if (event->pmu == &cstate_pkg_pmu) {
                if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
                        return -EINVAL;
                if (!pkg_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = pkg_msr[cfg].msr;
                cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
                                      topology_core_cpumask(event->cpu));
        } else {
                return -ENOENT;
        }

        if (cpu >= nr_cpu_ids)
                return -ENODEV;

        event->cpu = cpu;
        event->hw.config = cfg;
        event->hw.idx = -1;
        return 0;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
        u64 val;

        rdmsrl(event->hw.event_base, val);
        return val;
}

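/*
 * The residency MSRs are free running and never reset, so the event count
 * is accumulated as the delta between consecutive reads. The cmpxchg loop
 * retries if prev_count was updated concurrently by another reader.
 */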
static void cstate_pmu_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = cstate_pmu_read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
        local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
        cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
        cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
        if (mode & PERF_EF_START)
                cstate_pmu_event_start(event, mode);

        return 0;
}

/*
 * Check if the exiting CPU is the designated reader. If so, migrate the
 * events to a valid target, if one is available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
        unsigned int target;

        if (has_cstate_core &&
            cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &cstate_core_cpu_mask);
                        perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
                }
        }

        if (has_cstate_pkg &&
            cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

                target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
                        perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
                }
        }
        return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
        unsigned int target;

        /*
         * If this is the first online thread of that core, set it in
         * the core cpu mask as the designated reader.
         */
        target = cpumask_any_and(&cstate_core_cpu_mask,
                                 topology_sibling_cpumask(cpu));

        if (has_cstate_core && target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

        /*
         * If this is the first online thread of that package, set it
         * in the package cpu mask as the designated reader.
         */
        target = cpumask_any_and(&cstate_pkg_cpu_mask,
                                 topology_core_cpumask(cpu));
        if (has_cstate_pkg && target >= nr_cpu_ids)
                cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

        return 0;
}

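/*
 * Neither PMU supports per-task contexts (perf_invalid_context) and the
 * counters cannot raise interrupts (PERF_PMU_CAP_NO_INTERRUPT), so only
 * system-wide counting is possible.
 */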
static struct pmu cstate_core_pmu = {
        .attr_groups    = core_attr_groups,
        .name           = "cstate_core",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add,
        .del            = cstate_pmu_event_del,
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
        .module         = THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
        .attr_groups    = pkg_attr_groups,
        .name           = "cstate_pkg",
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = cstate_pmu_event_init,
        .add            = cstate_pmu_event_add,
        .del            = cstate_pmu_event_del,
        .start          = cstate_pmu_event_start,
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
        .module         = THIS_MODULE,
};

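/* Per-model tables of the supported core/package residency events */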
static const struct cstate_model nhm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES) |
                                  BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES) |
                                  BIT(PERF_CSTATE_CORE_C7_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C7_RES) |
                                  BIT(PERF_CSTATE_PKG_C8_RES) |
                                  BIT(PERF_CSTATE_PKG_C9_RES) |
                                  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C1_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C6_RES),
        .quirks                 = SLM_PKG_C6_USE_C7_MSR,
};


static const struct cstate_model knl_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES),
        .quirks                 = KNL_CORE_C6_MSR,
};


static const struct cstate_model glm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C1_RES) |
                                  BIT(PERF_CSTATE_CORE_C3_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),

        .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
                                  BIT(PERF_CSTATE_PKG_C3_RES) |
                                  BIT(PERF_CSTATE_PKG_C6_RES) |
                                  BIT(PERF_CSTATE_PKG_C10_RES),
};


#define X86_CSTATES_MODEL(model, states)                                \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,    snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),

        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
        { },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

/*
 * Probe the cstate events and insert the available ones into the sysfs
 * attributes. Return false if there are no available events.
 */
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
                                   struct perf_cstate_msr *msr,
                                   struct attribute **attrs)
{
        bool found = false;
        unsigned int bit;
        u64 val;

        for (bit = 0; bit < max; bit++) {
                if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
                        *attrs++ = &msr[bit].attr->attr.attr;
                        found = true;
                } else {
                        msr[bit].attr = NULL;
                }
        }
        *attrs = NULL;

        return found;
}

static int __init cstate_probe(const struct cstate_model *cm)
{
        /* SLM has different MSR for PKG C6 */
        if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
                pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

        /* KNL has different MSR for CORE C6 */
        if (cm->quirks & KNL_CORE_C6_MSR)
                pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;


        has_cstate_core = cstate_probe_msr(cm->core_events,
                                           PERF_CSTATE_CORE_EVENT_MAX,
                                           core_msr, core_events_attrs);

        has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
                                          PERF_CSTATE_PKG_EVENT_MAX,
                                          pkg_msr, pkg_events_attrs);

        return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

        if (has_cstate_core)
                perf_pmu_unregister(&cstate_core_pmu);

        if (has_cstate_pkg)
                perf_pmu_unregister(&cstate_pkg_pmu);
}

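/*
 * cpuhp_setup_state() invokes cstate_cpu_init() on every CPU that is
 * already online, so the designated reader masks are populated before
 * the PMUs are registered below.
 */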
static int __init cstate_init(void)
{
        int err;

        cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
                          "perf/x86/cstate:starting", cstate_cpu_init, NULL);
        cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
                          "perf/x86/cstate:online", NULL, cstate_cpu_exit);

        if (has_cstate_core) {
                err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
                if (err) {
                        has_cstate_core = false;
                        pr_info("Failed to register cstate core pmu\n");
                        cstate_cleanup();
                        return err;
                }
        }

        if (has_cstate_pkg) {
                err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
                if (err) {
                        has_cstate_pkg = false;
                        pr_info("Failed to register cstate pkg pmu\n");
                        cstate_cleanup();
                        return err;
                }
        }
        return 0;
}

static int __init cstate_pmu_init(void)
{
        const struct x86_cpu_id *id;
        int err;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        id = x86_match_cpu(intel_cstates_match);
        if (!id)
                return -ENODEV;

        err = cstate_probe((const struct cstate_model *) id->driver_data);
        if (err)
                return err;

        return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
        cstate_cleanup();
}
module_exit(cstate_pmu_exit);