/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"
/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x.
 *
 * - Things to consider (ignoring w):
 *   - input  cost_per_request = 16
 *   - output cost_per_result(ys,zs) = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (must fit into 4K bytes)
 *     - 4k = 16 [buffer header] + 16 [request size] * request_count
 *     - 255 requests per hcall
 *   - sometimes it will be more efficient to read extra data and discard
 */
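/*
 * Worked example of the sizing above (illustrative only; the enum below is a
 * hypothetical name and is not used elsewhere in this file): with a 4K
 * buffer, a 16 byte buffer header and 16 bytes per request,
 * (4096 - 16) / 16 = 255 requests fit into a single hcall.  By the cost
 * model above, a rectangle of ys = 4 indexes by zs = 8 would take one
 * 16 byte request and 8 + 8*4 + 4*8 = 72 bytes of result space.
 */
enum {
	HV_24X7_EXAMPLE_REQS_PER_HCALL = (4096 - 16) / 16,	/* = 255 */
};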
/*
 * Example usage:
 *  perf stat -e 'hv_24x7/domain=2,offset=8,starting_index=0,lpar=0xffffffff/'
 */
/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
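/*
 * Worked encoding example (illustrative only): an event with domain=2,
 * starting_index=0, offset=8 packs into the config word as
 *
 *	attr.config  = 2 | (0ull << 16) | (8ull << 32) = 0x0000000800000002
 *
 * and the lpar value occupies bits 0-15 of attr.config1, assuming the usual
 * perf_event format semantics where "config:0-3" selects bits 0..3 of
 * attr.config.
 */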
static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_starting_index.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct kmem_cache *hv_page_cache;
/**
 * read_offset_data - copy data from one buffer to another while treating the
 *		      source buffer as a small view on the total available
 *		      source data.
 *
 * @dest: buffer to copy into
 * @dest_len: length of @dest in bytes
 * @requested_offset: the offset within the source data we want. Must be >= 0
 * @src: buffer to copy data from
 * @src_len: length of @src in bytes
 * @source_offset: the offset in the source data that (src, src_len) refers to.
 *		   Must be >= 0
 *
 * returns the number of bytes copied.
 *
 * The following ascii art shows the various buffer positionings we need to
 * handle, assigns some arbitrary variables to points on the buffer, and then
 * shows how we fiddle with those values to get things we care about (copy
 * start in src and copy len).
 *
 * '.' areas in d are written to.
 *
 * [ascii art diagram of the dest (d) / source (s) buffer overlap cases
 *  omitted]
 *
 * w = requested_offset
 * x = source_offset
 * z = source_offset + src_len
 * v = requested_offset + dest_len
 *
 * w_offset_in_s = w - x = requested_offset - source_offset
 * z_offset_in_s = z - x = src_len
 * v_offset_in_s = v - x = requested_offset + dest_len - src_len
 */
static ssize_t read_offset_data(void *dest, size_t dest_len,
				loff_t requested_offset, void *src,
				size_t src_len, loff_t source_offset)
{
	size_t w_offset_in_s = requested_offset - source_offset;
	size_t z_offset_in_s = src_len;
	size_t v_offset_in_s = requested_offset + dest_len - src_len;
	size_t u_offset_in_s = min(z_offset_in_s, v_offset_in_s);
	size_t copy_len = u_offset_in_s - w_offset_in_s;

	if (requested_offset < 0 || source_offset < 0)
		return -EINVAL;

	if (z_offset_in_s <= w_offset_in_s)
		return 0;

	memcpy(dest, src + w_offset_in_s, copy_len);

	return copy_len;
}
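/*
 * Worked example (illustrative): for the catalog_read() caller below, a read
 * of dest_len = 100 bytes at requested_offset = 5000 from a 4096 byte page
 * holding catalog bytes [4096, 8192) (src_len = 4096, source_offset = 4096)
 * gives w_offset_in_s = 5000 - 4096 = 904,
 * v_offset_in_s = 5000 + 100 - 4096 = 1004, and
 * u_offset_in_s = min(4096, 1004) = 1004, so copy_len = 100 bytes are copied
 * starting at src + 904.
 */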
static unsigned long h_get_24x7_catalog_page(char page[static 4096],
					     u32 version, u32 index)
{
	WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
			virt_to_phys(page), version, index);
}
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
	loff_t page_offset = 0;
	uint32_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;

	if (!page)
		return -ENOMEM;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be32_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	page_count = count / 4096;

	if (page_offset >= catalog_page_len)
		goto e_free;

	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	ret = read_offset_data(buf, count, offset,
			       page, 4096, page_offset * 4096);

e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
		 offset, page_offset, count, page_count, catalog_len,
		 catalog_page_len, ret);

	return ret;
}
#define PAGE_0_ATTR(_name, _fmt, _expr)					\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *dev_attr,		\
			    char *buf)					\
{									\
	unsigned long hret;						\
	ssize_t ret = 0;						\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);	\
	struct hv_24x7_catalog_page_0 *page_0 = page;			\
	if (!page)							\
		return -ENOMEM;						\
	hret = h_get_24x7_catalog_page(page, 0, 0);			\
	if (hret) {							\
		ret = -EIO;						\
		goto e_free;						\
	}								\
	ret = sprintf(buf, _fmt, _expr);				\
e_free:									\
	kmem_cache_free(hv_page_cache, page);				\
	return ret;							\
}									\
static DEVICE_ATTR_RO(_name)
PAGE_0_ATTR(catalog_version, "%lld\n",
	    (unsigned long long)be32_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
	    (unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0 /* real length varies */);
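/*
 * Userspace usage sketch (illustrative; the path assumes the pmu is
 * registered as "hv_24x7" and that the attribute group below is named
 * "interface"):
 *
 *	dd if=/sys/bus/event_source/devices/hv_24x7/interface/catalog of=24x7_catalog bs=4096
 *
 * catalog_read() above satisfies such reads one 4096 byte catalog page at a
 * time.
 */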
static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

static struct attribute_group if_group = {
	.name = "interface",
	.bin_attrs = if_bin_attrs,
	.attrs = if_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&if_group,
	NULL,
};
static bool is_physical_domain(int domain)
{
	return domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CHIP ||
	       domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
}
static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
					 u16 lpar, u64 *res,
					 bool success_expected)
{
	unsigned long ret;

	/*
	 * request_buffer and result_buffer are not required to be 4k aligned,
	 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
	 * the simplest way to ensure that (both are well under 4k in size, so
	 * a 4k aligned object cannot straddle a 4k boundary).
	 */
	struct {
		struct hv_24x7_request_buffer buf;
		struct hv_24x7_request req;
	} __packed __aligned(4096) request_buffer = {
		.buf = {
			.interface_version = HV_24X7_IF_VERSION_CURRENT,
			.num_requests = 1,
		},
		.req = {
			.performance_domain = domain,
			.data_size = cpu_to_be16(8),
			.data_offset = cpu_to_be32(offset),
			.starting_lpar_ix = cpu_to_be16(lpar),
			.max_num_lpars = cpu_to_be16(1),
			.starting_ix = cpu_to_be16(ix),
			.max_ix = cpu_to_be16(1),
		},
	};

	struct {
		struct hv_24x7_data_result_buffer buf;
		struct hv_24x7_result res;
		struct hv_24x7_result_element elem;
		__be64 result;
	} __packed __aligned(4096) result_buffer = {};

	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(&request_buffer), sizeof(request_buffer),
			virt_to_phys(&result_buffer), sizeof(result_buffer));

	if (ret) {
		if (success_expected)
			pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
					domain, offset, ix, lpar,
					ret, ret,
					result_buffer.buf.detailed_rc,
					result_buffer.buf.failing_request_ix);
		return ret;
	}

	*res = be64_to_cpu(result_buffer.result);
	return ret;
}
static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
					bool success_expected)
{
	return single_24x7_request(event_get_domain(event),
			event_get_offset(event),
			event_get_starting_index(event),
			event_get_lpar(event),
			res,
			success_expected);
}
static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
				event->attr.config,
				event_get_reserved1(event),
				event->attr.config1,
				event_get_reserved2(event),
				event->attr.config2,
				event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest ||
	    is_sampling_event(event)) /* no sampling */
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* PHYSICAL domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
		(event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
				is_physical_domain(domain),
				event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (event_24x7_request(event, &ct, false)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}
static u64 h_24x7_get_value(struct perf_event *event)
{
	unsigned long ret;
	u64 ct;

	ret = event_24x7_request(event, &ct, true);
	if (ret)
		/* We checked this in event init, shouldn't fail here... */
		return 0;

	return ct;
}

static void h_24x7_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = h_24x7_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void h_24x7_event_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_update(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_24x7_event_start(event, flags);

	return 0;
}

static int h_24x7_event_idx(struct perf_event *event)
{
	return 0;
}
static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init = h_24x7_event_init,
	.add = h_24x7_event_add,
	.del = h_24x7_event_stop,
	.start = h_24x7_event_start,
	.stop = h_24x7_event_stop,
	.read = h_24x7_event_update,
	.event_idx = h_24x7_event_idx,
};
static int hv_24x7_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_info("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_info("could not obtain capabilities, error 0x%lx, not enabling\n",
			hret);
		return -ENODEV;
	}

	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
	if (!hv_page_cache)
		return -ENOMEM;

	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_24x7_init);