#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/err.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"

int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}
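
	/*
	 * Open the tracepoint counter on all CPUs in the map, for this
	 * thread only, so the kernel accumulates one count per CPU.
	 */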
	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}
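
	/*
	 * Pin this thread to each CPU in turn and make a distinct number
	 * of openat() calls there (111 + cpu), so that each per-cpu count
	 * can be checked independently afterwards.
	 */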
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}
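
		/*
		 * The affinity mask holds a single CPU at a time: the
		 * CPU_CLR() below removes it before the next iteration.
		 */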
		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Explicitly preallocate the counts: with auto allocation the
	 * first perf_evsel__read_on_cpu() would allocate room for just
	 * one CPU, since we start at cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;
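
	/*
	 * Read the per-cpu counts back and check each one against the
	 * number of openat() calls made while pinned to that CPU.
	 */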
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}
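
/*
 * This test runs through the perf test harness, which matches test
 * names by substring, e.g.:
 *   $ perf test openat
 */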