#include <api/fs/tracefs.h>
#include <api/fs/debugfs.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"
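
/*
 * Count "syscalls:sys_enter_openat" on every CPU: pin to each CPU in turn,
 * issue a distinct number of openat() calls there, then verify the per-CPU
 * counts match what was issued.
 */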
int test__openat_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (evsel == NULL) {
		if (tracefs__configured())
			pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
		else if (debugfs__configured())
			pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		else
			pr_debug("Neither tracefs nor debugfs is enabled in this kernel\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}
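
	/* Pin to each CPU and issue a per-CPU-distinct number of openat() calls. */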
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if we used
	 * the auto allocation it would allocate just for 1 cpu, as we start
	 * with cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;
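
	/* Read back the per-CPU counts and compare against what was issued. */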
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}