diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
new file mode 100644
index 0000000..0c9dd9b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2014, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#include "ebb.h"
+
+
+/*
+ * Test that tries to trigger CPU_FTR_PMAO_BUG, a hardware defect where an
+ * exception triggers but we context switch before it is delivered, and so
+ * lose the exception.
+ */
+
+static int test_body(void)
+{
+       int i, orig_period, max_period;
+       struct event event;
+
+       /* We use PMC4 to make sure the kernel switches all counters correctly */
+       event_init_named(&event, 0x40002, "instructions");
+       event_leader_ebb_init(&event);
+
+       event.attr.exclude_kernel = 1;
+       event.attr.exclude_hv = 1;
+       event.attr.exclude_idle = 1;
+
+       FAIL_IF(event_open(&event));
+
+       ebb_enable_pmc_counting(4);
+       setup_ebb_handler(standard_ebb_callee);
+       ebb_global_enable();
+       FAIL_IF(ebb_event_enable(&event));
+
+       /*
+        * We want a low sample period, but we also want to get out of the EBB
+        * handler without tripping up again.
+        *
+        * This value was picked after much experimentation.
+        */
+       orig_period = max_period = sample_period = 400;
+
+       mtspr(SPRN_PMC4, pmc_sample_period(sample_period));
+
+       while (ebb_state.stats.ebb_count < 1000000) {
+               /*
+                * We are trying to get the EBB exception to race exactly with
+                * us entering the kernel to do the syscall. We then need the
+                * kernel to decide our timeslice is up and context switch to
+                * the other thread. When we come back our EBB will have been
+                * lost and we'll spin in this while loop forever.
+                */
+
+               for (i = 0; i < 100000; i++)
+                       sched_yield();
+
+               /* Change the sample period slightly to try and hit the race */
+               if (sample_period >= (orig_period + 200))
+                       sample_period = orig_period;
+               else
+                       sample_period++;
+
+               if (sample_period > max_period)
+                       max_period = sample_period;
+       }
+
+       ebb_freeze_pmcs();
+       ebb_global_disable();
+
+       count_pmc(4, sample_period);
+       mtspr(SPRN_PMC4, 0xdead);
+
+       dump_summary_ebb_state();
+       dump_ebb_hw_state();
+
+       event_close(&event);
+
+       FAIL_IF(ebb_state.stats.ebb_count == 0);
+
+       /* We vary our sample period so we need extra fudge here */
+       FAIL_IF(!ebb_check_count(4, orig_period, 2 * (max_period - orig_period)));
+
+       return 0;
+}
+
+static int lost_exception(void)
+{
+       return eat_cpu(test_body);
+}
+
+int main(void)
+{
+       return test_harness(lost_exception, "lost_exception");
+}
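
For reference, pmc_sample_period() and the other ebb_* helpers used above come from the selftest's shared EBB support code (ebb.c/ebb.h), which is not part of this patch. A minimal sketch of the period helper, assuming the PMU raises its exception when a PMC's most significant bit becomes set, so the counter must be preloaded to overflow after the requested number of events:

	#include <stdint.h>

	/*
	 * Sketch only; the real helper lives in the selftests' EBB support
	 * code. A PMC fires its exception when bit 31 (the MSB) of the
	 * 32-bit counter becomes set, so starting the counter at 0x80000000
	 * minus the desired period makes it overflow after 'period' events.
	 */
	static uint32_t pmc_sample_period(uint32_t period)
	{
		return 0x80000000 - period;
	}

Under that assumption, mtspr(SPRN_PMC4, pmc_sample_period(400)) loads PMC4 with 0x7ffffe70, and the EBB handler runs after roughly 400 more completed instructions.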