/*
 * arch/s390/oprofile/hwsampler.c
 *
 * Copyright IBM Corp. 2010
 * Author: Heinz Graalfs <graalfs@de.ibm.com>
 */
8 #include <linux/kernel_stat.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/smp.h>
12 #include <linux/errno.h>
13 #include <linux/workqueue.h>
14 #include <linux/interrupt.h>
15 #include <linux/notifier.h>
16 #include <linux/cpu.h>
17 #include <linux/semaphore.h>
18 #include <linux/oom.h>
19 #include <linux/oprofile.h>
21 #include <asm/facility.h>
22 #include <asm/cpu_mf.h>
25 #include "hwsampler.h"
26 #include "op_counter.h"
/* Maximum number of sample-data-blocks (SDBs) linked from one SDB-table */
#define MAX_NUM_SDB 511
/* Bit masks applied to an SDB trailer entry: request a measurement alert
 * when the block fills, and the hardware-set "buffer full" indication. */
#define ALERT_REQ_MASK 0x4000000000000000ul
#define BUFFER_FULL_MASK 0x8000000000000000ul
/* Per-CPU sampler state: SDBT chain anchor, QSI/SSCTL request blocks,
 * deferred-work item and error counters. */
DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
/* Parameter block passed to smp_call_function_single() targets. */
struct hws_execute_parms {
DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);
/* hws_sem serializes the hwsampler_* API; hws_sem_oom guards the
 * allocation path against a concurrent OOM notification. */
static DEFINE_MUTEX(hws_sem);
static DEFINE_MUTEX(hws_sem_oom);
static unsigned char hws_flush_all;
static unsigned int hws_oom;
static struct workqueue_struct *hws_wq;
/* Current state of the sampler (HWS_INIT/HWS_DEALLOCATED/HWS_STOPPED/
 * HWS_STARTED/HWS_STOPPING); transitions happen under hws_sem. */
static unsigned int hws_state;
/* set to 1 if called by kernel during memory allocation */
static unsigned char oom_killer_was_active;
/* size of SDBT and SDB as of allocate API */
static unsigned long num_sdbt = 100;
static unsigned long num_sdb = 511;
/* sampling interval (machine cycles) */
static unsigned long interval;
/* min/max sampling interval supported by the hardware, learned via QSI */
static unsigned long min_sampler_rate;
static unsigned long max_sampler_rate;
/*
 * ssctl() - issue the SSCTL (Set Sampling Controls) instruction
 * @buffer: pointer to a struct hws_ssctl_request_block
 *
 * Runs on the CPU whose sampling controls are to be changed. A program
 * check during the instruction is caught via the exception table and
 * leaves the condition code non-zero.
 * Returns 0 on success, -EINVAL if the instruction failed.
 */
static int ssctl(void *buffer)
/* set in order to detect a program check */
/* opcode 0xB287 = SSCTL, operand is the request block */
"0: .insn s,0xB2870000,0(%1)\n"
/* fixups: a fault at 0: or 1: resumes at 2: with cc still set */
EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (cc), "+a" (buffer)
: "m" (*((struct hws_ssctl_request_block *)buffer))
return cc ? -EINVAL : 0 ;
/*
 * qsi() - issue the QSI (Query Sampling Information) instruction
 * @buffer: pointer to a struct hws_qsi_info_block to be filled in
 *
 * Returns 0 on success, -EINVAL if the instruction failed (condition
 * code non-zero or program check caught via the exception table).
 */
static int qsi(void *buffer)
/* opcode 0xB286 = QSI, operand is the info block */
"0: .insn s,0xB2860000,0(%1)\n"
EX_TABLE(0b, 2b) EX TABLE(1b, 2b)
: "=d" (cc), "+a" (buffer)
: "m" (*((struct hws_qsi_info_block *)buffer))
return cc ? -EINVAL : 0;
/* smp_call_function_single() target: run QSI on this CPU and store the
 * result code in the caller-supplied parameter block. */
static void execute_qsi(void *parms)
struct hws_execute_parms *ep = parms;
ep->rc = qsi(ep->buffer);
/* smp_call_function_single() target: run SSCTL on this CPU and store
 * the result code in the caller-supplied parameter block. */
static void execute_ssctl(void *parms)
struct hws_execute_parms *ep = parms;
ep->rc = ssctl(ep->buffer);
/*
 * smp_ctl_ssctl_stop() - stop sampling on one CPU
 * @cpu: target CPU
 *
 * Issues SSCTL on @cpu (with enable/activate controls cleared -- the
 * ssctl request-block setup is done on the lines preceding the call),
 * then re-reads the state with QSI to verify sampling really stopped.
 */
static int smp_ctl_ssctl_stop(int cpu)
struct hws_execute_parms ep;
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
ep.buffer = &cb->ssctl;
smp_call_function_single(cpu, execute_ssctl, &ep, 1);
printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
/* verify: both enable (es) and activate (cs) must now be clear */
ep.buffer = &cb->qsi;
smp_call_function_single(cpu, execute_qsi, &ep, 1);
if (cb->qsi.es || cb->qsi.cs) {
printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");
/*
 * smp_ctl_ssctl_deactivate() - set sampling inactive on one CPU
 * @cpu: target CPU
 *
 * Issues SSCTL on @cpu (presumably with the activation control cleared
 * while leaving it enabled -- TODO confirm against the request-block
 * setup not visible here), then verifies via QSI.
 */
static int smp_ctl_ssctl_deactivate(int cpu)
struct hws_execute_parms ep;
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
ep.buffer = &cb->ssctl;
smp_call_function_single(cpu, execute_ssctl, &ep, 1);
printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
/* re-query the sampling state to confirm deactivation */
ep.buffer = &cb->qsi;
smp_call_function_single(cpu, execute_qsi, &ep, 1);
printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");
/*
 * smp_ctl_ssctl_enable_activate() - enable and activate sampling
 * @cpu:      target CPU
 * @interval: sampling interval in machine cycles
 *
 * Points the hardware at this CPU's SDBT chain (tear = table-entry
 * address, dear = first data-block address) and issues SSCTL, then
 * re-reads the state with QSI.
 */
static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
struct hws_execute_parms ep;
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
/* tear: address of the first SDBT; dear: its first entry (first SDB) */
cb->ssctl.tear = cb->first_sdbt;
cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
cb->ssctl.interval = interval;
ep.buffer = &cb->ssctl;
smp_call_function_single(cpu, execute_ssctl, &ep, 1);
printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
ep.buffer = &cb->qsi;
smp_call_function_single(cpu, execute_qsi, &ep, 1);
printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);
/* Query sampling information on @cpu; the result lands in that CPU's
 * per-cpu buffer (cb->qsi). */
static int smp_ctl_qsi(int cpu)
struct hws_execute_parms ep;
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
ep.buffer = &cb->qsi;
smp_call_function_single(cpu, execute_qsi, &ep, 1);
/*
 * trailer_entry_ptr() - locate the trailer entry of an SDB
 * @v: address of (or entry pointing to) the sample-data-block
 *
 * The trailer entry occupies the last sizeof(struct hws_trailer_entry)
 * bytes of the SDB page (the address is presumably advanced to the end
 * of the 4K page before the subtraction -- TODO confirm, that step is
 * not visible here).
 */
static inline unsigned long *trailer_entry_ptr(unsigned long v)
ret -= sizeof(struct hws_trailer_entry);
return (unsigned long *) ret;
/*
 * hws_ext_handler() - measurement-alert external interrupt handler
 * @ext_code: external interrupt code
 * @param32:  interrupt parameter carrying the CPU_MF_INT_SF_* bits
 * @param64:  unused here
 *
 * Accumulates the alert bits into this CPU's buffer and kicks the
 * sampler worker to drain the sample data.
 */
static void hws_ext_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
struct hws_cpu_buffer *cb = &__get_cpu_var(sampler_cpu_buffer);
/* ignore alerts that are not sampling-facility related */
if (!(param32 & CPU_MF_INT_SF_MASK))
kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
/* NOTE(review): read + xchg is not one atomic RMW; bits set by a
 * concurrent updater between the two calls could be lost -- confirm
 * whether an atomic OR primitive should be used here. */
atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
queue_work(hws_wq, &cb->worker);
/* Forward declarations: the deferred-work entry point and the routine
 * that feeds collected samples into oprofile. */
static void worker(struct work_struct *work);
static void add_samples_to_oprofile(unsigned cpu, unsigned long *,
unsigned long *dear);
/* Zero the per-cpu sampler buffer of every online CPU. */
static void init_all_cpu_buffers(void)
struct hws_cpu_buffer *cb;
for_each_online_cpu(cpu) {
cb = &per_cpu(sampler_cpu_buffer, cpu);
memset(cb, 0, sizeof(struct hws_cpu_buffer));
/*
 * is_link_entry() - test whether an SDBT entry is a link entry
 * @s: pointer to the SDBT entry
 *
 * Bit 0 of an SDBT entry marks a link to the next SDB-table.
 * Returns 1 if the link bit is set, 0 otherwise.
 */
static int is_link_entry(unsigned long *s)
{
	return (int)(*s & 0x1ul);
}
/*
 * get_next_sdbt() - follow an SDBT link entry
 * @s: pointer to the SDBT link entry
 *
 * Clears the link-indicator bit (bit 0) and returns the address of the
 * next SDB-table.
 */
static unsigned long *get_next_sdbt(unsigned long *s)
{
	unsigned long next = *s;

	next &= ~0x1ul;
	return (unsigned long *) next;
}
/* Reset per-cpu sampler bookkeeping (pending alert bits, worker cursor
 * and error counters) on every online CPU before sampling starts. */
static int prepare_cpu_buffers(void)
struct hws_cpu_buffer *cb;
for_each_online_cpu(cpu) {
cb = &per_cpu(sampler_cpu_buffer, cpu);
atomic_set(&cb->ext_params, 0);
cb->worker_entry = 0;
cb->sample_overflow = 0;
/* clear the counters reported by stop_sampling() */
cb->incorrect_sdbt_entry = 0;
cb->invalid_entry_address = 0;
cb->loss_of_sample_data = 0;
cb->sample_auth_change_alert = 0;
/*
 * allocate_sdbt() - allocate sampler memory
 * @cpu: the cpu for which sampler memory is allocated
 *
 * A 4K page is allocated for each requested SDBT.
 * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
 * Set ALERT_REQ mask in each SDBs trailer.
 * Returns zero if successful, <0 otherwise.
 */
static int allocate_sdbt(int cpu)
unsigned long *trailer;
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
/* one zeroed page per SDB-table */
for (j = 0; j < num_sdbt; j++) {
sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
/* allocation check is done under hws_sem_oom so the OOM notifier
 * cannot race with us while we decide whether to keep the page */
mutex_lock(&hws_sem_oom);
/* OOM killer might have been activated */
if (oom_killer_was_active || !sdbt) {
free_page((unsigned long)sdbt);
goto allocate_sdbt_error;
/* remember the head of the chain for tear/dear setup later */
if (cb->first_sdbt == 0)
cb->first_sdbt = (unsigned long)sdbt;
/* link current page to tail of chain */
/* +1 sets the link-indicator bit (see is_link_entry()) */
*tail = (unsigned long)(void *)sdbt + 1;
mutex_unlock(&hws_sem_oom);
for (k = 0; k < num_sdb; k++) {
/* get and set SDB page */
sdb = get_zeroed_page(GFP_KERNEL);
mutex_lock(&hws_sem_oom);
/* OOM killer might have been activated */
if (oom_killer_was_active || !sdb) {
goto allocate_sdbt_error;
/* request a measurement alert when this SDB fills up */
trailer = trailer_entry_ptr(*sdbt);
*trailer = ALERT_REQ_MASK;
mutex_unlock(&hws_sem_oom);
mutex_lock(&hws_sem_oom);
if (oom_killer_was_active)
goto allocate_sdbt_error;
/* close the ring: link the last SDBT back to the first */
*tail = (unsigned long)
((void *)cb->first_sdbt) + 1;
mutex_unlock(&hws_sem_oom);
goto allocate_sdbt_exit;
/*
 * deallocate_sdbt() - deallocate all sampler memory
 *
 * For each online CPU all SDBT trees are deallocated.
 * Returns the number of freed pages.
 */
static int deallocate_sdbt(void)
for_each_online_cpu(cpu) {
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
/* walk the circular SDBT chain starting at first_sdbt */
sdbt = cb->first_sdbt;
curr = (unsigned long *) sdbt;
/* we'll free the SDBT after all SDBs are processed... */
/* watch for link entry reset if found */
if (is_link_entry(curr)) {
curr = get_next_sdbt(curr);
/* we are done if we reach the start */
if ((unsigned long) curr == start)
sdbt = (unsigned long) curr;
/* process SDB pointer */
/*
 * start_sampling() - enable and activate sampling on one CPU
 * @cpu: target CPU
 *
 * Uses the module-wide 'interval'. After SSCTL succeeds, the checks on
 * the enable (es) and activate (cs) state below guard against a
 * silently ignored request.
 */
static int start_sampling(int cpu)
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
rc = smp_ctl_ssctl_enable_activate(cpu, interval);
printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
"hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
/*
 * stop_sampling() - stop sampling on one CPU and report error counters
 * @cpu: target CPU
 *
 * Stops the hardware via SSCTL and then logs every non-zero error
 * counter accumulated by worker_check_error() during the run.
 */
static int stop_sampling(int cpu)
struct hws_cpu_buffer *cb;
rc = smp_ctl_qsi(cpu);
cb = &per_cpu(sampler_cpu_buffer, cpu);
if (!rc && !cb->qsi.es)
printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);
rc = smp_ctl_ssctl_stop(cpu);
printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);
/* dump diagnostic counters collected while sampling ran */
printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
" count=%lu.\n", cpu, v);
v = cb->loss_of_sample_data;
printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
" count=%lu.\n", cpu, v);
v = cb->invalid_entry_address;
printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
" count=%lu.\n", cpu, v);
v = cb->incorrect_sdbt_entry;
"hwsampler: CPU %d CPUMF Incorrect SDBT address,"
" count=%lu.\n", cpu, v);
v = cb->sample_auth_change_alert;
"hwsampler: CPU %d CPUMF Sample authorization change,"
" count=%lu.\n", cpu, v);
/* Verify the machine provides facility bit 68 (presumably the
 * CPU-measurement sampling facility -- confirm against the PoP). */
static int check_hardware_prerequisites(void)
if (!test_facility(68))
/*
 * hws_oom_callback() - the OOM callback function
 *
 * In case the callback is invoked during memory allocation for the
 * hw sampler, all obtained memory is deallocated and a flag is set
 * so main sampler memory allocation can exit with a failure code.
 * In case the callback is invoked during sampling the hw sampler
 * is deactivated for all CPUs.
 */
static int hws_oom_callback(struct notifier_block *nfb,
unsigned long dummy, void *parm)
/* NOTE(review): freed presumably points into @parm (the notifier's
 * freed-pages accumulator) -- the assignment is not visible here,
 * confirm before relying on it. */
unsigned long *freed;
struct hws_cpu_buffer *cb;
mutex_lock(&hws_sem_oom);
if (hws_state == HWS_DEALLOCATED) {
/* during memory allocation */
if (oom_killer_was_active == 0) {
oom_killer_was_active = 1;
*freed += deallocate_sdbt();
cb = &per_cpu(sampler_cpu_buffer, cpu);
/* during sampling: take every CPU's sampler inactive */
for_each_online_cpu(i) {
smp_ctl_ssctl_deactivate(i);
"hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
mutex_unlock(&hws_sem_oom);
/* OOM notifier registered while allocating and while sampling runs. */
static struct notifier_block hws_oom_notifier = {
.notifier_call = hws_oom_callback
/* CPU hotplug callback: refuse hotplug while sampler memory is
 * allocated, since buffers exist only for the CPUs online at setup. */
static int hws_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
/* We do not have sampler space available for all possible CPUs.
All CPUs should be online when hw sampling is activated. */
return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
/* CPU hotplug notifier, registered in hwsampler_setup(). */
static struct notifier_block hws_cpu_notifier = {
.notifier_call = hws_cpu_callback
/*
 * hwsampler_deactivate() - set hardware sampling temporarily inactive
 * @cpu: specifies the CPU to be set inactive.
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_deactivate(unsigned int cpu)
/*
 * Deactivate hw sampling temporarily and flush the buffer
 * by pushing all the pending samples to oprofile buffer.
 *
 * This function can be called under one of the following conditions:
 * Memory unmap, task is exiting.
 */
struct hws_cpu_buffer *cb;
mutex_lock(&hws_sem);
cb = &per_cpu(sampler_cpu_buffer, cpu);
if (hws_state == HWS_STARTED) {
rc = smp_ctl_qsi(cpu);
rc = smp_ctl_ssctl_deactivate(cpu);
"hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
/* on failure fall back to a full stop of the sampler */
hws_state = HWS_STOPPING;
/* Add work to queue to read pending samples.*/
queue_work_on(cpu, hws_wq, &cb->worker);
mutex_unlock(&hws_sem);
/* drain outside the mutex so the worker can run */
flush_workqueue(hws_wq);
/*
 * hwsampler_activate() - activate/resume hardware sampling which was deactivated
 * @cpu: specifies the CPU to be set active.
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_activate(unsigned int cpu)
/*
 * Re-activate hw sampling. This should be called in pair with
 * hwsampler_deactivate().
 */
struct hws_cpu_buffer *cb;
mutex_lock(&hws_sem);
cb = &per_cpu(sampler_cpu_buffer, cpu);
if (hws_state == HWS_STARTED) {
rc = smp_ctl_qsi(cpu);
/* re-enable/activate with the interval chosen at start time */
rc = smp_ctl_ssctl_enable_activate(cpu, interval);
"CPU %d, CPUMF activate sampling failed.\n",
mutex_unlock(&hws_sem);
/*
 * check_qsi_on_setup() - validate sampling state on every online CPU
 *
 * Fails if sampling is not authorized for this LPAR/guest; if a
 * previous user left sampling enabled, it is stopped here.
 */
static int check_qsi_on_setup(void)
struct hws_cpu_buffer *cb;
for_each_online_cpu(cpu) {
cb = &per_cpu(sampler_cpu_buffer, cpu);
rc = smp_ctl_qsi(cpu);
printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
rc = smp_ctl_ssctl_stop(cpu);
"CPU %d, CPUMF Sampling stopped now.\n", cpu);
/* Re-query each online CPU's sampling state just before starting;
 * the state checks follow on lines not visible here. */
static int check_qsi_on_start(void)
struct hws_cpu_buffer *cb;
for_each_online_cpu(cpu) {
cb = &per_cpu(sampler_cpu_buffer, cpu);
rc = smp_ctl_qsi(cpu);
/* First invocation of the worker on @cpu: start draining samples at
 * the head of the SDBT chain. */
static void worker_on_start(unsigned int cpu)
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
cb->worker_entry = cb->first_sdbt;
/*
 * worker_check_error() - classify measurement-alert bits
 * @cpu:        CPU whose buffer is being drained
 * @ext_params: accumulated CPU_MF_INT_SF_* alert bits
 *
 * Bumps the matching per-cpu diagnostic counter for each alert bit;
 * the invalid-address/incorrect-SDBT/auth-change cases are treated as
 * errors (their return paths are on lines not visible here).
 */
static int worker_check_error(unsigned int cpu, int ext_params)
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
sdbt = (unsigned long *) cb->worker_entry;
if (ext_params & CPU_MF_INT_SF_PRA)
if (ext_params & CPU_MF_INT_SF_LSDA)
cb->loss_of_sample_data++;
if (ext_params & CPU_MF_INT_SF_IAE) {
cb->invalid_entry_address++;
if (ext_params & CPU_MF_INT_SF_ISE) {
cb->incorrect_sdbt_entry++;
if (ext_params & CPU_MF_INT_SF_SACA) {
cb->sample_auth_change_alert++;
/*
 * worker_on_finish() - final worker pass while the sampler is stopping
 * @cpu: CPU whose buffer was drained
 *
 * Stops this CPU's sampler if still enabled and kicks the workers of
 * the remaining online CPUs so their buffers get flushed too.
 */
static void worker_on_finish(unsigned int cpu)
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
rc = smp_ctl_qsi(cpu);
"hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
rc = smp_ctl_ssctl_stop(cpu);
"hwsampler: CPU %d, CPUMF Deactivation failed.\n",
/* flush the remaining CPUs' buffers as well */
for_each_online_cpu(i) {
queue_work_on(i, hws_wq,
/*
 * worker_on_interrupt() - drain full sample-data-blocks
 * @cpu: CPU whose buffer is being drained
 *
 * Walks the SDBT chain from cb->worker_entry, pushing every full SDB
 * into oprofile and handing the block back to the hardware.
 */
static void worker_on_interrupt(unsigned int cpu)
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
/* resume where the previous pass left off */
sdbt = (unsigned long *) cb->worker_entry;
/* do not proceed if stop was entered,
 * forget the buffers not yet processed */
while (!done && !cb->stop_mode) {
unsigned long *trailer;
struct hws_trailer_entry *te;
unsigned long *dear = 0;
trailer = trailer_entry_ptr(*sdbt);
/* leave loop if no more work to do */
if (!(*trailer & BUFFER_FULL_MASK)) {
te = (struct hws_trailer_entry *)trailer;
cb->sample_overflow += te->overflow;
add_samples_to_oprofile(cpu, sdbt, dear);
/* reset the trailer so the hardware can refill this SDB;
 * 0x40 re-arms the alert-request indication */
xchg((unsigned char *) te, 0x40);
/* advance to next sdb slot in current sdbt */
/* in case link bit is set use address w/o link bit */
if (is_link_entry(sdbt))
sdbt = get_next_sdbt(sdbt);
/* remember position for the next worker run */
cb->worker_entry = (unsigned long)sdbt;
/*
 * add_samples_to_oprofile() - push one SDB's entries into oprofile
 * @cpu:  CPU the samples were taken on
 * @sdbt: SDBT entry pointing at the sample-data-block
 * @dear: data-entry address (drain boundary)
 *
 * Iterates the basic-sampling entries of the SDB up to the trailer,
 * resolving user-space samples to their task via the primary ASN (PID)
 * and honouring the oprofile user/kernel filter settings.
 */
static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
struct hws_data_entry *sample_data_ptr;
unsigned long *trailer;
trailer = trailer_entry_ptr(*sdbt);
sample_data_ptr = (struct hws_data_entry *)(*sdbt);
while ((unsigned long *)sample_data_ptr < trailer) {
struct pt_regs *regs = NULL;
struct task_struct *tsk = NULL;
/*
 * Check sampling mode, 1 indicates basic (=customer) sampling
 */
if (sample_data_ptr->def != 1) {
/* sample slot is not yet written */
/* make sure we don't use it twice,
 * the next time the sampler will set it again */
sample_data_ptr->def = 0;
/* P bit set: problem state, i.e. a user-space sample */
if (sample_data_ptr->P == 1) {
/* userspace sample */
unsigned int pid = sample_data_ptr->prim_asn;
if (!counter_config.user)
tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
regs = task_pt_regs(tsk);
/* kernelspace sample */
if (!counter_config.kernel)
regs = task_pt_regs(current);
mutex_lock(&hws_sem);
oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
!sample_data_ptr->P, tsk);
mutex_unlock(&hws_sem);
/*
 * worker() - deferred-work entry point for one CPU's sampler buffer
 * @work: embedded work item inside this CPU's hws_cpu_buffer
 *
 * Consumes the pending alert bits, drains full SDBs into oprofile and,
 * while stopping, performs the final flush. Relies on the work having
 * been queued on the sampled CPU (queue_work_on), so
 * smp_processor_id() identifies the right buffer.
 */
static void worker(struct work_struct *work)
struct hws_cpu_buffer *cb;
cb = container_of(work, struct hws_cpu_buffer, worker);
cpu = smp_processor_id();
/* atomically claim and clear the pending alert bits */
ext_params = atomic_xchg(&cb->ext_params, 0);
if (!cb->worker_entry)
worker_on_start(cpu);
if (worker_check_error(cpu, ext_params))
worker_on_interrupt(cpu);
worker_on_finish(cpu);
/*
 * hwsampler_allocate() - allocate memory for the hardware sampler
 * @sdbt: number of SDBTs per online CPU (must be > 0)
 * @sdb: number of SDBs per SDBT (minimum 1, maximum 511)
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_allocate(unsigned long sdbt, unsigned long sdb)
mutex_lock(&hws_sem);
/* only legal from the DEALLOCATED state */
if (hws_state != HWS_DEALLOCATED)
if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB)
/* the OOM notifier may abort the allocation loop below */
oom_killer_was_active = 0;
register_oom_notifier(&hws_oom_notifier);
for_each_online_cpu(cpu) {
if (allocate_sdbt(cpu)) {
unregister_oom_notifier(&hws_oom_notifier);
unregister_oom_notifier(&hws_oom_notifier);
if (oom_killer_was_active)
hws_state = HWS_STOPPED;
mutex_unlock(&hws_sem);
printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n");
/*
 * hwsampler_deallocate() - deallocate hardware sampler memory
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_deallocate(void)
mutex_lock(&hws_sem);
/* only legal from the STOPPED state */
if (hws_state != HWS_STOPPED)
goto deallocate_exit;
/* no more measurement-alert interrupts wanted */
measurement_alert_subclass_unregister();
hws_state = HWS_DEALLOCATED;
mutex_unlock(&hws_sem);
/* Return the minimum sampling interval supported by the hardware,
 * as learned from QSI during hwsampler_setup(). */
unsigned long hwsampler_query_min_interval(void)
return min_sampler_rate;
/* Return the maximum sampling interval supported by the hardware,
 * as learned from QSI during hwsampler_setup(). */
unsigned long hwsampler_query_max_interval(void)
return max_sampler_rate;
/* Return the number of samples the hardware reported as lost to
 * buffer overflow on @cpu (accumulated in worker_on_interrupt()). */
unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
struct hws_cpu_buffer *cb;
cb = &per_cpu(sampler_cpu_buffer, cpu);
return cb->sample_overflow;
/*
 * hwsampler_setup() - one-time sampler initialization
 *
 * Checks hardware prerequisites, creates the drain workqueue,
 * registers the hotplug notifier, learns the min/max sampling rates
 * from each CPU's QSI data and registers the external interrupt
 * handler. Leaves hws_state at HWS_DEALLOCATED on success.
 */
int hwsampler_setup(void)
struct hws_cpu_buffer *cb;
mutex_lock(&hws_sem);
hws_state = HWS_INIT;
init_all_cpu_buffers();
rc = check_hardware_prerequisites();
rc = check_qsi_on_setup();
hws_wq = create_workqueue("hwsampler");
register_cpu_notifier(&hws_cpu_notifier);
for_each_online_cpu(cpu) {
cb = &per_cpu(sampler_cpu_buffer, cpu);
INIT_WORK(&cb->worker, worker);
rc = smp_ctl_qsi(cpu);
/* CPUs may disagree on limits; warn and keep the tighter bound */
if (min_sampler_rate != cb->qsi.min_sampl_rate) {
if (min_sampler_rate) {
"hwsampler: different min sampler rate values.\n");
if (min_sampler_rate < cb->qsi.min_sampl_rate)
cb->qsi.min_sampl_rate;
min_sampler_rate = cb->qsi.min_sampl_rate;
if (max_sampler_rate != cb->qsi.max_sampl_rate) {
if (max_sampler_rate) {
"hwsampler: different max sampler rate values.\n");
if (max_sampler_rate > cb->qsi.max_sampl_rate)
cb->qsi.max_sampl_rate;
max_sampler_rate = cb->qsi.max_sampl_rate;
/* 0x1407: measurement-alert external interrupt code */
register_external_interrupt(0x1407, hws_ext_handler);
hws_state = HWS_DEALLOCATED;
mutex_unlock(&hws_sem);
/*
 * hwsampler_shutdown() - tear down the sampler
 *
 * Only legal when the sampler is stopped or deallocated. Drains the
 * workqueue, unregisters the interrupt handler and the hotplug
 * notifier, and returns the state machine to HWS_INIT.
 */
int hwsampler_shutdown(void)
mutex_lock(&hws_sem);
if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) {
mutex_unlock(&hws_sem);
/* flush with hws_sem dropped so queued workers can run */
flush_workqueue(hws_wq);
mutex_lock(&hws_sem);
if (hws_state == HWS_STOPPED) {
measurement_alert_subclass_unregister();
destroy_workqueue(hws_wq);
unregister_external_interrupt(0x1407, hws_ext_handler);
hws_state = HWS_INIT;
mutex_unlock(&hws_sem);
unregister_cpu_notifier(&hws_cpu_notifier);
/*
 * hwsampler_start_all() - start hardware sampling on all online CPUs
 * @rate: specifies the used interval when samples are taken
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_start_all(unsigned long rate)
mutex_lock(&hws_sem);
/* only legal from the STOPPED state */
if (hws_state != HWS_STOPPED)
goto start_all_exit;
/* fail if rate is not valid */
if (interval < min_sampler_rate || interval > max_sampler_rate)
goto start_all_exit;
rc = check_qsi_on_start();
goto start_all_exit;
rc = prepare_cpu_buffers();
goto start_all_exit;
for_each_online_cpu(cpu) {
rc = start_sampling(cpu);
/* on partial failure the second loop below unwinds */
for_each_online_cpu(cpu) {
goto start_all_exit;
hws_state = HWS_STARTED;
mutex_unlock(&hws_sem);
/* watch for OOM while sampling runs */
register_oom_notifier(&hws_oom_notifier);
/* now let them in, 1407 CPUMF external interrupts */
measurement_alert_subclass_register();
/*
 * hwsampler_stop_all() - stop hardware sampling on all online CPUs
 *
 * Returns 0 on success, !0 on failure.
 */
int hwsampler_stop_all(void)
int tmp_rc, rc, cpu;
struct hws_cpu_buffer *cb;
mutex_lock(&hws_sem);
if (hws_state == HWS_INIT) {
mutex_unlock(&hws_sem);
/* signal workers via state so they stop draining (see stop_mode) */
hws_state = HWS_STOPPING;
mutex_unlock(&hws_sem);
for_each_online_cpu(cpu) {
cb = &per_cpu(sampler_cpu_buffer, cpu);
tmp_rc = stop_sampling(cpu);
/* let queued workers finish their final flush */
flush_workqueue(hws_wq);
mutex_lock(&hws_sem);
unregister_oom_notifier(&hws_oom_notifier);
hws_state = HWS_STOPPED;
mutex_unlock(&hws_sem);