/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);

#define MWAIT_SUBSTATE_MASK (0xf)
#define MWAIT_CSTATE_MASK (0xf)
#define MWAIT_SUBSTATE_SIZE (4)
#define CPUID_MWAIT_LEAF (5)
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
static unsigned long power_saving_mwait_eax;

static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;

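/*
 * Probe CPUID leaf 5 (MONITOR/MWAIT) for the deepest supported C-state
 * hint and cache it in power_saving_mwait_eax for later __mwait() calls.
 * Also record whether the TSC may stop in deep idle, so it can be marked
 * unstable once a power-saving thread actually idles a CPU.
 */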
static void power_saving_mwait_init(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned int highest_cstate = 0;
        unsigned int highest_subcstate = 0;
        int i;

        if (!boot_cpu_has(X86_FEATURE_MWAIT))
                return;
        if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
                return;

        cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

        if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
            !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
                return;

        edx >>= MWAIT_SUBSTATE_SIZE;
        for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
                if (edx & MWAIT_SUBSTATE_MASK) {
                        highest_cstate = i;
                        highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
                }
        }
        power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
                (highest_subcstate - 1);

        for_each_online_cpu(i)
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);

#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
        case X86_VENDOR_INTEL:
                /*
                 * AMD Fam10h TSC will tick in all
                 * C/P/S0/S1 states when this bit is set.
                 */
                if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                        return;

                /* FALL THROUGH */
        default:
                /* TSC could halt in idle */
                tsc_detected_unstable = 1;
        }
#endif
}

static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);

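/*
 * Migrate the calling power-saving thread to the least-loaded online CPU,
 * preferring CPUs that are not hyper-threading siblings of CPUs already
 * occupied by other power-saving threads.
 */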
static void round_robin_cpu(unsigned int tsk_index)
{
        struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
        cpumask_var_t tmp;
        int cpu;
        unsigned long min_weight = -1;
        unsigned long uninitialized_var(preferred_cpu);

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return;

        mutex_lock(&isolated_cpus_lock);
        cpumask_clear(tmp);
        for_each_cpu(cpu, pad_busy_cpus)
                cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
        cpumask_andnot(tmp, cpu_online_mask, tmp);
        /* avoid HT siblings if possible */
        if (cpumask_empty(tmp))
                cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
        if (cpumask_empty(tmp)) {
                mutex_unlock(&isolated_cpus_lock);
                return;
        }
        for_each_cpu(cpu, tmp) {
                if (cpu_weight[cpu] < min_weight) {
                        min_weight = cpu_weight[cpu];
                        preferred_cpu = cpu;
                }
        }

        if (tsk_in_cpu[tsk_index] != -1)
                cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
        tsk_in_cpu[tsk_index] = preferred_cpu;
        cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
        cpu_weight[preferred_cpu]++;
        mutex_unlock(&isolated_cpus_lock);

        set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}

static void exit_round_robin(unsigned int tsk_index)
{
        struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
        cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
        tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 10; /* seconds */

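/*
 * Per-thread idle-injection loop: run as SCHED_RR, hop to a new CPU every
 * round_robin_time seconds, keep the CPU in MWAIT for roughly
 * (100 - idle_pct)% of each period, then nap for idle_pct% so other tasks
 * are not starved.
 */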
static int power_saving_thread(void *data)
{
        struct sched_param param = {.sched_priority = 1};
        int do_sleep;
        unsigned int tsk_index = (unsigned long)data;
        u64 last_jiffies = 0;

        sched_setscheduler(current, SCHED_RR, &param);

        while (!kthread_should_stop()) {
                int cpu;
                u64 expire_time;

                try_to_freeze();

                /* round robin to cpus */
                if (last_jiffies + round_robin_time * HZ < jiffies) {
                        last_jiffies = jiffies;
                        round_robin_cpu(tsk_index);
                }

                do_sleep = 0;
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we test
                 * NEED_RESCHED:
                 */
                smp_mb();
                expire_time = jiffies + HZ * (100 - idle_pct) / 100;

                while (!need_resched()) {
                        if (tsc_detected_unstable && !tsc_marked_unstable) {
                                /* TSC could halt in idle, so notify users */
                                mark_tsc_unstable("TSC halts in idle");
                                tsc_marked_unstable = 1;
                        }
                        local_irq_disable();
                        cpu = smp_processor_id();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
                                &cpu);
                        stop_critical_timings();

                        __monitor((void *)&current_thread_info()->flags, 0, 0);
                        smp_mb();
                        if (!need_resched())
                                __mwait(power_saving_mwait_eax, 1);

                        start_critical_timings();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
                                &cpu);
                        local_irq_enable();

                        if (jiffies > expire_time) {
                                do_sleep = 1;
                                break;
                        }
                }

                current_thread_info()->status |= TS_POLLING;
                /*
                 * sched_rt throttles an RT task once it has used 95% of the
                 * CPU time, scheduling it out for 5% so other tasks do not
                 * starve.  That throttling only works when every CPU runs an
                 * RT task: if one CPU has no RT task, RT tasks on the other
                 * CPUs borrow its CPU time and can exceed 95%.  To keep the
                 * starvation avoidance working, take a nap here.
                 */
                if (do_sleep)
                        schedule_timeout_killable(HZ * idle_pct / 100);
        }

        exit_round_robin(tsk_index);
        return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;

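/* Spawn one "power_saving/N" kthread; on success it counts toward ps_tsk_num. */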
static int create_power_saving_task(void)
{
        int rc;

        ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
                (void *)(unsigned long)ps_tsk_num,
                "power_saving/%d", ps_tsk_num);
        rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
        if (!rc)
                ps_tsk_num++;
        else
                ps_tsks[ps_tsk_num] = NULL;
        return rc;
}

static void destroy_power_saving_task(void)
{
        if (ps_tsk_num > 0) {
                --ps_tsk_num;
                kthread_stop(ps_tsks[ps_tsk_num]);
                ps_tsks[ps_tsk_num] = NULL;
        }
}

static void set_power_saving_task_num(unsigned int num)
{
        if (num > ps_tsk_num) {
                while (ps_tsk_num < num) {
                        if (create_power_saving_task())
                                return;
                }
        } else if (num < ps_tsk_num) {
                while (ps_tsk_num > num)
                        destroy_power_saving_task();
        }
}

static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
        get_online_cpus();
        num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
        set_power_saving_task_num(num_cpus);
        put_online_cpus();
}

static uint32_t acpi_pad_idle_cpus_num(void)
{
        return ps_tsk_num;
}

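/*
 * Sysfs knobs exposed on the ACPI device:
 *   idlecpus - write: number of CPUs to force idle; read: mask of CPUs
 *              currently occupied by power-saving threads
 *   idlepct  - percentage of each period a power-saving thread naps so
 *              other tasks can run (the CPU is forced idle the rest)
 *   rrtime   - seconds between round-robin migrations to another CPU
 */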
static ssize_t acpi_pad_rrtime_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        unsigned long num;
        if (strict_strtoul(buf, 0, &num))
                return -EINVAL;
        if (num < 1 || num >= 100)
                return -EINVAL;
        mutex_lock(&isolated_cpus_lock);
        round_robin_time = num;
        mutex_unlock(&isolated_cpus_lock);
        return count;
}

static ssize_t acpi_pad_rrtime_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time);
}
static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
        acpi_pad_rrtime_show,
        acpi_pad_rrtime_store);

static ssize_t acpi_pad_idlepct_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        unsigned long num;
        if (strict_strtoul(buf, 0, &num))
                return -EINVAL;
        if (num < 1 || num >= 100)
                return -EINVAL;
        mutex_lock(&isolated_cpus_lock);
        idle_pct = num;
        mutex_unlock(&isolated_cpus_lock);
        return count;
}

static ssize_t acpi_pad_idlepct_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d", idle_pct);
}
static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
        acpi_pad_idlepct_show,
        acpi_pad_idlepct_store);

static ssize_t acpi_pad_idlecpus_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        unsigned long num;
        if (strict_strtoul(buf, 0, &num))
                return -EINVAL;
        mutex_lock(&isolated_cpus_lock);
        acpi_pad_idle_cpus(num);
        mutex_unlock(&isolated_cpus_lock);
        return count;
}

static ssize_t acpi_pad_idlecpus_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        return cpumask_scnprintf(buf, PAGE_SIZE,
                to_cpumask(pad_busy_cpus_bits));
}
static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
        acpi_pad_idlecpus_show,
        acpi_pad_idlecpus_store);

static int acpi_pad_add_sysfs(struct acpi_device *device)
{
        int result;

        result = device_create_file(&device->dev, &dev_attr_idlecpus);
        if (result)
                return -ENODEV;
        result = device_create_file(&device->dev, &dev_attr_idlepct);
        if (result) {
                device_remove_file(&device->dev, &dev_attr_idlecpus);
                return -ENODEV;
        }
        result = device_create_file(&device->dev, &dev_attr_rrtime);
        if (result) {
                device_remove_file(&device->dev, &dev_attr_idlecpus);
                device_remove_file(&device->dev, &dev_attr_idlepct);
                return -ENODEV;
        }
        return 0;
}

static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
        device_remove_file(&device->dev, &dev_attr_idlecpus);
        device_remove_file(&device->dev, &dev_attr_idlepct);
        device_remove_file(&device->dev, &dev_attr_rrtime);
}

/* Query firmware how many CPUs should be idle */
static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
{
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package;
        int rev, num, ret = -EINVAL;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
                return -EINVAL;

        if (!buffer.length || !buffer.pointer)
                return -EINVAL;

        package = buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
                goto out;
        rev = package->package.elements[0].integer.value;
        num = package->package.elements[1].integer.value;
        if (rev != 1 || num < 0)
                goto out;
        *num_cpus = num;
        ret = 0;
out:
        kfree(buffer.pointer);
        return ret;
}

/* Notify firmware how many CPUs are idle */
static void acpi_pad_ost(acpi_handle handle, int stat,
        uint32_t idle_cpus)
{
        union acpi_object params[3] = {
                {.type = ACPI_TYPE_INTEGER,},
                {.type = ACPI_TYPE_INTEGER,},
                {.type = ACPI_TYPE_BUFFER,},
        };
        struct acpi_object_list arg_list = {3, params};

        params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
        params[1].integer.value = stat;
        params[2].buffer.length = 4;
        params[2].buffer.pointer = (void *)&idle_cpus;
        acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
}

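/*
 * Handle a firmware notification: evaluate _PUR to learn how many CPUs
 * firmware wants idled, start/stop power-saving threads to match, then
 * report the result back through _OST.
 */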
static void acpi_pad_handle_notify(acpi_handle handle)
{
        int num_cpus;
        uint32_t idle_cpus;

        mutex_lock(&isolated_cpus_lock);
        if (acpi_pad_pur(handle, &num_cpus)) {
                mutex_unlock(&isolated_cpus_lock);
                return;
        }
        acpi_pad_idle_cpus(num_cpus);
        idle_cpus = acpi_pad_idle_cpus_num();
        acpi_pad_ost(handle, 0, idle_cpus);
        mutex_unlock(&isolated_cpus_lock);
}

static void acpi_pad_notify(acpi_handle handle, u32 event,
        void *data)
{
        struct acpi_device *device = data;

        switch (event) {
        case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
                acpi_pad_handle_notify(handle);
                acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                        dev_name(&device->dev), event, 0);
                break;
        default:
                printk(KERN_WARNING "Unsupported event [0x%x]\n", event);
                break;
        }
}

static int acpi_pad_add(struct acpi_device *device)
{
        acpi_status status;

        strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

        if (acpi_pad_add_sysfs(device))
                return -ENODEV;

        status = acpi_install_notify_handler(device->handle,
                ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
        if (ACPI_FAILURE(status)) {
                acpi_pad_remove_sysfs(device);
                return -ENODEV;
        }
        return 0;
}

static int acpi_pad_remove(struct acpi_device *device,
        int type)
{
        mutex_lock(&isolated_cpus_lock);
        acpi_pad_idle_cpus(0);
        mutex_unlock(&isolated_cpus_lock);

        acpi_remove_notify_handler(device->handle,
                ACPI_DEVICE_NOTIFY, acpi_pad_notify);
        acpi_pad_remove_sysfs(device);
        return 0;
}

static const struct acpi_device_id pad_device_ids[] = {
        {"ACPI000C", 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct acpi_driver acpi_pad_driver = {
        .name = "processor_aggregator",
        .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
        .ids = pad_device_ids,
        .ops = {
                .add = acpi_pad_add,
                .remove = acpi_pad_remove,
        },
};

static int __init acpi_pad_init(void)
{
        power_saving_mwait_init();
        if (power_saving_mwait_eax == 0)
                return -EINVAL;

        return acpi_bus_register_driver(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
        acpi_bus_unregister_driver(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li <shaohua.li@intel.com>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");