From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Sun, 7 Oct 2012 23:48:45 +0000 (+1100)
Subject: Merge remote-tracking branch 'thermal/next'
X-Git-Tag: next-20121008~51
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=6fdee86d13457fa7dc7578aea7151d3c2397efd6;p=karo-tx-linux.git

Merge remote-tracking branch 'thermal/next'

Conflicts:
	drivers/staging/omap-thermal/omap-thermal-common.c
	drivers/thermal/thermal_sys.c
---

6fdee86d13457fa7dc7578aea7151d3c2397efd6
diff --cc drivers/hwmon/Kconfig
index c74e73b2069a,84e02b416a4a..c4633de64465
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@@ -334,19 -324,9 +334,9 @@@ config SENSORS_DA9052_AD
  	  This driver can also be built as a module. If so, the module
  	  will be called da9052-hwmon.
  
- config SENSORS_EXYNOS4_TMU
- 	tristate "Temperature sensor on Samsung EXYNOS4"
- 	depends on ARCH_EXYNOS4
- 	help
- 	  If you say yes here you get support for the TMU (Thermal Management
- 	  Unit) on the Samsung EXYNOS4 series of SoCs.
- 
- 	  This driver can also be built as a module. If so, the module
- 	  will be called exynos4-tmu.
- 
  config SENSORS_I5K_AMB
  	tristate "FB-DIMM AMB temperature sensor on Intel 5000 series chipsets"
- 	depends on PCI && EXPERIMENTAL
+ 	depends on PCI
  	help
  	  If you say yes here you get support for FB-DIMM AMB temperature
  	  monitoring chips on systems with the Intel 5000 series chipset.
diff --cc drivers/thermal/cpu_cooling.c
index 000000000000,9050c1b0573c..cc1c930a90e4
mode 000000,100644..100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@@ -1,0 -1,449 +1,449 @@@
+ /*
+  * linux/drivers/thermal/cpu_cooling.c
+  *
+  * Copyright (C) 2012	Samsung Electronics Co., Ltd (http://www.samsung.com)
+  * Copyright (C) 2012	Amit Daniel <amit.kachhap@linaro.org>
+  *
+  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; version 2 of the License.
+  *
+  * This program is distributed in the hope that it will be useful, but
+  * WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License along
+  * with this program; if not, write to the Free Software Foundation, Inc.,
+  * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+  *
+  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/thermal.h>
+ #include <linux/platform_device.h>
+ #include <linux/cpufreq.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
+ #include <linux/cpu.h>
+ #include <linux/cpu_cooling.h>
+ 
+ /**
+  * struct cpufreq_cooling_device
+  * @id: unique integer value corresponding to each cpufreq_cooling_device
+  *	registered.
+  * @cool_dev: thermal_cooling_device pointer to keep track of the registered
+  *	cooling device.
+  * @cpufreq_state: integer value representing the current state of cpufreq
+  *	cooling devices.
+  * @cpufreq_val: integer value representing the absolute value of the clipped
+  *	frequency.
+  * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+  * @node: list_head to link all cpufreq_cooling_device instances together.
+  *
+  * This structure is required for keeping information of each registered
+  * cpufreq_cooling_device in a list whose head is represented by
+  * cooling_cpufreq_list. In order to prevent corruption of this list, the
+  * mutex lock cooling_cpufreq_lock is used.
+  */
+ struct cpufreq_cooling_device {
+ 	int id;
+ 	struct thermal_cooling_device *cool_dev;
+ 	unsigned int cpufreq_state;
+ 	unsigned int cpufreq_val;
+ 	struct cpumask allowed_cpus;
+ 	struct list_head node;
+ };
+ static LIST_HEAD(cooling_cpufreq_list);
+ static DEFINE_IDR(cpufreq_idr);
+ 
+ static struct mutex cooling_cpufreq_lock;
+ 
+ /* notify_device passes the clipping info to the CPUFREQ_ADJUST callback. */
+ #define NOTIFY_INVALID NULL
+ struct cpufreq_cooling_device *notify_device;
+ 
+ /**
+  * get_idr - function to get a unique id.
+  * @idr: struct idr * handle used to create an id.
+  * @id: int * value generated by this function.
+  */
+ static int get_idr(struct idr *idr, int *id)
+ {
+ 	int err;
+ again:
+ 	if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
+ 		return -ENOMEM;
+ 
+ 	mutex_lock(&cooling_cpufreq_lock);
+ 	err = idr_get_new(idr, NULL, id);
+ 	mutex_unlock(&cooling_cpufreq_lock);
+ 
+ 	if (unlikely(err == -EAGAIN))
+ 		goto again;
+ 	else if (unlikely(err))
+ 		return err;
+ 
 - 	*id = *id & MAX_ID_MASK;
++	*id = *id & MAX_IDR_MASK;
+ 	return 0;
+ }
+ 
+ /**
+  * release_idr - function to free the unique id.
+  * @idr: struct idr * handle used for creating the id.
+  * @id: int value representing the unique id.
+  */
+ static void release_idr(struct idr *idr, int id)
+ {
+ 	mutex_lock(&cooling_cpufreq_lock);
+ 	idr_remove(idr, id);
+ 	mutex_unlock(&cooling_cpufreq_lock);
+ }
+ 
+ /* Below code defines functions to be used for cpufreq as cooling device */
+ 
+ /**
+  * is_cpufreq_valid - function to check if a cpu has a frequency transition policy.
+  * @cpu: cpu for which the check is needed.
+  */
+ static int is_cpufreq_valid(int cpu)
+ {
+ 	struct cpufreq_policy policy;
+ 	return !cpufreq_get_policy(&policy, cpu);
+ }
+ 
+ /**
+  * get_cpu_frequency - get the absolute value of frequency from level.
+  * @cpu: cpu for which frequency is fetched.
+  * @level: level of frequency of the CPU.
+  *	e.g. level=1 --> 1st max frequency, level=2 --> 2nd max frequency, etc.
+  */
+ static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
+ {
+ 	int ret = 0, i = 0;
+ 	unsigned long level_index;
+ 	bool descend = false;
+ 	struct cpufreq_frequency_table *table =
+ 					cpufreq_frequency_get_table(cpu);
+ 	if (!table)
+ 		return ret;
+ 
+ 	while (table[i].frequency != CPUFREQ_TABLE_END) {
+ 		if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
+ 			/* skip invalid entries; i must advance or this loops forever */
+ 			i++;
+ 			continue;
+ 		}
+ 
+ 		/* check if the table is in ascending or descending order */
+ 		if ((table[i + 1].frequency != CPUFREQ_TABLE_END) &&
+ 		    (table[i + 1].frequency < table[i].frequency) && !descend)
+ 			descend = true;
+ 
+ 		/* return if the level matched and the table is in descending order */
+ 		if (descend && i == level)
+ 			return table[i].frequency;
+ 		i++;
+ 	}
+ 	i--;
+ 
+ 	if (level > i || descend)
+ 		return ret;
+ 	level_index = i - level;
+ 
+ 	/* Scan the table in reverse order and match the level */
+ 	while (i >= 0) {
+ 		if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
+ 			/* step backwards past invalid entries */
+ 			i--;
+ 			continue;
+ 		}
+ 		/* return if the level matched */
+ 		if (i == level_index)
+ 			return table[i].frequency;
+ 		i--;
+ 	}
+ 	return ret;
+ }
+ 
+ /**
+  * cpufreq_apply_cooling - function to apply frequency clipping.
+  * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
+  *	clipping data.
+  * @cooling_state: value of the cooling state.
+  */
+ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
+ 				 unsigned long cooling_state)
+ {
+ 	unsigned int cpuid, clip_freq;
+ 	struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
+ 	unsigned int cpu = cpumask_any(maskPtr);
+ 
+ 	/* Check if the old cooling action is the same as the new cooling action */
+ 	if (cpufreq_device->cpufreq_state == cooling_state)
+ 		return 0;
+ 
+ 	clip_freq = get_cpu_frequency(cpu, cooling_state);
+ 	if (!clip_freq)
+ 		return -EINVAL;
+ 
+ 	cpufreq_device->cpufreq_state = cooling_state;
+ 	cpufreq_device->cpufreq_val = clip_freq;
+ 	notify_device = cpufreq_device;
+ 
+ 	for_each_cpu(cpuid, maskPtr) {
+ 		if (is_cpufreq_valid(cpuid))
+ 			cpufreq_update_policy(cpuid);
+ 	}
+ 
+ 	notify_device = NOTIFY_INVALID;
+ 
+ 	return 0;
+ }
+ 
+ /**
+  * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
+  * @nb: struct notifier_block * with callback info.
+  * @event: value showing the cpufreq event for which this function was invoked.
+  * @data: callback-specific data.
+  */
+ static int cpufreq_thermal_notifier(struct notifier_block *nb,
+ 				    unsigned long event, void *data)
+ {
+ 	struct cpufreq_policy *policy = data;
+ 	unsigned long max_freq = 0;
+ 
+ 	if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
+ 		return 0;
+ 
+ 	if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
+ 		max_freq = notify_device->cpufreq_val;
+ 
+ 	/* Never exceed user_policy.max */
+ 	if (max_freq > policy->user_policy.max)
+ 		max_freq = policy->user_policy.max;
+ 
+ 	if (policy->max != max_freq)
+ 		cpufreq_verify_within_limits(policy, 0, max_freq);
+ 
+ 	return 0;
+ }
+ 
+ /*
+  * cpufreq cooling device callback functions are defined below
+  */
+ 
+ /**
+  * cpufreq_get_max_state - callback function to get the max cooling state.
+  * @cdev: thermal cooling device pointer.
+  * @state: fill this variable with the max cooling state.
+  */
+ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
+ 				 unsigned long *state)
+ {
+ 	int ret = -EINVAL, i = 0;
+ 	struct cpufreq_cooling_device *cpufreq_device;
+ 	struct cpumask *maskPtr;
+ 	unsigned int cpu;
+ 	struct cpufreq_frequency_table *table;
+ 
+ 	mutex_lock(&cooling_cpufreq_lock);
+ 	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
+ 		if (cpufreq_device && cpufreq_device->cool_dev == cdev)
+ 			break;
+ 	}
+ 	if (cpufreq_device == NULL)
+ 		goto return_get_max_state;
+ 
+ 	maskPtr = &cpufreq_device->allowed_cpus;
+ 	cpu = cpumask_any(maskPtr);
+ 	table = cpufreq_frequency_get_table(cpu);
+ 	if (!table) {
+ 		*state = 0;
+ 		ret = 0;
+ 		goto return_get_max_state;
+ 	}
+ 
+ 	while (table[i].frequency != CPUFREQ_TABLE_END) {
+ 		if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
+ 			/* advance past invalid entries to avoid an endless loop */
+ 			i++;
+ 			continue;
+ 		}
+ 		i++;
+ 	}
+ 	if (i > 0) {
+ 		*state = --i;
+ 		ret = 0;
+ 	}
+ 
+ return_get_max_state:
+ 	mutex_unlock(&cooling_cpufreq_lock);
+ 	return ret;
+ }
+ 
+ /**
+  * cpufreq_get_cur_state - callback function to get the current cooling state.
+  * @cdev: thermal cooling device pointer.
+  * @state: fill this variable with the current cooling state.
+  */
+ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
+ 				 unsigned long *state)
+ {
+ 	int ret = -EINVAL;
+ 	struct cpufreq_cooling_device *cpufreq_device;
+ 
+ 	mutex_lock(&cooling_cpufreq_lock);
+ 	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
+ 		if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
+ 			*state = cpufreq_device->cpufreq_state;
+ 			ret = 0;
+ 			break;
+ 		}
+ 	}
+ 	mutex_unlock(&cooling_cpufreq_lock);
+ 
+ 	return ret;
+ }
+ 
+ /**
+  * cpufreq_set_cur_state - callback function to set the current cooling state.
+  * @cdev: thermal cooling device pointer.
+  * @state: set this variable to the current cooling state.
+  */
+ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
+ 				 unsigned long state)
+ {
+ 	int ret = -EINVAL;
+ 	struct cpufreq_cooling_device *cpufreq_device;
+ 
+ 	mutex_lock(&cooling_cpufreq_lock);
+ 	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
+ 		if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
+ 			ret = 0;
+ 			break;
+ 		}
+ 	}
+ 	if (!ret)
+ 		ret = cpufreq_apply_cooling(cpufreq_device, state);
+ 
+ 	mutex_unlock(&cooling_cpufreq_lock);
+ 
+ 	return ret;
+ }
+ 
+ /* Bind cpufreq callbacks to thermal cooling device ops */
+ static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
+ 	.get_max_state = cpufreq_get_max_state,
+ 	.get_cur_state = cpufreq_get_cur_state,
+ 	.set_cur_state = cpufreq_set_cur_state,
+ };
+ 
+ /* Notifier for cpufreq policy change */
+ static struct notifier_block thermal_cpufreq_notifier_block = {
+ 	.notifier_call = cpufreq_thermal_notifier,
+ };
+ 
+ /**
+  * cpufreq_cooling_register - function to create a cpufreq cooling device.
+  * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+  */
+ struct thermal_cooling_device *cpufreq_cooling_register(
+ 	struct cpumask *clip_cpus)
+ {
+ 	struct thermal_cooling_device *cool_dev;
+ 	struct cpufreq_cooling_device *cpufreq_dev = NULL;
+ 	unsigned int cpufreq_dev_count = 0, min = 0, max = 0;
+ 	char dev_name[THERMAL_NAME_LENGTH];
+ 	int ret = 0, i;
+ 	struct cpufreq_policy policy;
+ 
+ 	list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node)
+ 		cpufreq_dev_count++;
+ 
+ 	/* Verify that all the clip cpus have the same freq_min, freq_max limits */
+ 	for_each_cpu(i, clip_cpus) {
+ 		/* continue (rather than return an error) if no cpufreq policy
+ 		 * is found; cpufreq_get_policy() returns 0 on success */
+ 		if (cpufreq_get_policy(&policy, i))
+ 			continue;
+ 		if (min == 0 && max == 0) {
+ 			min = policy.cpuinfo.min_freq;
+ 			max = policy.cpuinfo.max_freq;
+ 		} else {
+ 			if (min != policy.cpuinfo.min_freq ||
+ 			    max != policy.cpuinfo.max_freq)
+ 				return ERR_PTR(-EINVAL);
+ 		}
+ 	}
+ 	cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
+ 			      GFP_KERNEL);
+ 	if (!cpufreq_dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
+ 
+ 	if (cpufreq_dev_count == 0)
+ 		mutex_init(&cooling_cpufreq_lock);
+ 
+ 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
+ 	if (ret) {
+ 		kfree(cpufreq_dev);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+ 	sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id);
+ 
+ 	cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
+ 						   &cpufreq_cooling_ops);
+ 	/* thermal_cooling_device_register() returns ERR_PTR() on failure */
+ 	if (IS_ERR(cool_dev)) {
+ 		release_idr(&cpufreq_idr, cpufreq_dev->id);
+ 		kfree(cpufreq_dev);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 	cpufreq_dev->cool_dev = cool_dev;
+ 	cpufreq_dev->cpufreq_state = 0;
+ 	mutex_lock(&cooling_cpufreq_lock);
+ 	list_add_tail(&cpufreq_dev->node, &cooling_cpufreq_list);
+ 
+ 	/* Register the notifier for the first cpufreq cooling device */
+ 	if (cpufreq_dev_count == 0)
+ 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
+ 					  CPUFREQ_POLICY_NOTIFIER);
+ 
+ 	mutex_unlock(&cooling_cpufreq_lock);
+ 	return cool_dev;
+ }
+ EXPORT_SYMBOL(cpufreq_cooling_register);
+ 
+ /**
+  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
+  * @cdev: thermal cooling device pointer.
+  */
+ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+ {
+ 	struct cpufreq_cooling_device *cpufreq_dev = NULL;
+ 	unsigned int cpufreq_dev_count = 0;
+ 
+ 	mutex_lock(&cooling_cpufreq_lock);
+ 	list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node) {
+ 		if (cpufreq_dev && cpufreq_dev->cool_dev == cdev)
+ 			break;
+ 		cpufreq_dev_count++;
+ 	}
+ 
+ 	if (!cpufreq_dev || cpufreq_dev->cool_dev != cdev) {
+ 		mutex_unlock(&cooling_cpufreq_lock);
+ 		return;
+ 	}
+ 
+ 	list_del(&cpufreq_dev->node);
+ 
+ 	/* Unregister the notifier for the last cpufreq cooling device */
+ 	if (cpufreq_dev_count == 1) {
+ 		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
+ 					    CPUFREQ_POLICY_NOTIFIER);
+ 	}
+ 	mutex_unlock(&cooling_cpufreq_lock);
+ 	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
+ 	release_idr(&cpufreq_idr, cpufreq_dev->id);
+ 	if (cpufreq_dev_count == 1)
+ 		mutex_destroy(&cooling_cpufreq_lock);
+ 	kfree(cpufreq_dev);
+ }
+ EXPORT_SYMBOL(cpufreq_cooling_unregister);
diff --cc drivers/thermal/thermal_sys.c
index efd81bb25e01,848553dd4bb7..8f0f37bb2825
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@@ -91,6 -161,264 +161,261 @@@ static void release_idr(struct idr *idr
  	mutex_unlock(lock);
  }
  
+ int get_tz_trend(struct thermal_zone_device *tz, int trip)
+ {
+ 	enum thermal_trend trend;
+ 
+ 	if (!tz->ops->get_trend || tz->ops->get_trend(tz, trip, &trend)) {
+ 		if (tz->temperature > tz->last_temperature)
+ 			trend = THERMAL_TREND_RAISING;
+ 		else if (tz->temperature < tz->last_temperature)
+ 			trend = THERMAL_TREND_DROPPING;
+ 		else
+ 			trend = THERMAL_TREND_STABLE;
+ 	}
+ 
+ 	return trend;
+ }
+ EXPORT_SYMBOL(get_tz_trend);
+ 
+ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *tz,
+ 			struct thermal_cooling_device *cdev, int trip)
+ {
+ 	struct thermal_instance *pos = NULL;
+ 	struct thermal_instance *target_instance = NULL;
+ 
+ 	mutex_lock(&tz->lock);
+ 	mutex_lock(&cdev->lock);
+ 
+ 	list_for_each_entry(pos, &tz->thermal_instances, tz_node) {
+ 		if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ 			target_instance = pos;
+ 			break;
+ 		}
+ 	}
+ 
+ 	mutex_unlock(&cdev->lock);
+ 	mutex_unlock(&tz->lock);
+ 
+ 	return target_instance;
+ }
+ EXPORT_SYMBOL(get_thermal_instance);
+ 
+ static void print_bind_err_msg(struct thermal_zone_device *tz,
+ 			struct thermal_cooling_device *cdev, int ret)
+ {
+ 	dev_err(&tz->device, "binding zone %s with cdev %s failed:%d\n",
+ 		tz->type, cdev->type, ret);
+ }
+ 
+ static void __bind(struct thermal_zone_device *tz, int mask,
+ 			struct thermal_cooling_device *cdev)
+ {
+ 	int i, ret;
+ 
+ 	for (i = 0; i < tz->trips; i++) {
+ 		if (mask & (1 << i)) {
+ 			ret = thermal_zone_bind_cooling_device(tz, i, cdev,
+ 					THERMAL_NO_LIMIT, THERMAL_NO_LIMIT);
+ 			if (ret)
+ 				print_bind_err_msg(tz, cdev, ret);
+ 		}
+ 	}
+ }
+ 
+ static void __unbind(struct thermal_zone_device *tz, int mask,
+ 			struct thermal_cooling_device *cdev)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < tz->trips; i++)
+ 		if (mask & (1 << i))
+ 			thermal_zone_unbind_cooling_device(tz, i, cdev);
+ }
+ 
+ static void bind_cdev(struct thermal_cooling_device *cdev)
+ {
+ 	int i, ret;
+ 	const struct thermal_zone_params *tzp;
+ 	struct thermal_zone_device *pos = NULL;
+ 
+ 	mutex_lock(&thermal_list_lock);
+ 
+ 	list_for_each_entry(pos, &thermal_tz_list, node) {
+ 		if (!pos->tzp && !pos->ops->bind)
+ 			continue;
+ 
+ 		if (!pos->tzp && pos->ops->bind) {
+ 			ret = pos->ops->bind(pos, cdev);
+ 			if (ret)
+ 				print_bind_err_msg(pos, cdev, ret);
+ 		}
+ 
+ 		tzp = pos->tzp;
+ 		if (!tzp || !tzp->tbp)
+ 			continue;
+ 
+ 		for (i = 0; i < tzp->num_tbps; i++) {
+ 			if (tzp->tbp[i].cdev || !tzp->tbp[i].match)
+ 				continue;
+ 			if (tzp->tbp[i].match(pos, cdev))
+ 				continue;
+ 			tzp->tbp[i].cdev = cdev;
+ 			__bind(pos, tzp->tbp[i].trip_mask, cdev);
+ 		}
+ 	}
+ 
+ 	mutex_unlock(&thermal_list_lock);
+ }
+ 
+ static void bind_tz(struct thermal_zone_device *tz)
+ {
+ 	int i, ret;
+ 	struct thermal_cooling_device *pos = NULL;
+ 	const struct thermal_zone_params *tzp = tz->tzp;
+ 
+ 	if (!tzp && !tz->ops->bind)
+ 		return;
+ 
+ 	mutex_lock(&thermal_list_lock);
+ 
+ 	/* If there is no platform data, try to use ops->bind */
+ 	if (!tzp && tz->ops->bind) {
+ 		list_for_each_entry(pos, &thermal_cdev_list, node) {
+ 			ret = tz->ops->bind(tz, pos);
+ 			if (ret)
+ 				print_bind_err_msg(tz, pos, ret);
+ 		}
+ 		goto exit;
+ 	}
+ 
+ 	if (!tzp || !tzp->tbp)
+ 		goto exit;
+ 
+ 	list_for_each_entry(pos, &thermal_cdev_list, node) {
+ 		for (i = 0; i < tzp->num_tbps; i++) {
+ 			if (tzp->tbp[i].cdev || !tzp->tbp[i].match)
+ 				continue;
+ 			if (tzp->tbp[i].match(tz, pos))
+ 				continue;
+ 			tzp->tbp[i].cdev = pos;
+ 			__bind(tz, tzp->tbp[i].trip_mask, pos);
+ 		}
+ 	}
+ exit:
+ 	mutex_unlock(&thermal_list_lock);
+ }
+ 
+ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
+ 					    int delay)
+ {
 - 	cancel_delayed_work(&(tz->poll_queue));
 - 
 - 	if (!delay)
 - 		return;
 - 
+ 	if (delay > 1000)
 - 		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
 - 				   round_jiffies(msecs_to_jiffies(delay)));
++		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
++				 round_jiffies(msecs_to_jiffies(delay)));
++	else if (delay)
++		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
++				 msecs_to_jiffies(delay));
+ 	else
 - 		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
 - 				   msecs_to_jiffies(delay));
++		cancel_delayed_work(&tz->poll_queue);
+ }
+ 
+ static void monitor_thermal_zone(struct thermal_zone_device *tz)
+ {
+ 	mutex_lock(&tz->lock);
+ 
+ 	if (tz->passive)
+ 		thermal_zone_device_set_polling(tz, tz->passive_delay);
+ 	else if (tz->polling_delay)
+ 		thermal_zone_device_set_polling(tz, tz->polling_delay);
+ 	else
+ 		thermal_zone_device_set_polling(tz, 0);
+ 
+ 	mutex_unlock(&tz->lock);
+ }
+ 
+ static void handle_non_critical_trips(struct thermal_zone_device *tz,
+ 			int trip, enum thermal_trip_type trip_type)
+ {
+ 	tz->governor->throttle(tz, trip);
+ }
+ 
+ static void handle_critical_trips(struct thermal_zone_device *tz,
+ 			int trip, enum thermal_trip_type trip_type)
+ {
+ 	long trip_temp;
+ 
+ 	tz->ops->get_trip_temp(tz, trip, &trip_temp);
+ 
+ 	/* If we have not crossed the trip_temp, we do not care. */
+ 	if (tz->temperature < trip_temp)
+ 		return;
+ 
+ 	if (tz->ops->notify)
+ 		tz->ops->notify(tz, trip, trip_type);
+ 
+ 	if (trip_type == THERMAL_TRIP_CRITICAL) {
+ 		pr_emerg("Critical temperature reached (%d C), shutting down\n",
+ 			 tz->temperature / 1000);
+ 		orderly_poweroff(true);
+ 	}
+ }
+ 
+ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
+ {
+ 	enum thermal_trip_type type;
+ 
+ 	tz->ops->get_trip_type(tz, trip, &type);
+ 
+ 	if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
+ 		handle_critical_trips(tz, trip, type);
+ 	else
+ 		handle_non_critical_trips(tz, trip, type);
+ 	/*
+ 	 * Alright, we handled this trip successfully.
+ 	 * So, start monitoring again.
+ 	 */
+ 	monitor_thermal_zone(tz);
+ }
+ 
+ static void update_temperature(struct thermal_zone_device *tz)
+ {
+ 	long temp;
+ 	int ret;
+ 
+ 	mutex_lock(&tz->lock);
+ 
+ 	ret = tz->ops->get_temp(tz, &temp);
+ 	if (ret) {
+ 		pr_warn("failed to read out thermal zone %d\n", tz->id);
+ 		goto exit;
+ 	}
+ 
+ 	tz->last_temperature = tz->temperature;
+ 	tz->temperature = temp;
+ 
+ exit:
+ 	mutex_unlock(&tz->lock);
+ }
+ 
+ void thermal_zone_device_update(struct thermal_zone_device *tz)
+ {
+ 	int count;
+ 
+ 	update_temperature(tz);
+ 
+ 	for (count = 0; count < tz->trips; count++)
+ 		handle_thermal_trip(tz, count);
+ }
+ EXPORT_SYMBOL(thermal_zone_device_update);
+ 
+ static void thermal_zone_device_check(struct work_struct *work)
+ {
+ 	struct thermal_zone_device *tz = container_of(work, struct
+ 						      thermal_zone_device,
+ 						      poll_queue.work);
+ 	thermal_zone_device_update(tz);
+ }
+ 
  /* sys I/F for thermal zone */
  #define to_thermal_zone(_dev) \
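
For context, a minimal sketch of a consumer of the new cpufreq cooling API follows. Only cpufreq_cooling_register() and cpufreq_cooling_unregister() come from the code above; the example_thermal platform driver, its probe/remove pairing, and the choice of cpu_online_mask are hypothetical illustration, not part of this merge.

/*
 * Hypothetical consumer sketch (not part of this merge): a platform
 * driver that creates a cpufreq cooling device at probe time and
 * removes it when the driver is unbound.
 */
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/cpu_cooling.h>

struct example_thermal_data {
	/* handle returned by cpufreq_cooling_register() */
	struct thermal_cooling_device *cool_dev;
};

static int example_thermal_probe(struct platform_device *pdev)
{
	struct example_thermal_data *data;
	struct cpumask clip_cpus;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * Constrain all online cpus; cpufreq_cooling_register() copies
	 * the mask internally, so a stack cpumask is fine here.
	 */
	cpumask_copy(&clip_cpus, cpu_online_mask);

	data->cool_dev = cpufreq_cooling_register(&clip_cpus);
	if (IS_ERR(data->cool_dev))
		return PTR_ERR(data->cool_dev);	/* -EINVAL or -ENOMEM */

	platform_set_drvdata(pdev, data);
	return 0;
}

static int example_thermal_remove(struct platform_device *pdev)
{
	struct example_thermal_data *data = platform_get_drvdata(pdev);

	cpufreq_cooling_unregister(data->cool_dev);
	return 0;
}

After registration the driver needs no further involvement: the thermal core drives cooling entirely through cpufreq_cooling_ops, where set_cur_state() translates the requested cooling state into a clip frequency and the CPUFREQ_ADJUST notifier enforces that limit on every affected cpufreq policy.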