u32 tj_max;
u32 msr_pkg_therm_low;
u32 msr_pkg_therm_high;
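+ /* Delayed work to handle threshold interrupts, one per package */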
+ struct delayed_work work;
struct thermal_zone_device *tzone;
struct cpumask cpumask;
};
/* Protects zone operation in the work function against hotplug removal */
static DEFINE_MUTEX(thermal_zone_mutex);
-/* Interrupt to work function schedule queue */
-static DEFINE_PER_CPU(struct delayed_work, pkg_temp_thermal_threshold_work);
-
/* Debug counters to show using debugfs */
static struct dentry *debugfs;
static unsigned int pkg_interrupt_cnt;
mutex_unlock(&thermal_zone_mutex);
}
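+/*
+ * The work function clears the package thermal status MSR on the local
+ * CPU, so the work must run on a CPU which belongs to the package.
+ */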
+static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work)
+{
+ unsigned long delay = msecs_to_jiffies(notify_delay_ms);
+
+ schedule_delayed_work_on(cpu, work, delay);
+}
+
static int pkg_thermal_notify(u64 msr_val)
{
int cpu = smp_processor_id();
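+ /* Work is per package, so scheduling it once is enough. */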
pkgdev = pkg_temp_thermal_get_dev(cpu);
if (pkgdev && !pkgdev->work_scheduled) {
pkgdev->work_scheduled = true;
- schedule_delayed_work_on(cpu,
- &per_cpu(pkg_temp_thermal_threshold_work, cpu),
- msecs_to_jiffies(notify_delay_ms));
+ pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
}
spin_unlock_irqrestore(&pkg_temp_lock, flags);
if (!pkgdev)
return -ENOMEM;
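+ /* The work is now per package, so initialize it once here */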
+ INIT_DELAYED_WORK(&pkgdev->work, pkg_temp_thermal_threshold_work_fn);
pkgdev->phys_proc_id = topology_physical_package_id(cpu);
pkgdev->cpu = cpu;
pkgdev->tj_max = tj_max;
static void put_core_offline(unsigned int cpu)
{
struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu);
- bool lastcpu;
+ bool lastcpu, was_target;
int target;
if (!pkgdev)
thermal_zone_device_unregister(tzone);
}
+ /* Protect against work and interrupts */
+ spin_lock_irq(&pkg_temp_lock);
+
/*
- * If this is the last CPU in the package, restore the interrupt
- * MSR and remove the package reference from the array.
+ * Check whether this CPU was the current target and store the new
+ * one. Once we drop the lock, the interrupt notify function will
+ * see the new target.
+ */
+ was_target = pkgdev->cpu == cpu;
+ pkgdev->cpu = target;
+
+ /*
+ * If this is the last CPU in the package, remove the package
+ * reference from the list and restore the interrupt MSR. Once we
+ * drop the lock, neither the interrupt notify function nor the
+ * worker will see the package anymore.
*/
if (lastcpu) {
- /* Protect against work and interrupts */
- spin_lock_irq(&pkg_temp_lock);
list_del(&pkgdev->list);
/*
* After this point nothing touches the MSR anymore. We
wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
pkgdev->msr_pkg_therm_low,
pkgdev->msr_pkg_therm_high);
- kfree(pkgdev);
+ spin_lock_irq(&pkg_temp_lock);
}
/*
- * Note, this is broken when work was really scheduled on the
- * outgoing cpu because this will leave the work_scheduled flag set
- * and the thermal interrupts disabled. Will be fixed in the next
- * step as there is no way to fix it in a sane way with the per cpu
- * work nonsense.
+ * Check whether there is work scheduled and whether the work is
+ * targeted at the outgoing CPU.
*/
- cancel_delayed_work_sync(&per_cpu(pkg_temp_thermal_threshold_work, cpu));
+ if (pkgdev->work_scheduled && was_target) {
+ /*
+ * To cancel the work we need to drop the lock, otherwise we might
+ * deadlock: cancel_delayed_work_sync() waits for a running work
+ * function to finish, and the work function itself acquires
+ * pkg_temp_lock.
+ */
+ spin_unlock_irq(&pkg_temp_lock);
+ cancel_delayed_work_sync(&pkgdev->work);
+ spin_lock_irq(&pkg_temp_lock);
+ /*
+ * If this is not the last CPU in the package and the work did not
+ * run after we dropped the lock above, then we need to reschedule
+ * the work on the new target CPU; otherwise the interrupt stays
+ * disabled forever.
+ */
+ if (!lastcpu && pkgdev->work_scheduled)
+ pkg_thermal_schedule_work(target, &pkgdev->work);
+ }
+
+ spin_unlock_irq(&pkg_temp_lock);
+
+ /*
+ * Final cleanup if this is the last CPU. The package was removed
+ * from the list under the lock, so nothing can find it anymore.
+ */
+ if (lastcpu)
+ kfree(pkgdev);
}
static int get_core_online(unsigned int cpu)
if (!cpu_has(c, X86_FEATURE_DTHERM) || !cpu_has(c, X86_FEATURE_PTS))
return -ENODEV;
- INIT_DELAYED_WORK(&per_cpu(pkg_temp_thermal_threshold_work, cpu),
- pkg_temp_thermal_threshold_work_fn);
-
/* If the package exists, nothing to do */
if (pkgdev) {
cpumask_set_cpu(cpu, &pkgdev->cpumask);