drivers/gpu/drm/radeon/radeon_pm.c
1 /*
2  * Permission is hereby granted, free of charge, to any person obtaining a
3  * copy of this software and associated documentation files (the "Software"),
4  * to deal in the Software without restriction, including without limitation
5  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6  * and/or sell copies of the Software, and to permit persons to whom the
7  * Software is furnished to do so, subject to the following conditions:
8  *
9  * The above copyright notice and this permission notice shall be included in
10  * all copies or substantial portions of the Software.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18  * OTHER DEALINGS IN THE SOFTWARE.
19  *
20  * Authors: Rafał Miłecki <zajec5@gmail.com>
21  *          Alex Deucher <alexdeucher@gmail.com>
22  */
23 #include <drm/drmP.h>
24 #include "radeon.h"
25 #include "avivod.h"
26 #include "atom.h"
27 #include <linux/power_supply.h>
28 #include <linux/hwmon.h>
29 #include <linux/hwmon-sysfs.h>
30
31 #define RADEON_IDLE_LOOP_MS 100
32 #define RADEON_RECLOCK_DELAY_MS 200
33 #define RADEON_WAIT_VBLANK_TIMEOUT 200
34
35 static const char *radeon_pm_state_type_name[5] = {
36         "",
37         "Powersave",
38         "Battery",
39         "Balanced",
40         "Performance",
41 };
42
43 static void radeon_dynpm_idle_work_handler(struct work_struct *work);
44 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
45 static bool radeon_pm_in_vbl(struct radeon_device *rdev);
46 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
47 static void radeon_pm_update_profile(struct radeon_device *rdev);
48 static void radeon_pm_set_clocks(struct radeon_device *rdev);
49
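/**
 * radeon_pm_get_type_index - look up a power state index by type
 *
 * @rdev: radeon_device pointer
 * @ps_type: power state type to search for
 * @instance: which occurrence of that type to return
 *
 * Walks the power state table and returns the index of the Nth state
 * of the requested type.  Falls back to the default power state index
 * if no match is found.
 */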
50 int radeon_pm_get_type_index(struct radeon_device *rdev,
51                              enum radeon_pm_state_type ps_type,
52                              int instance)
53 {
54         int i;
55         int found_instance = -1;
56
57         for (i = 0; i < rdev->pm.num_power_states; i++) {
58                 if (rdev->pm.power_state[i].type == ps_type) {
59                         found_instance++;
60                         if (found_instance == instance)
61                                 return i;
62                 }
63         }
64         /* return default if no match */
65         return rdev->pm.default_power_state_index;
66 }
67
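/**
 * radeon_pm_acpi_event_handler - handle AC/DC power source events
 *
 * @rdev: radeon_device pointer
 *
 * Called on an ACPI power source change.  For DPM, updates the cached
 * AC-power flag and reconfigures BAPM if the asic supports it; for the
 * profile method, re-evaluates the automatic profile and reprograms
 * the clocks.
 */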
68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
69 {
70         if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
71                 mutex_lock(&rdev->pm.mutex);
72                 if (power_supply_is_system_supplied() > 0)
73                         rdev->pm.dpm.ac_power = true;
74                 else
75                         rdev->pm.dpm.ac_power = false;
76                 if (rdev->asic->dpm.enable_bapm)
77                         radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
78                 mutex_unlock(&rdev->pm.mutex);
79         } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
80                 if (rdev->pm.profile == PM_PROFILE_AUTO) {
81                         mutex_lock(&rdev->pm.mutex);
82                         radeon_pm_update_profile(rdev);
83                         radeon_pm_set_clocks(rdev);
84                         mutex_unlock(&rdev->pm.mutex);
85                 }
86         }
87 }
88
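/**
 * radeon_pm_update_profile - select the active power profile entry
 *
 * @rdev: radeon_device pointer
 *
 * Maps the user-selected profile (default/auto/low/mid/high) and the
 * current conditions (AC vs. battery, single vs. multiple heads) to a
 * profile index, then picks the matching power state and clock mode
 * depending on whether any crtcs are active.
 */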
89 static void radeon_pm_update_profile(struct radeon_device *rdev)
90 {
91         switch (rdev->pm.profile) {
92         case PM_PROFILE_DEFAULT:
93                 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
94                 break;
95         case PM_PROFILE_AUTO:
96                 if (power_supply_is_system_supplied() > 0) {
97                         if (rdev->pm.active_crtc_count > 1)
98                                 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
99                         else
100                                 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
101                 } else {
102                         if (rdev->pm.active_crtc_count > 1)
103                                 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
104                         else
105                                 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
106                 }
107                 break;
108         case PM_PROFILE_LOW:
109                 if (rdev->pm.active_crtc_count > 1)
110                         rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
111                 else
112                         rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
113                 break;
114         case PM_PROFILE_MID:
115                 if (rdev->pm.active_crtc_count > 1)
116                         rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
117                 else
118                         rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
119                 break;
120         case PM_PROFILE_HIGH:
121                 if (rdev->pm.active_crtc_count > 1)
122                         rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
123                 else
124                         rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
125                 break;
126         }
127
128         if (rdev->pm.active_crtc_count == 0) {
129                 rdev->pm.requested_power_state_index =
130                         rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
131                 rdev->pm.requested_clock_mode_index =
132                         rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
133         } else {
134                 rdev->pm.requested_power_state_index =
135                         rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
136                 rdev->pm.requested_clock_mode_index =
137                         rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
138         }
139 }
140
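/* Unmap the CPU mappings of all BOs currently placed in VRAM so they
 * are not accessed while the clocks are being reprogrammed.
 */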
141 static void radeon_unmap_vram_bos(struct radeon_device *rdev)
142 {
143         struct radeon_bo *bo, *n;
144
145         if (list_empty(&rdev->gem.objects))
146                 return;
147
148         list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
149                 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
150                         ttm_bo_unmap_virtual(&bo->tbo);
151         }
152 }
153
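/* Wait (with a timeout) for the next vblank on the active crtcs so the
 * reclock happens while the displays are not scanning out.
 */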
154 static void radeon_sync_with_vblank(struct radeon_device *rdev)
155 {
156         if (rdev->pm.active_crtcs) {
157                 rdev->pm.vblank_sync = false;
158                 wait_event_timeout(
159                         rdev->irq.vblank_queue, rdev->pm.vblank_sync,
160                         msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
161         }
162 }
163
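/**
 * radeon_set_power_state - program the requested power state (non-DPM)
 *
 * @rdev: radeon_device pointer
 *
 * If the requested state differs from the current one and the GUI is
 * idle, programs the new engine and memory clocks (clamped to the
 * defaults), adjusting voltage/pcie lanes before raising clocks and
 * after lowering them, then updates the cached current state.
 */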
164 static void radeon_set_power_state(struct radeon_device *rdev)
165 {
166         u32 sclk, mclk;
167         bool misc_after = false;
168
169         if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
170             (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
171                 return;
172
173         if (radeon_gui_idle(rdev)) {
174                 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
175                         clock_info[rdev->pm.requested_clock_mode_index].sclk;
176                 if (sclk > rdev->pm.default_sclk)
177                         sclk = rdev->pm.default_sclk;
178
179                 /* starting with BTC, there is one state that is used for both
180                  * MH and SH.  The difference is that we always use the high clock index for
181                  * mclk and vddci.
182                  */
183                 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
184                     (rdev->family >= CHIP_BARTS) &&
185                     rdev->pm.active_crtc_count &&
186                     ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
187                      (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
188                         mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
189                                 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
190                 else
191                         mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
192                                 clock_info[rdev->pm.requested_clock_mode_index].mclk;
193
194                 if (mclk > rdev->pm.default_mclk)
195                         mclk = rdev->pm.default_mclk;
196
197                 /* upvolt before raising clocks, downvolt after lowering clocks */
198                 if (sclk < rdev->pm.current_sclk)
199                         misc_after = true;
200
201                 radeon_sync_with_vblank(rdev);
202
203                 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
204                         if (!radeon_pm_in_vbl(rdev))
205                                 return;
206                 }
207
208                 radeon_pm_prepare(rdev);
209
210                 if (!misc_after)
211                         /* voltage, pcie lanes, etc. */
212                         radeon_pm_misc(rdev);
213
214                 /* set engine clock */
215                 if (sclk != rdev->pm.current_sclk) {
216                         radeon_pm_debug_check_in_vbl(rdev, false);
217                         radeon_set_engine_clock(rdev, sclk);
218                         radeon_pm_debug_check_in_vbl(rdev, true);
219                         rdev->pm.current_sclk = sclk;
220                         DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
221                 }
222
223                 /* set memory clock */
224                 if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
225                         radeon_pm_debug_check_in_vbl(rdev, false);
226                         radeon_set_memory_clock(rdev, mclk);
227                         radeon_pm_debug_check_in_vbl(rdev, true);
228                         rdev->pm.current_mclk = mclk;
229                         DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
230                 }
231
232                 if (misc_after)
233                         /* voltage, pcie lanes, etc. */
234                         radeon_pm_misc(rdev);
235
236                 radeon_pm_finish(rdev);
237
238                 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
239                 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
240         } else
241                 DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
242 }
243
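/**
 * radeon_pm_set_clocks - reclock the GPU for the profile/dynpm paths
 *
 * @rdev: radeon_device pointer
 *
 * Takes the necessary locks, waits for all rings to drain, unmaps VRAM
 * BOs and grabs vblank references on the active crtcs, then calls
 * radeon_set_power_state() and updates the display watermarks for the
 * new state.
 */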
244 static void radeon_pm_set_clocks(struct radeon_device *rdev)
245 {
246         int i, r;
247
248         /* no need to take locks, etc. if nothing's going to change */
249         if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
250             (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
251                 return;
252
253         mutex_lock(&rdev->ddev->struct_mutex);
254         down_write(&rdev->pm.mclk_lock);
255         mutex_lock(&rdev->ring_lock);
256
257         /* wait for the rings to drain */
258         for (i = 0; i < RADEON_NUM_RINGS; i++) {
259                 struct radeon_ring *ring = &rdev->ring[i];
260                 if (!ring->ready) {
261                         continue;
262                 }
263                 r = radeon_fence_wait_empty(rdev, i);
264                 if (r) {
265                         /* needs a GPU reset, don't reset here */
266                         mutex_unlock(&rdev->ring_lock);
267                         up_write(&rdev->pm.mclk_lock);
268                         mutex_unlock(&rdev->ddev->struct_mutex);
269                         return;
270                 }
271         }
272
273         radeon_unmap_vram_bos(rdev);
274
275         if (rdev->irq.installed) {
276                 for (i = 0; i < rdev->num_crtc; i++) {
277                         if (rdev->pm.active_crtcs & (1 << i)) {
278                                 rdev->pm.req_vblank |= (1 << i);
279                                 drm_vblank_get(rdev->ddev, i);
280                         }
281                 }
282         }
283
284         radeon_set_power_state(rdev);
285
286         if (rdev->irq.installed) {
287                 for (i = 0; i < rdev->num_crtc; i++) {
288                         if (rdev->pm.req_vblank & (1 << i)) {
289                                 rdev->pm.req_vblank &= ~(1 << i);
290                                 drm_vblank_put(rdev->ddev, i);
291                         }
292                 }
293         }
294
295         /* update display watermarks based on new power state */
296         radeon_update_bandwidth_info(rdev);
297         if (rdev->pm.active_crtc_count)
298                 radeon_bandwidth_update(rdev);
299
300         rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
301
302         mutex_unlock(&rdev->ring_lock);
303         up_write(&rdev->pm.mclk_lock);
304         mutex_unlock(&rdev->ddev->struct_mutex);
305 }
306
307 static void radeon_pm_print_states(struct radeon_device *rdev)
308 {
309         int i, j;
310         struct radeon_power_state *power_state;
311         struct radeon_pm_clock_info *clock_info;
312
313         DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
314         for (i = 0; i < rdev->pm.num_power_states; i++) {
315                 power_state = &rdev->pm.power_state[i];
316                 DRM_DEBUG_DRIVER("State %d: %s\n", i,
317                         radeon_pm_state_type_name[power_state->type]);
318                 if (i == rdev->pm.default_power_state_index)
319                         DRM_DEBUG_DRIVER("\tDefault");
320                 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
321                         DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
322                 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
323                         DRM_DEBUG_DRIVER("\tSingle display only\n");
324                 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
325                 for (j = 0; j < power_state->num_clock_modes; j++) {
326                         clock_info = &(power_state->clock_info[j]);
327                         if (rdev->flags & RADEON_IS_IGP)
328                                 DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
329                                                  j,
330                                                  clock_info->sclk * 10);
331                         else
332                                 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
333                                                  j,
334                                                  clock_info->sclk * 10,
335                                                  clock_info->mclk * 10,
336                                                  clock_info->voltage.voltage);
337                 }
338         }
339 }
340
341 static ssize_t radeon_get_pm_profile(struct device *dev,
342                                      struct device_attribute *attr,
343                                      char *buf)
344 {
345         struct drm_device *ddev = dev_get_drvdata(dev);
346         struct radeon_device *rdev = ddev->dev_private;
347         int cp = rdev->pm.profile;
348
349         return snprintf(buf, PAGE_SIZE, "%s\n",
350                         (cp == PM_PROFILE_AUTO) ? "auto" :
351                         (cp == PM_PROFILE_LOW) ? "low" :
352                         (cp == PM_PROFILE_MID) ? "mid" :
353                         (cp == PM_PROFILE_HIGH) ? "high" : "default");
354 }
355
356 static ssize_t radeon_set_pm_profile(struct device *dev,
357                                      struct device_attribute *attr,
358                                      const char *buf,
359                                      size_t count)
360 {
361         struct drm_device *ddev = dev_get_drvdata(dev);
362         struct radeon_device *rdev = ddev->dev_private;
363
364         mutex_lock(&rdev->pm.mutex);
365         if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
366                 if (strncmp("default", buf, strlen("default")) == 0)
367                         rdev->pm.profile = PM_PROFILE_DEFAULT;
368                 else if (strncmp("auto", buf, strlen("auto")) == 0)
369                         rdev->pm.profile = PM_PROFILE_AUTO;
370                 else if (strncmp("low", buf, strlen("low")) == 0)
371                         rdev->pm.profile = PM_PROFILE_LOW;
372                 else if (strncmp("mid", buf, strlen("mid")) == 0)
373                         rdev->pm.profile = PM_PROFILE_MID;
374                 else if (strncmp("high", buf, strlen("high")) == 0)
375                         rdev->pm.profile = PM_PROFILE_HIGH;
376                 else {
377                         count = -EINVAL;
378                         goto fail;
379                 }
380                 radeon_pm_update_profile(rdev);
381                 radeon_pm_set_clocks(rdev);
382         } else
383                 count = -EINVAL;
384
385 fail:
386         mutex_unlock(&rdev->pm.mutex);
387
388         return count;
389 }
390
391 static ssize_t radeon_get_pm_method(struct device *dev,
392                                     struct device_attribute *attr,
393                                     char *buf)
394 {
395         struct drm_device *ddev = dev_get_drvdata(dev);
396         struct radeon_device *rdev = ddev->dev_private;
397         int pm = rdev->pm.pm_method;
398
399         return snprintf(buf, PAGE_SIZE, "%s\n",
400                         (pm == PM_METHOD_DYNPM) ? "dynpm" :
401                         (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
402 }
403
404 static ssize_t radeon_set_pm_method(struct device *dev,
405                                     struct device_attribute *attr,
406                                     const char *buf,
407                                     size_t count)
408 {
409         struct drm_device *ddev = dev_get_drvdata(dev);
410         struct radeon_device *rdev = ddev->dev_private;
411
412         /* we don't support the legacy modes with dpm */
413         if (rdev->pm.pm_method == PM_METHOD_DPM) {
414                 count = -EINVAL;
415                 goto fail;
416         }
417
418         if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
419                 mutex_lock(&rdev->pm.mutex);
420                 rdev->pm.pm_method = PM_METHOD_DYNPM;
421                 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
422                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
423                 mutex_unlock(&rdev->pm.mutex);
424         } else if (strncmp("profile", buf, strlen("profile")) == 0) {
425                 mutex_lock(&rdev->pm.mutex);
426                 /* disable dynpm */
427                 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
428                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
429                 rdev->pm.pm_method = PM_METHOD_PROFILE;
430                 mutex_unlock(&rdev->pm.mutex);
431                 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
432         } else {
433                 count = -EINVAL;
434                 goto fail;
435         }
436         radeon_pm_compute_clocks(rdev);
437 fail:
438         return count;
439 }
440
441 static ssize_t radeon_get_dpm_state(struct device *dev,
442                                     struct device_attribute *attr,
443                                     char *buf)
444 {
445         struct drm_device *ddev = dev_get_drvdata(dev);
446         struct radeon_device *rdev = ddev->dev_private;
447         enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
448
449         return snprintf(buf, PAGE_SIZE, "%s\n",
450                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
451                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
452 }
453
454 static ssize_t radeon_set_dpm_state(struct device *dev,
455                                     struct device_attribute *attr,
456                                     const char *buf,
457                                     size_t count)
458 {
459         struct drm_device *ddev = dev_get_drvdata(dev);
460         struct radeon_device *rdev = ddev->dev_private;
461
462         mutex_lock(&rdev->pm.mutex);
463         if (strncmp("battery", buf, strlen("battery")) == 0)
464                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
465         else if (strncmp("balanced", buf, strlen("balanced")) == 0)
466                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
467         else if (strncmp("performance", buf, strlen("performance")) == 0)
468                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
469         else {
470                 mutex_unlock(&rdev->pm.mutex);
471                 count = -EINVAL;
472                 goto fail;
473         }
474         mutex_unlock(&rdev->pm.mutex);
475         radeon_pm_compute_clocks(rdev);
476 fail:
477         return count;
478 }
479
480 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
481                                                        struct device_attribute *attr,
482                                                        char *buf)
483 {
484         struct drm_device *ddev = dev_get_drvdata(dev);
485         struct radeon_device *rdev = ddev->dev_private;
486         enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
487
488         return snprintf(buf, PAGE_SIZE, "%s\n",
489                         (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
490                         (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
491 }
492
493 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
494                                                        struct device_attribute *attr,
495                                                        const char *buf,
496                                                        size_t count)
497 {
498         struct drm_device *ddev = dev_get_drvdata(dev);
499         struct radeon_device *rdev = ddev->dev_private;
500         enum radeon_dpm_forced_level level;
501         int ret = 0;
502
503         mutex_lock(&rdev->pm.mutex);
504         if (strncmp("low", buf, strlen("low")) == 0) {
505                 level = RADEON_DPM_FORCED_LEVEL_LOW;
506         } else if (strncmp("high", buf, strlen("high")) == 0) {
507                 level = RADEON_DPM_FORCED_LEVEL_HIGH;
508         } else if (strncmp("auto", buf, strlen("auto")) == 0) {
509                 level = RADEON_DPM_FORCED_LEVEL_AUTO;
510         } else {
511                 count = -EINVAL;
512                 goto fail;
513         }
514         if (rdev->asic->dpm.force_performance_level) {
515                 if (rdev->pm.dpm.thermal_active) {
516                         count = -EINVAL;
517                         goto fail;
518                 }
519                 ret = radeon_dpm_force_performance_level(rdev, level);
520                 if (ret)
521                         count = -EINVAL;
522         }
523 fail:
524         mutex_unlock(&rdev->pm.mutex);
525
526         return count;
527 }
528
529 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
530 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
531 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
532 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
533                    radeon_get_dpm_forced_performance_level,
534                    radeon_set_dpm_forced_performance_level);
535
536 static ssize_t radeon_hwmon_show_temp(struct device *dev,
537                                       struct device_attribute *attr,
538                                       char *buf)
539 {
540         struct radeon_device *rdev = dev_get_drvdata(dev);
541         int temp;
542
543         if (rdev->asic->pm.get_temperature)
544                 temp = radeon_get_temperature(rdev);
545         else
546                 temp = 0;
547
548         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
549 }
550
551 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
552                                              struct device_attribute *attr,
553                                              char *buf)
554 {
555         struct radeon_device *rdev = dev_get_drvdata(dev);
556         int hyst = to_sensor_dev_attr(attr)->index;
557         int temp;
558
559         if (hyst)
560                 temp = rdev->pm.dpm.thermal.min_temp;
561         else
562                 temp = rdev->pm.dpm.thermal.max_temp;
563
564         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
565 }
566
567 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
568 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
569 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
570
571 static struct attribute *hwmon_attributes[] = {
572         &sensor_dev_attr_temp1_input.dev_attr.attr,
573         &sensor_dev_attr_temp1_crit.dev_attr.attr,
574         &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
575         NULL
576 };
577
578 static umode_t hwmon_attributes_visible(struct kobject *kobj,
579                                         struct attribute *attr, int index)
580 {
581         struct device *dev = container_of(kobj, struct device, kobj);
582         struct radeon_device *rdev = dev_get_drvdata(dev);
583
584         /* Skip limit attributes if DPM is not enabled */
585         if (rdev->pm.pm_method != PM_METHOD_DPM &&
586             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
587              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
588                 return 0;
589
590         return attr->mode;
591 }
592
593 static const struct attribute_group hwmon_attrgroup = {
594         .attrs = hwmon_attributes,
595         .is_visible = hwmon_attributes_visible,
596 };
597
598 static const struct attribute_group *hwmon_groups[] = {
599         &hwmon_attrgroup,
600         NULL
601 };
602
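/* Expose the internal thermal sensor through hwmon for asics that have
 * an on-die temperature sensor and a get_temperature callback.
 */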
603 static int radeon_hwmon_init(struct radeon_device *rdev)
604 {
605         int err = 0;
606
607         switch (rdev->pm.int_thermal_type) {
608         case THERMAL_TYPE_RV6XX:
609         case THERMAL_TYPE_RV770:
610         case THERMAL_TYPE_EVERGREEN:
611         case THERMAL_TYPE_NI:
612         case THERMAL_TYPE_SUMO:
613         case THERMAL_TYPE_SI:
614         case THERMAL_TYPE_CI:
615         case THERMAL_TYPE_KV:
616                 if (rdev->asic->pm.get_temperature == NULL)
617                         return err;
618                 rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
619                                                                            "radeon", rdev,
620                                                                            hwmon_groups);
621                 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
622                         err = PTR_ERR(rdev->pm.int_hwmon_dev);
623                         dev_err(rdev->dev,
624                                 "Unable to register hwmon device: %d\n", err);
625                 }
626                 break;
627         default:
628                 break;
629         }
630
631         return err;
632 }
633
634 static void radeon_hwmon_fini(struct radeon_device *rdev)
635 {
636         if (rdev->pm.int_hwmon_dev)
637                 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
638 }
639
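/* Worker scheduled on thermal events: enters the internal thermal state
 * while the temperature is above the configured limits and returns to
 * the user-selected state once it drops below the minimum again.
 */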
640 static void radeon_dpm_thermal_work_handler(struct work_struct *work)
641 {
642         struct radeon_device *rdev =
643                 container_of(work, struct radeon_device,
644                              pm.dpm.thermal.work);
645         /* switch to the thermal state */
646         enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
647
648         if (!rdev->pm.dpm_enabled)
649                 return;
650
651         if (rdev->asic->pm.get_temperature) {
652                 int temp = radeon_get_temperature(rdev);
653
654                 if (temp < rdev->pm.dpm.thermal.min_temp)
655                         /* switch back to the user state */
656                         dpm_state = rdev->pm.dpm.user_state;
657         } else {
658                 if (rdev->pm.dpm.thermal.high_to_low)
659                         /* switch back to the user state */
660                         dpm_state = rdev->pm.dpm.user_state;
661         }
662         mutex_lock(&rdev->pm.mutex);
663         if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
664                 rdev->pm.dpm.thermal_active = true;
665         else
666                 rdev->pm.dpm.thermal_active = false;
667         rdev->pm.dpm.state = dpm_state;
668         mutex_unlock(&rdev->pm.mutex);
669
670         radeon_pm_compute_clocks(rdev);
671 }
672
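/**
 * radeon_dpm_pick_power_state - select the best matching DPM power state
 *
 * @rdev: radeon_device pointer
 * @dpm_state: requested power state type
 *
 * Scans the DPM power state table for a state matching the requested
 * type, honoring single-display-only states, and falls back through
 * progressively more generic state types if no exact match exists.
 */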
673 static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
674                                                      enum radeon_pm_state_type dpm_state)
675 {
676         int i;
677         struct radeon_ps *ps;
678         u32 ui_class;
679         bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
680                 true : false;
681
682         /* check if the vblank period is too short to adjust the mclk */
683         if (single_display && rdev->asic->dpm.vblank_too_short) {
684                 if (radeon_dpm_vblank_too_short(rdev))
685                         single_display = false;
686         }
687
688         /* certain older asics have a separate 3D performance state,
689          * so try that first if the user selected performance
690          */
691         if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
692                 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
693         /* balanced states don't exist at the moment */
694         if (dpm_state == POWER_STATE_TYPE_BALANCED)
695                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
696
697 restart_search:
698         /* Pick the best power state based on current conditions */
699         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
700                 ps = &rdev->pm.dpm.ps[i];
701                 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
702                 switch (dpm_state) {
703                 /* user states */
704                 case POWER_STATE_TYPE_BATTERY:
705                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
706                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
707                                         if (single_display)
708                                                 return ps;
709                                 } else
710                                         return ps;
711                         }
712                         break;
713                 case POWER_STATE_TYPE_BALANCED:
714                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
715                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
716                                         if (single_display)
717                                                 return ps;
718                                 } else
719                                         return ps;
720                         }
721                         break;
722                 case POWER_STATE_TYPE_PERFORMANCE:
723                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
724                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
725                                         if (single_display)
726                                                 return ps;
727                                 } else
728                                         return ps;
729                         }
730                         break;
731                 /* internal states */
732                 case POWER_STATE_TYPE_INTERNAL_UVD:
733                         if (rdev->pm.dpm.uvd_ps)
734                                 return rdev->pm.dpm.uvd_ps;
735                         else
736                                 break;
737                 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
738                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
739                                 return ps;
740                         break;
741                 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
742                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
743                                 return ps;
744                         break;
745                 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
746                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
747                                 return ps;
748                         break;
749                 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
750                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
751                                 return ps;
752                         break;
753                 case POWER_STATE_TYPE_INTERNAL_BOOT:
754                         return rdev->pm.dpm.boot_ps;
755                 case POWER_STATE_TYPE_INTERNAL_THERMAL:
756                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
757                                 return ps;
758                         break;
759                 case POWER_STATE_TYPE_INTERNAL_ACPI:
760                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
761                                 return ps;
762                         break;
763                 case POWER_STATE_TYPE_INTERNAL_ULV:
764                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
765                                 return ps;
766                         break;
767                 case POWER_STATE_TYPE_INTERNAL_3DPERF:
768                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
769                                 return ps;
770                         break;
771                 default:
772                         break;
773                 }
774         }
775         /* use a fallback state if we didn't match */
776         switch (dpm_state) {
777         case POWER_STATE_TYPE_INTERNAL_UVD_SD:
778                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
779                 goto restart_search;
780         case POWER_STATE_TYPE_INTERNAL_UVD_HD:
781         case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
782         case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
783                 if (rdev->pm.dpm.uvd_ps) {
784                         return rdev->pm.dpm.uvd_ps;
785                 } else {
786                         dpm_state = POWER_STATE_TYPE_PERFORMANCE;
787                         goto restart_search;
788                 }
789         case POWER_STATE_TYPE_INTERNAL_THERMAL:
790                 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
791                 goto restart_search;
792         case POWER_STATE_TYPE_INTERNAL_ACPI:
793                 dpm_state = POWER_STATE_TYPE_BATTERY;
794                 goto restart_search;
795         case POWER_STATE_TYPE_BATTERY:
796         case POWER_STATE_TYPE_BALANCED:
797         case POWER_STATE_TYPE_INTERNAL_3DPERF:
798                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
799                 goto restart_search;
800         default:
801                 break;
802         }
803
804         return NULL;
805 }
806
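/**
 * radeon_dpm_change_power_state_locked - switch to the requested DPM state
 *
 * @rdev: radeon_device pointer
 *
 * Picks the target power state, skips the reprogram when nothing
 * relevant changed (only updating watermarks/display configuration
 * where needed), otherwise drains the rings, programs the new state
 * and reapplies any forced performance level.  The _locked suffix
 * indicates the caller is expected to hold the pm mutex.
 */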
807 static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
808 {
809         int i;
810         struct radeon_ps *ps;
811         enum radeon_pm_state_type dpm_state;
812         int ret;
813
814         /* if dpm init failed */
815         if (!rdev->pm.dpm_enabled)
816                 return;
817
818         if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
819                 /* add other state override checks here */
820                 if ((!rdev->pm.dpm.thermal_active) &&
821                     (!rdev->pm.dpm.uvd_active))
822                         rdev->pm.dpm.state = rdev->pm.dpm.user_state;
823         }
824         dpm_state = rdev->pm.dpm.state;
825
826         ps = radeon_dpm_pick_power_state(rdev, dpm_state);
827         if (ps)
828                 rdev->pm.dpm.requested_ps = ps;
829         else
830                 return;
831
832         /* no need to reprogram if nothing changed unless we are on BTC+ */
833         if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
834                 /* vce just modifies an existing state so force a change */
835                 if (ps->vce_active != rdev->pm.dpm.vce_active)
836                         goto force;
837                 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
838                         /* for pre-BTC and APUs if the num crtcs changed but state is the same,
839                          * all we need to do is update the display configuration.
840                          */
841                         if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
842                                 /* update display watermarks based on new power state */
843                                 radeon_bandwidth_update(rdev);
844                                 /* update displays */
845                                 radeon_dpm_display_configuration_changed(rdev);
846                                 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
847                                 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
848                         }
849                         return;
850                 } else {
851                         /* for BTC+ if the num crtcs hasn't changed and state is the same,
852                          * nothing to do, if the num crtcs is > 1 and state is the same,
853                          * update display configuration.
854                          */
855                         if (rdev->pm.dpm.new_active_crtcs ==
856                             rdev->pm.dpm.current_active_crtcs) {
857                                 return;
858                         } else {
859                                 if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
860                                     (rdev->pm.dpm.new_active_crtc_count > 1)) {
861                                         /* update display watermarks based on new power state */
862                                         radeon_bandwidth_update(rdev);
863                                         /* update displays */
864                                         radeon_dpm_display_configuration_changed(rdev);
865                                         rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
866                                         rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
867                                         return;
868                                 }
869                         }
870                 }
871         }
872
873 force:
874         if (radeon_dpm == 1) {
875                 printk("switching from power state:\n");
876                 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
877                 printk("switching to power state:\n");
878                 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
879         }
880
881         mutex_lock(&rdev->ddev->struct_mutex);
882         down_write(&rdev->pm.mclk_lock);
883         mutex_lock(&rdev->ring_lock);
884
885         /* update whether vce is active */
886         ps->vce_active = rdev->pm.dpm.vce_active;
887
888         ret = radeon_dpm_pre_set_power_state(rdev);
889         if (ret)
890                 goto done;
891
892         /* update display watermarks based on new power state */
893         radeon_bandwidth_update(rdev);
894         /* update displays */
895         radeon_dpm_display_configuration_changed(rdev);
896
897         rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
898         rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
899
900         /* wait for the rings to drain */
901         for (i = 0; i < RADEON_NUM_RINGS; i++) {
902                 struct radeon_ring *ring = &rdev->ring[i];
903                 if (ring->ready)
904                         radeon_fence_wait_empty(rdev, i);
905         }
906
907         /* program the new power state */
908         radeon_dpm_set_power_state(rdev);
909
910         /* update current power state */
911         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
912
913         radeon_dpm_post_set_power_state(rdev);
914
915         if (rdev->asic->dpm.force_performance_level) {
916                 if (rdev->pm.dpm.thermal_active) {
917                         enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
918                         /* force low perf level for thermal */
919                         radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
920                         /* save the user's level */
921                         rdev->pm.dpm.forced_level = level;
922                 } else {
923                         /* otherwise, user selected level */
924                         radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
925                 }
926         }
927
928 done:
929         mutex_unlock(&rdev->ring_lock);
930         up_write(&rdev->pm.mclk_lock);
931         mutex_unlock(&rdev->ddev->struct_mutex);
932 }
933
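/* Notify the DPM code about UVD (video decode) activity.  Asics with UVD
 * powergating simply gate/ungate the block; others request a dedicated
 * UVD power state sized to the number of active SD/HD streams.
 */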
934 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
935 {
936         enum radeon_pm_state_type dpm_state;
937
938         if (rdev->asic->dpm.powergate_uvd) {
939                 mutex_lock(&rdev->pm.mutex);
940                 /* don't powergate anything if we
941                    have active but paused streams */
942                 enable |= rdev->pm.dpm.sd > 0;
943                 enable |= rdev->pm.dpm.hd > 0;
944                 /* enable/disable UVD */
945                 radeon_dpm_powergate_uvd(rdev, !enable);
946                 mutex_unlock(&rdev->pm.mutex);
947         } else {
948                 if (enable) {
949                         mutex_lock(&rdev->pm.mutex);
950                         rdev->pm.dpm.uvd_active = true;
951                         if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
952                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
953                         else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
954                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
955                         else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
956                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
957                         else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
958                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
959                         else
960                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
961                         rdev->pm.dpm.state = dpm_state;
962                         mutex_unlock(&rdev->pm.mutex);
963                 } else {
964                         mutex_lock(&rdev->pm.mutex);
965                         rdev->pm.dpm.uvd_active = false;
966                         mutex_unlock(&rdev->pm.mutex);
967                 }
968
969                 radeon_pm_compute_clocks(rdev);
970         }
971 }
972
973 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
974 {
975         if (enable) {
976                 mutex_lock(&rdev->pm.mutex);
977                 rdev->pm.dpm.vce_active = true;
978                 /* XXX select vce level based on ring/task */
979                 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
980                 mutex_unlock(&rdev->pm.mutex);
981         } else {
982                 mutex_lock(&rdev->pm.mutex);
983                 rdev->pm.dpm.vce_active = false;
984                 mutex_unlock(&rdev->pm.mutex);
985         }
986
987         radeon_pm_compute_clocks(rdev);
988 }
989
990 static void radeon_pm_suspend_old(struct radeon_device *rdev)
991 {
992         mutex_lock(&rdev->pm.mutex);
993         if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
994                 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
995                         rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
996         }
997         mutex_unlock(&rdev->pm.mutex);
998
999         cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1000 }
1001
1002 static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1003 {
1004         mutex_lock(&rdev->pm.mutex);
1005         /* disable dpm */
1006         radeon_dpm_disable(rdev);
1007         /* reset the power state */
1008         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1009         rdev->pm.dpm_enabled = false;
1010         mutex_unlock(&rdev->pm.mutex);
1011 }
1012
1013 void radeon_pm_suspend(struct radeon_device *rdev)
1014 {
1015         if (rdev->pm.pm_method == PM_METHOD_DPM)
1016                 radeon_pm_suspend_dpm(rdev);
1017         else
1018                 radeon_pm_suspend_old(rdev);
1019 }
1020
1021 static void radeon_pm_resume_old(struct radeon_device *rdev)
1022 {
1023         /* set up the default clocks if the MC ucode is loaded */
1024         if ((rdev->family >= CHIP_BARTS) &&
1025             (rdev->family <= CHIP_CAYMAN) &&
1026             rdev->mc_fw) {
1027                 if (rdev->pm.default_vddc)
1028                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1029                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1030                 if (rdev->pm.default_vddci)
1031                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1032                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1033                 if (rdev->pm.default_sclk)
1034                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1035                 if (rdev->pm.default_mclk)
1036                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1037         }
1038         /* asic init will reset the default power state */
1039         mutex_lock(&rdev->pm.mutex);
1040         rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
1041         rdev->pm.current_clock_mode_index = 0;
1042         rdev->pm.current_sclk = rdev->pm.default_sclk;
1043         rdev->pm.current_mclk = rdev->pm.default_mclk;
1044         if (rdev->pm.power_state) {
1045                 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1046                 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1047         }
1048         if (rdev->pm.pm_method == PM_METHOD_DYNPM
1049             && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
1050                 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1051                 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1052                                       msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1053         }
1054         mutex_unlock(&rdev->pm.mutex);
1055         radeon_pm_compute_clocks(rdev);
1056 }
1057
1058 static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1059 {
1060         int ret;
1061
1062         /* asic init will reset to the boot state */
1063         mutex_lock(&rdev->pm.mutex);
1064         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1065         radeon_dpm_setup_asic(rdev);
1066         ret = radeon_dpm_enable(rdev);
1067         mutex_unlock(&rdev->pm.mutex);
1068         if (ret)
1069                 goto dpm_resume_fail;
1070         rdev->pm.dpm_enabled = true;
1071         radeon_pm_compute_clocks(rdev);
1072         return;
1073
1074 dpm_resume_fail:
1075         DRM_ERROR("radeon: dpm resume failed\n");
1076         if ((rdev->family >= CHIP_BARTS) &&
1077             (rdev->family <= CHIP_CAYMAN) &&
1078             rdev->mc_fw) {
1079                 if (rdev->pm.default_vddc)
1080                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1081                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1082                 if (rdev->pm.default_vddci)
1083                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1084                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1085                 if (rdev->pm.default_sclk)
1086                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1087                 if (rdev->pm.default_mclk)
1088                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1089         }
1090 }
1091
1092 void radeon_pm_resume(struct radeon_device *rdev)
1093 {
1094         if (rdev->pm.pm_method == PM_METHOD_DPM)
1095                 radeon_pm_resume_dpm(rdev);
1096         else
1097                 radeon_pm_resume_old(rdev);
1098 }
1099
1100 static int radeon_pm_init_old(struct radeon_device *rdev)
1101 {
1102         int ret;
1103
1104         rdev->pm.profile = PM_PROFILE_DEFAULT;
1105         rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1106         rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1107         rdev->pm.dynpm_can_upclock = true;
1108         rdev->pm.dynpm_can_downclock = true;
1109         rdev->pm.default_sclk = rdev->clock.default_sclk;
1110         rdev->pm.default_mclk = rdev->clock.default_mclk;
1111         rdev->pm.current_sclk = rdev->clock.default_sclk;
1112         rdev->pm.current_mclk = rdev->clock.default_mclk;
1113         rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1114
1115         if (rdev->bios) {
1116                 if (rdev->is_atom_bios)
1117                         radeon_atombios_get_power_modes(rdev);
1118                 else
1119                         radeon_combios_get_power_modes(rdev);
1120                 radeon_pm_print_states(rdev);
1121                 radeon_pm_init_profile(rdev);
1122                 /* set up the default clocks if the MC ucode is loaded */
1123                 if ((rdev->family >= CHIP_BARTS) &&
1124                     (rdev->family <= CHIP_CAYMAN) &&
1125                     rdev->mc_fw) {
1126                         if (rdev->pm.default_vddc)
1127                                 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1128                                                         SET_VOLTAGE_TYPE_ASIC_VDDC);
1129                         if (rdev->pm.default_vddci)
1130                                 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1131                                                         SET_VOLTAGE_TYPE_ASIC_VDDCI);
1132                         if (rdev->pm.default_sclk)
1133                                 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1134                         if (rdev->pm.default_mclk)
1135                                 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1136                 }
1137         }
1138
1139         /* set up the internal thermal sensor if applicable */
1140         ret = radeon_hwmon_init(rdev);
1141         if (ret)
1142                 return ret;
1143
1144         INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1145
1146         if (rdev->pm.num_power_states > 1) {
1147                 /* where's the best place to put these? */
1148                 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1149                 if (ret)
1150                         DRM_ERROR("failed to create device file for power profile\n");
1151                 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1152                 if (ret)
1153                         DRM_ERROR("failed to create device file for power method\n");
1154
1155                 if (radeon_debugfs_pm_init(rdev)) {
1156                         DRM_ERROR("Failed to register debugfs file for PM!\n");
1157                 }
1158
1159                 DRM_INFO("radeon: power management initialized\n");
1160         }
1161
1162         return 0;
1163 }
1164
1165 static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1166 {
1167         int i;
1168
1169         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1170                 printk("== power state %d ==\n", i);
1171                 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1172         }
1173 }
1174
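/* Initialize the DPM path: parse the ATOM power tables, set up the
 * internal thermal sensor and the thermal worker, enable dpm on the
 * asic and create the dpm sysfs files.
 */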
1175 static int radeon_pm_init_dpm(struct radeon_device *rdev)
1176 {
1177         int ret;
1178
1179         /* default to balanced state */
1180         rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1181         rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1182         rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1183         rdev->pm.default_sclk = rdev->clock.default_sclk;
1184         rdev->pm.default_mclk = rdev->clock.default_mclk;
1185         rdev->pm.current_sclk = rdev->clock.default_sclk;
1186         rdev->pm.current_mclk = rdev->clock.default_mclk;
1187         rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1188
1189         if (rdev->bios && rdev->is_atom_bios)
1190                 radeon_atombios_get_power_modes(rdev);
1191         else
1192                 return -EINVAL;
1193
1194         /* set up the internal thermal sensor if applicable */
1195         ret = radeon_hwmon_init(rdev);
1196         if (ret)
1197                 return ret;
1198
1199         INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1200         mutex_lock(&rdev->pm.mutex);
1201         radeon_dpm_init(rdev);
1202         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1203         if (radeon_dpm == 1)
1204                 radeon_dpm_print_power_states(rdev);
1205         radeon_dpm_setup_asic(rdev);
1206         ret = radeon_dpm_enable(rdev);
1207         mutex_unlock(&rdev->pm.mutex);
1208         if (ret)
1209                 goto dpm_failed;
1210         rdev->pm.dpm_enabled = true;
1211
1212         ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1213         if (ret)
1214                 DRM_ERROR("failed to create device file for dpm state\n");
1215         ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1216         if (ret)
1217                 DRM_ERROR("failed to create device file for dpm forced performance level\n");
1218         /* XXX: these are noops for dpm but are here for backwards compat */
1219         ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1220         if (ret)
1221                 DRM_ERROR("failed to create device file for power profile\n");
1222         ret = device_create_file(rdev->dev, &dev_attr_power_method);
1223         if (ret)
1224                 DRM_ERROR("failed to create device file for power method\n");
1225
1226         if (radeon_debugfs_pm_init(rdev)) {
1227                 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1228         }
1229
1230         DRM_INFO("radeon: dpm initialized\n");
1231
1232         return 0;
1233
1234 dpm_failed:
1235         rdev->pm.dpm_enabled = false;
1236         if ((rdev->family >= CHIP_BARTS) &&
1237             (rdev->family <= CHIP_CAYMAN) &&
1238             rdev->mc_fw) {
1239                 if (rdev->pm.default_vddc)
1240                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1241                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1242                 if (rdev->pm.default_vddci)
1243                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1244                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1245                 if (rdev->pm.default_sclk)
1246                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1247                 if (rdev->pm.default_mclk)
1248                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1249         }
1250         DRM_ERROR("radeon: dpm initialization failed\n");
1251         return ret;
1252 }
1253
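/*
 * radeon_pm_init() picks the pm method per chip family: dpm needs the RLC
 * microcode, and RV770 and newer dGPUs additionally need the SMC microcode,
 * otherwise the legacy profile method is used.  The radeon_dpm module
 * parameter overrides the default either way; note that dpm is opt-in for
 * the first group of chips below and opt-out for the second.
 */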
1254 int radeon_pm_init(struct radeon_device *rdev)
1255 {
1256         /* enable dpm on rv6xx+ */
1257         switch (rdev->family) {
1258         case CHIP_RV610:
1259         case CHIP_RV630:
1260         case CHIP_RV620:
1261         case CHIP_RV635:
1262         case CHIP_RV670:
1263         case CHIP_RS780:
1264         case CHIP_RS880:
1265         case CHIP_RV770:
1266         case CHIP_BARTS:
1267         case CHIP_TURKS:
1268         case CHIP_CAICOS:
1269         case CHIP_CAYMAN:
1270                 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1271                 if (!rdev->rlc_fw)
1272                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1273                 else if ((rdev->family >= CHIP_RV770) &&
1274                          (!(rdev->flags & RADEON_IS_IGP)) &&
1275                          (!rdev->smc_fw))
1276                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1277                 else if (radeon_dpm == 1)
1278                         rdev->pm.pm_method = PM_METHOD_DPM;
1279                 else
1280                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1281                 break;
1282         case CHIP_RV730:
1283         case CHIP_RV710:
1284         case CHIP_RV740:
1285         case CHIP_CEDAR:
1286         case CHIP_REDWOOD:
1287         case CHIP_JUNIPER:
1288         case CHIP_CYPRESS:
1289         case CHIP_HEMLOCK:
1290         case CHIP_PALM:
1291         case CHIP_SUMO:
1292         case CHIP_SUMO2:
1293         case CHIP_ARUBA:
1294         case CHIP_TAHITI:
1295         case CHIP_PITCAIRN:
1296         case CHIP_VERDE:
1297         case CHIP_OLAND:
1298         case CHIP_HAINAN:
1299         case CHIP_BONAIRE:
1300         case CHIP_KABINI:
1301         case CHIP_KAVERI:
1302         case CHIP_HAWAII:
1303         case CHIP_MULLINS:
1304                 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1305                 if (!rdev->rlc_fw)
1306                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1307                 else if ((rdev->family >= CHIP_RV770) &&
1308                          (!(rdev->flags & RADEON_IS_IGP)) &&
1309                          (!rdev->smc_fw))
1310                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1311                 else if (radeon_dpm == 0)
1312                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1313                 else
1314                         rdev->pm.pm_method = PM_METHOD_DPM;
1315                 break;
1316         default:
1317                 /* default to profile method */
1318                 rdev->pm.pm_method = PM_METHOD_PROFILE;
1319                 break;
1320         }
1321
1322         if (rdev->pm.pm_method == PM_METHOD_DPM)
1323                 return radeon_pm_init_dpm(rdev);
1324         else
1325                 return radeon_pm_init_old(rdev);
1326 }
1327
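/*
 * radeon_pm_late_init() finishes dpm bring-up once the rest of the device
 * is initialized; with the legacy profile/dynpm methods there is nothing
 * left to do at this point.
 */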
1328 int radeon_pm_late_init(struct radeon_device *rdev)
1329 {
1330         int ret = 0;
1331
1332         if (rdev->pm.pm_method == PM_METHOD_DPM) {
1333                 mutex_lock(&rdev->pm.mutex);
1334                 ret = radeon_dpm_late_enable(rdev);
1335                 mutex_unlock(&rdev->pm.mutex);
1336         }
1337         return ret;
1338 }
1339
1340 static void radeon_pm_fini_old(struct radeon_device *rdev)
1341 {
1342         if (rdev->pm.num_power_states > 1) {
1343                 mutex_lock(&rdev->pm.mutex);
1344                 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1345                         rdev->pm.profile = PM_PROFILE_DEFAULT;
1346                         radeon_pm_update_profile(rdev);
1347                         radeon_pm_set_clocks(rdev);
1348                 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1349                         /* reset default clocks */
1350                         rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1351                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1352                         radeon_pm_set_clocks(rdev);
1353                 }
1354                 mutex_unlock(&rdev->pm.mutex);
1355
1356                 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1357
1358                 device_remove_file(rdev->dev, &dev_attr_power_profile);
1359                 device_remove_file(rdev->dev, &dev_attr_power_method);
1360         }
1361
1362         radeon_hwmon_fini(rdev);
1363
1364         if (rdev->pm.power_state)
1365                 kfree(rdev->pm.power_state);
1366 }
1367
1368 static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1369 {
1370         if (rdev->pm.num_power_states > 1) {
1371                 mutex_lock(&rdev->pm.mutex);
1372                 radeon_dpm_disable(rdev);
1373                 mutex_unlock(&rdev->pm.mutex);
1374
1375                 device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1376                 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1377                 /* XXX backwards compat */
1378                 device_remove_file(rdev->dev, &dev_attr_power_profile);
1379                 device_remove_file(rdev->dev, &dev_attr_power_method);
1380         }
1381         radeon_dpm_fini(rdev);
1382
1383         radeon_hwmon_fini(rdev);
1384
1385         if (rdev->pm.power_state)
1386                 kfree(rdev->pm.power_state);
1387 }
1388
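/*
 * Teardown mirrors init: the legacy path restores the default profile or
 * clocks and stops the dynpm worker, the dpm path disables dpm; both then
 * remove the sysfs files, the hwmon device and the parsed power states.
 */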
1389 void radeon_pm_fini(struct radeon_device *rdev)
1390 {
1391         if (rdev->pm.pm_method == PM_METHOD_DPM)
1392                 radeon_pm_fini_dpm(rdev);
1393         else
1394                 radeon_pm_fini_old(rdev);
1395 }
1396
1397 static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
1398 {
1399         struct drm_device *ddev = rdev->ddev;
1400         struct drm_crtc *crtc;
1401         struct radeon_crtc *radeon_crtc;
1402
1403         if (rdev->pm.num_power_states < 2)
1404                 return;
1405
1406         mutex_lock(&rdev->pm.mutex);
1407
1408         rdev->pm.active_crtcs = 0;
1409         rdev->pm.active_crtc_count = 0;
1410         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1411                 list_for_each_entry(crtc,
1412                                     &ddev->mode_config.crtc_list, head) {
1413                         radeon_crtc = to_radeon_crtc(crtc);
1414                         if (radeon_crtc->enabled) {
1415                                 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1416                                 rdev->pm.active_crtc_count++;
1417                         }
1418                 }
1419         }
1420
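        /* For dynpm, more than one active crtc pauses reclocking (the
         * switch waits for a single crtc's vblank to avoid flicker), exactly
         * one re-arms the idle worker, and zero active crtcs drops straight
         * to the minimum clocks.
         */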
1421         if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1422                 radeon_pm_update_profile(rdev);
1423                 radeon_pm_set_clocks(rdev);
1424         } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1425                 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
1426                         if (rdev->pm.active_crtc_count > 1) {
1427                                 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1428                                         cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1429
1430                                         rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
1431                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1432                                         radeon_pm_get_dynpm_state(rdev);
1433                                         radeon_pm_set_clocks(rdev);
1434
1435                                         DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1436                                 }
1437                         } else if (rdev->pm.active_crtc_count == 1) {
1438                                 /* TODO: Increase clocks if needed for current mode */
1439
1440                                 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
1441                                         rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1442                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
1443                                         radeon_pm_get_dynpm_state(rdev);
1444                                         radeon_pm_set_clocks(rdev);
1445
1446                                         schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1447                                                               msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1448                                 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
1449                                         rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1450                                         schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1451                                                               msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1452                                         DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1453                                 }
1454                         } else { /* count == 0 */
1455                                 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
1456                                         cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1457
1458                                         rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
1459                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
1460                                         radeon_pm_get_dynpm_state(rdev);
1461                                         radeon_pm_set_clocks(rdev);
1462                                 }
1463                         }
1464                 }
1465         }
1466
1467         mutex_unlock(&rdev->pm.mutex);
1468 }
1469
1470 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1471 {
1472         struct drm_device *ddev = rdev->ddev;
1473         struct drm_crtc *crtc;
1474         struct radeon_crtc *radeon_crtc;
1475
1476         if (!rdev->pm.dpm_enabled)
1477                 return;
1478
1479         mutex_lock(&rdev->pm.mutex);
1480
1481         /* update active crtc counts */
1482         rdev->pm.dpm.new_active_crtcs = 0;
1483         rdev->pm.dpm.new_active_crtc_count = 0;
1484         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1485                 list_for_each_entry(crtc,
1486                                     &ddev->mode_config.crtc_list, head) {
1487                         radeon_crtc = to_radeon_crtc(crtc);
1488                         if (crtc->enabled) {
1489                                 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1490                                 rdev->pm.dpm.new_active_crtc_count++;
1491                         }
1492                 }
1493         }
1494
1495         /* update battery/ac status */
1496         if (power_supply_is_system_supplied() > 0)
1497                 rdev->pm.dpm.ac_power = true;
1498         else
1499                 rdev->pm.dpm.ac_power = false;
1500
1501         radeon_dpm_change_power_state_locked(rdev);
1502
1503         mutex_unlock(&rdev->pm.mutex);
1504
1505 }
1506
1507 void radeon_pm_compute_clocks(struct radeon_device *rdev)
1508 {
1509         if (rdev->pm.pm_method == PM_METHOD_DPM)
1510                 radeon_pm_compute_clocks_dpm(rdev);
1511         else
1512                 radeon_pm_compute_clocks_old(rdev);
1513 }
1514
1515 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1516 {
1517         int  crtc, vpos, hpos, vbl_status;
1518         bool in_vbl = true;
1519
1520         /* Iterate over all active crtc's. All crtc's must be in vblank,
1521          * otherwise return in_vbl == false.
1522          */
1523         for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1524                 if (rdev->pm.active_crtcs & (1 << crtc)) {
1525                         vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
1526                         if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1527                             !(vbl_status & DRM_SCANOUTPOS_INVBL))
1528                                 in_vbl = false;
1529                 }
1530         }
1531
1532         return in_vbl;
1533 }
1534
1535 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
1536 {
1537         u32 stat_crtc = 0;
1538         bool in_vbl = radeon_pm_in_vbl(rdev);
1539
1540         if (!in_vbl)
1541                 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
1542                          finish ? "exit" : "entry");
1543         return in_vbl;
1544 }
1545
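/*
 * dynpm idle worker: every RADEON_IDLE_LOOP_MS it counts the fences still
 * outstanding across the rings.  Three or more pending fences plan an
 * upclock, none at all plans a downclock, and a planned action is only
 * applied once RADEON_RECLOCK_DELAY_MS has elapsed, with the actual switch
 * deferred to vblank to avoid flicker.
 */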
1546 static void radeon_dynpm_idle_work_handler(struct work_struct *work)
1547 {
1548         struct radeon_device *rdev;
1549         int resched;
1550         rdev = container_of(work, struct radeon_device,
1551                                 pm.dynpm_idle_work.work);
1552
1553         resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1554         mutex_lock(&rdev->pm.mutex);
1555         if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1556                 int not_processed = 0;
1557                 int i;
1558
1559                 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1560                         struct radeon_ring *ring = &rdev->ring[i];
1561
1562                         if (ring->ready) {
1563                                 not_processed += radeon_fence_count_emitted(rdev, i);
1564                                 if (not_processed >= 3)
1565                                         break;
1566                         }
1567                 }
1568
1569                 if (not_processed >= 3) { /* should upclock */
1570                         if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
1571                                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1572                         } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1573                                    rdev->pm.dynpm_can_upclock) {
1574                                 rdev->pm.dynpm_planned_action =
1575                                         DYNPM_ACTION_UPCLOCK;
1576                                 rdev->pm.dynpm_action_timeout = jiffies +
1577                                         msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1578                         }
1579                 } else if (not_processed == 0) { /* should downclock */
1580                         if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
1581                                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1582                         } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1583                                    rdev->pm.dynpm_can_downclock) {
1584                                 rdev->pm.dynpm_planned_action =
1585                                         DYNPM_ACTION_DOWNCLOCK;
1586                                 rdev->pm.dynpm_action_timeout = jiffies +
1587                                         msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1588                         }
1589                 }
1590
1591                 /* Note, radeon_pm_set_clocks is called with static_switch set
1592                  * to false since we want to wait for vbl to avoid flicker.
1593                  */
1594                 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
1595                     jiffies > rdev->pm.dynpm_action_timeout) {
1596                         radeon_pm_get_dynpm_state(rdev);
1597                         radeon_pm_set_clocks(rdev);
1598                 }
1599
1600                 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1601                                       msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1602         }
1603         mutex_unlock(&rdev->pm.mutex);
1604         ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1605 }
1606
1607 /*
1608  * Debugfs info
1609  */
1610 #if defined(CONFIG_DEBUG_FS)
1611
1612 static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1613 {
1614         struct drm_info_node *node = (struct drm_info_node *) m->private;
1615         struct drm_device *dev = node->minor->dev;
1616         struct radeon_device *rdev = dev->dev_private;
1617
1618         if (rdev->pm.dpm_enabled) {
1619                 mutex_lock(&rdev->pm.mutex);
1620                 if (rdev->asic->dpm.debugfs_print_current_performance_level)
1621                         radeon_dpm_debugfs_print_current_performance_level(rdev, m);
1622                 else
1623                         seq_printf(m, "Debugfs support not implemented for this asic\n");
1624                 mutex_unlock(&rdev->pm.mutex);
1625         } else {
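                /* sclk/mclk values are stored in 10 kHz units, hence the
                 * trailing 0 in the "%u0 kHz" formats below.
                 */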
1626                 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
1627                 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
1628                 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1629                         seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
1630                 else
1631                         seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1632                 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1633                 if (rdev->asic->pm.get_memory_clock)
1634                         seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1635                 if (rdev->pm.current_vddc)
1636                         seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
1637                 if (rdev->asic->pm.get_pcie_lanes)
1638                         seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
1639         }
1640
1641         return 0;
1642 }
1643
1644 static struct drm_info_list radeon_pm_info_list[] = {
1645         {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
1646 };
1647 #endif
1648
1649 static int radeon_debugfs_pm_init(struct radeon_device *rdev)
1650 {
1651 #if defined(CONFIG_DEBUG_FS)
1652         return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
1653 #else
1654         return 0;
1655 #endif
1656 }