/* drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c */
1 /*
2  * Permission is hereby granted, free of charge, to any person obtaining a
3  * copy of this software and associated documentation files (the "Software"),
4  * to deal in the Software without restriction, including without limitation
5  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6  * and/or sell copies of the Software, and to permit persons to whom the
7  * Software is furnished to do so, subject to the following conditions:
8  *
9  * The above copyright notice and this permission notice shall be included in
10  * all copies or substantial portions of the Software.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18  * OTHER DEALINGS IN THE SOFTWARE.
19  *
20  * Authors: Rafał Miłecki <zajec5@gmail.com>
21  *          Alex Deucher <alexdeucher@gmail.com>
22  */
23 #include <drm/drmP.h>
24 #include "amdgpu.h"
25 #include "amdgpu_drv.h"
26 #include "amdgpu_pm.h"
27 #include "amdgpu_dpm.h"
28 #include "atom.h"
29 #include <linux/power_supply.h>
30 #include <linux/hwmon.h>
31 #include <linux/hwmon-sysfs.h>
32
33 #include "amd_powerplay.h"
34
35 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
36
/* Mapping of AMD_CG_SUPPORT_* clockgating feature flags to
 * human-readable description strings; the list is terminated by a
 * {0, NULL} sentinel so callers can walk it without a length. */
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{0, NULL},
};
64
65 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
66 {
67         if (adev->pp_enabled)
68                 /* TODO */
69                 return;
70
71         if (adev->pm.dpm_enabled) {
72                 mutex_lock(&adev->pm.mutex);
73                 if (power_supply_is_system_supplied() > 0)
74                         adev->pm.dpm.ac_power = true;
75                 else
76                         adev->pm.dpm.ac_power = false;
77                 if (adev->pm.funcs->enable_bapm)
78                         amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
79                 mutex_unlock(&adev->pm.mutex);
80         }
81 }
82
83 static ssize_t amdgpu_get_dpm_state(struct device *dev,
84                                     struct device_attribute *attr,
85                                     char *buf)
86 {
87         struct drm_device *ddev = dev_get_drvdata(dev);
88         struct amdgpu_device *adev = ddev->dev_private;
89         enum amd_pm_state_type pm;
90
91         if (adev->pp_enabled) {
92                 pm = amdgpu_dpm_get_current_power_state(adev);
93         } else
94                 pm = adev->pm.dpm.user_state;
95
96         return snprintf(buf, PAGE_SIZE, "%s\n",
97                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
98                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
99 }
100
101 static ssize_t amdgpu_set_dpm_state(struct device *dev,
102                                     struct device_attribute *attr,
103                                     const char *buf,
104                                     size_t count)
105 {
106         struct drm_device *ddev = dev_get_drvdata(dev);
107         struct amdgpu_device *adev = ddev->dev_private;
108         enum amd_pm_state_type  state;
109
110         if (strncmp("battery", buf, strlen("battery")) == 0)
111                 state = POWER_STATE_TYPE_BATTERY;
112         else if (strncmp("balanced", buf, strlen("balanced")) == 0)
113                 state = POWER_STATE_TYPE_BALANCED;
114         else if (strncmp("performance", buf, strlen("performance")) == 0)
115                 state = POWER_STATE_TYPE_PERFORMANCE;
116         else {
117                 count = -EINVAL;
118                 goto fail;
119         }
120
121         if (adev->pp_enabled) {
122                 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
123         } else {
124                 mutex_lock(&adev->pm.mutex);
125                 adev->pm.dpm.user_state = state;
126                 mutex_unlock(&adev->pm.mutex);
127
128                 /* Can't set dpm state when the card is off */
129                 if (!(adev->flags & AMD_IS_PX) ||
130                     (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
131                         amdgpu_pm_compute_clocks(adev);
132         }
133 fail:
134         return count;
135 }
136
137 static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
138                                                 struct device_attribute *attr,
139                                                                 char *buf)
140 {
141         struct drm_device *ddev = dev_get_drvdata(dev);
142         struct amdgpu_device *adev = ddev->dev_private;
143         enum amd_dpm_forced_level level;
144
145         if  ((adev->flags & AMD_IS_PX) &&
146              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
147                 return snprintf(buf, PAGE_SIZE, "off\n");
148
149         level = amdgpu_dpm_get_performance_level(adev);
150         return snprintf(buf, PAGE_SIZE, "%s\n",
151                         (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
152                         (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
153                         (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
154                         (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
155                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
156                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
157                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
158                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
159                         "unknown");
160 }
161
/* sysfs store handler for power_dpm_force_performance_level: parses a
 * level name and forces the corresponding dpm performance level.
 * Returns the number of bytes consumed or a negative errno (via the
 * size_t/ssize_t round-trip of @count on the error paths). */
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if  ((adev->flags & AMD_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);

	/* prefix match so a trailing newline from "echo" is accepted */
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	}  else {
		count = -EINVAL;
		goto fail;
	}

	/* nothing to do if the requested level is already in force */
	if (current_level == level)
		return count;

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		/* refuse to override an active thermal-throttle state */
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}

fail:
	return count;
}
226
227 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
228                 struct device_attribute *attr,
229                 char *buf)
230 {
231         struct drm_device *ddev = dev_get_drvdata(dev);
232         struct amdgpu_device *adev = ddev->dev_private;
233         struct pp_states_info data;
234         int i, buf_len;
235
236         if (adev->pp_enabled)
237                 amdgpu_dpm_get_pp_num_states(adev, &data);
238
239         buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
240         for (i = 0; i < data.nums; i++)
241                 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
242                                 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
243                                 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
244                                 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
245                                 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
246
247         return buf_len;
248 }
249
250 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
251                 struct device_attribute *attr,
252                 char *buf)
253 {
254         struct drm_device *ddev = dev_get_drvdata(dev);
255         struct amdgpu_device *adev = ddev->dev_private;
256         struct pp_states_info data;
257         enum amd_pm_state_type pm = 0;
258         int i = 0;
259
260         if (adev->pp_enabled) {
261
262                 pm = amdgpu_dpm_get_current_power_state(adev);
263                 amdgpu_dpm_get_pp_num_states(adev, &data);
264
265                 for (i = 0; i < data.nums; i++) {
266                         if (pm == data.states[i])
267                                 break;
268                 }
269
270                 if (i == data.nums)
271                         i = -EINVAL;
272         }
273
274         return snprintf(buf, PAGE_SIZE, "%d\n", i);
275 }
276
277 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
278                 struct device_attribute *attr,
279                 char *buf)
280 {
281         struct drm_device *ddev = dev_get_drvdata(dev);
282         struct amdgpu_device *adev = ddev->dev_private;
283         struct pp_states_info data;
284         enum amd_pm_state_type pm = 0;
285         int i;
286
287         if (adev->pp_force_state_enabled && adev->pp_enabled) {
288                 pm = amdgpu_dpm_get_current_power_state(adev);
289                 amdgpu_dpm_get_pp_num_states(adev, &data);
290
291                 for (i = 0; i < data.nums; i++) {
292                         if (pm == data.states[i])
293                                 break;
294                 }
295
296                 if (i == data.nums)
297                         i = -EINVAL;
298
299                 return snprintf(buf, PAGE_SIZE, "%d\n", i);
300
301         } else
302                 return snprintf(buf, PAGE_SIZE, "\n");
303 }
304
305 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
306                 struct device_attribute *attr,
307                 const char *buf,
308                 size_t count)
309 {
310         struct drm_device *ddev = dev_get_drvdata(dev);
311         struct amdgpu_device *adev = ddev->dev_private;
312         enum amd_pm_state_type state = 0;
313         unsigned long idx;
314         int ret;
315
316         if (strlen(buf) == 1)
317                 adev->pp_force_state_enabled = false;
318         else if (adev->pp_enabled) {
319                 struct pp_states_info data;
320
321                 ret = kstrtoul(buf, 0, &idx);
322                 if (ret || idx >= ARRAY_SIZE(data.states)) {
323                         count = -EINVAL;
324                         goto fail;
325                 }
326
327                 amdgpu_dpm_get_pp_num_states(adev, &data);
328                 state = data.states[idx];
329                 /* only set user selected power states */
330                 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
331                     state != POWER_STATE_TYPE_DEFAULT) {
332                         amdgpu_dpm_dispatch_task(adev,
333                                         AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
334                         adev->pp_force_state_enabled = true;
335                 }
336         }
337 fail:
338         return count;
339 }
340
341 static ssize_t amdgpu_get_pp_table(struct device *dev,
342                 struct device_attribute *attr,
343                 char *buf)
344 {
345         struct drm_device *ddev = dev_get_drvdata(dev);
346         struct amdgpu_device *adev = ddev->dev_private;
347         char *table = NULL;
348         int size;
349
350         if (adev->pp_enabled)
351                 size = amdgpu_dpm_get_pp_table(adev, &table);
352         else
353                 return 0;
354
355         if (size >= PAGE_SIZE)
356                 size = PAGE_SIZE - 1;
357
358         memcpy(buf, table, size);
359
360         return size;
361 }
362
363 static ssize_t amdgpu_set_pp_table(struct device *dev,
364                 struct device_attribute *attr,
365                 const char *buf,
366                 size_t count)
367 {
368         struct drm_device *ddev = dev_get_drvdata(dev);
369         struct amdgpu_device *adev = ddev->dev_private;
370
371         if (adev->pp_enabled)
372                 amdgpu_dpm_set_pp_table(adev, buf, count);
373
374         return count;
375 }
376
377 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
378                 struct device_attribute *attr,
379                 char *buf)
380 {
381         struct drm_device *ddev = dev_get_drvdata(dev);
382         struct amdgpu_device *adev = ddev->dev_private;
383         ssize_t size = 0;
384
385         if (adev->pp_enabled)
386                 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
387         else if (adev->pm.funcs->print_clock_levels)
388                 size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
389
390         return size;
391 }
392
393 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
394                 struct device_attribute *attr,
395                 const char *buf,
396                 size_t count)
397 {
398         struct drm_device *ddev = dev_get_drvdata(dev);
399         struct amdgpu_device *adev = ddev->dev_private;
400         int ret;
401         long level;
402         uint32_t i, mask = 0;
403         char sub_str[2];
404
405         for (i = 0; i < strlen(buf); i++) {
406                 if (*(buf + i) == '\n')
407                         continue;
408                 sub_str[0] = *(buf + i);
409                 sub_str[1] = '\0';
410                 ret = kstrtol(sub_str, 0, &level);
411
412                 if (ret) {
413                         count = -EINVAL;
414                         goto fail;
415                 }
416                 mask |= 1 << level;
417         }
418
419         if (adev->pp_enabled)
420                 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
421         else if (adev->pm.funcs->force_clock_level)
422                 adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
423 fail:
424         return count;
425 }
426
427 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
428                 struct device_attribute *attr,
429                 char *buf)
430 {
431         struct drm_device *ddev = dev_get_drvdata(dev);
432         struct amdgpu_device *adev = ddev->dev_private;
433         ssize_t size = 0;
434
435         if (adev->pp_enabled)
436                 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
437         else if (adev->pm.funcs->print_clock_levels)
438                 size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
439
440         return size;
441 }
442
443 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
444                 struct device_attribute *attr,
445                 const char *buf,
446                 size_t count)
447 {
448         struct drm_device *ddev = dev_get_drvdata(dev);
449         struct amdgpu_device *adev = ddev->dev_private;
450         int ret;
451         long level;
452         uint32_t i, mask = 0;
453         char sub_str[2];
454
455         for (i = 0; i < strlen(buf); i++) {
456                 if (*(buf + i) == '\n')
457                         continue;
458                 sub_str[0] = *(buf + i);
459                 sub_str[1] = '\0';
460                 ret = kstrtol(sub_str, 0, &level);
461
462                 if (ret) {
463                         count = -EINVAL;
464                         goto fail;
465                 }
466                 mask |= 1 << level;
467         }
468
469         if (adev->pp_enabled)
470                 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
471         else if (adev->pm.funcs->force_clock_level)
472                 adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
473 fail:
474         return count;
475 }
476
477 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
478                 struct device_attribute *attr,
479                 char *buf)
480 {
481         struct drm_device *ddev = dev_get_drvdata(dev);
482         struct amdgpu_device *adev = ddev->dev_private;
483         ssize_t size = 0;
484
485         if (adev->pp_enabled)
486                 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
487         else if (adev->pm.funcs->print_clock_levels)
488                 size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
489
490         return size;
491 }
492
493 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
494                 struct device_attribute *attr,
495                 const char *buf,
496                 size_t count)
497 {
498         struct drm_device *ddev = dev_get_drvdata(dev);
499         struct amdgpu_device *adev = ddev->dev_private;
500         int ret;
501         long level;
502         uint32_t i, mask = 0;
503         char sub_str[2];
504
505         for (i = 0; i < strlen(buf); i++) {
506                 if (*(buf + i) == '\n')
507                         continue;
508                 sub_str[0] = *(buf + i);
509                 sub_str[1] = '\0';
510                 ret = kstrtol(sub_str, 0, &level);
511
512                 if (ret) {
513                         count = -EINVAL;
514                         goto fail;
515                 }
516                 mask |= 1 << level;
517         }
518
519         if (adev->pp_enabled)
520                 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
521         else if (adev->pm.funcs->force_clock_level)
522                 adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
523 fail:
524         return count;
525 }
526
527 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
528                 struct device_attribute *attr,
529                 char *buf)
530 {
531         struct drm_device *ddev = dev_get_drvdata(dev);
532         struct amdgpu_device *adev = ddev->dev_private;
533         uint32_t value = 0;
534
535         if (adev->pp_enabled)
536                 value = amdgpu_dpm_get_sclk_od(adev);
537         else if (adev->pm.funcs->get_sclk_od)
538                 value = adev->pm.funcs->get_sclk_od(adev);
539
540         return snprintf(buf, PAGE_SIZE, "%d\n", value);
541 }
542
543 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
544                 struct device_attribute *attr,
545                 const char *buf,
546                 size_t count)
547 {
548         struct drm_device *ddev = dev_get_drvdata(dev);
549         struct amdgpu_device *adev = ddev->dev_private;
550         int ret;
551         long int value;
552
553         ret = kstrtol(buf, 0, &value);
554
555         if (ret) {
556                 count = -EINVAL;
557                 goto fail;
558         }
559
560         if (adev->pp_enabled) {
561                 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
562                 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
563         } else if (adev->pm.funcs->set_sclk_od) {
564                 adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
565                 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
566                 amdgpu_pm_compute_clocks(adev);
567         }
568
569 fail:
570         return count;
571 }
572
573 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
574                 struct device_attribute *attr,
575                 char *buf)
576 {
577         struct drm_device *ddev = dev_get_drvdata(dev);
578         struct amdgpu_device *adev = ddev->dev_private;
579         uint32_t value = 0;
580
581         if (adev->pp_enabled)
582                 value = amdgpu_dpm_get_mclk_od(adev);
583         else if (adev->pm.funcs->get_mclk_od)
584                 value = adev->pm.funcs->get_mclk_od(adev);
585
586         return snprintf(buf, PAGE_SIZE, "%d\n", value);
587 }
588
589 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
590                 struct device_attribute *attr,
591                 const char *buf,
592                 size_t count)
593 {
594         struct drm_device *ddev = dev_get_drvdata(dev);
595         struct amdgpu_device *adev = ddev->dev_private;
596         int ret;
597         long int value;
598
599         ret = kstrtol(buf, 0, &value);
600
601         if (ret) {
602                 count = -EINVAL;
603                 goto fail;
604         }
605
606         if (adev->pp_enabled) {
607                 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
608                 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
609         } else if (adev->pm.funcs->set_mclk_od) {
610                 adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
611                 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
612                 amdgpu_pm_compute_clocks(adev);
613         }
614
615 fail:
616         return count;
617 }
618
619 static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
620                 char *buf, struct amd_pp_profile *query)
621 {
622         struct drm_device *ddev = dev_get_drvdata(dev);
623         struct amdgpu_device *adev = ddev->dev_private;
624         int ret = 0;
625
626         if (adev->pp_enabled)
627                 ret = amdgpu_dpm_get_power_profile_state(
628                                 adev, query);
629         else if (adev->pm.funcs->get_power_profile_state)
630                 ret = adev->pm.funcs->get_power_profile_state(
631                                 adev, query);
632
633         if (ret)
634                 return ret;
635
636         return snprintf(buf, PAGE_SIZE,
637                         "%d %d %d %d %d\n",
638                         query->min_sclk / 100,
639                         query->min_mclk / 100,
640                         query->activity_threshold,
641                         query->up_hyst,
642                         query->down_hyst);
643 }
644
645 static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
646                 struct device_attribute *attr,
647                 char *buf)
648 {
649         struct amd_pp_profile query = {0};
650
651         query.type = AMD_PP_GFX_PROFILE;
652
653         return amdgpu_get_pp_power_profile(dev, buf, &query);
654 }
655
656 static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev,
657                 struct device_attribute *attr,
658                 char *buf)
659 {
660         struct amd_pp_profile query = {0};
661
662         query.type = AMD_PP_COMPUTE_PROFILE;
663
664         return amdgpu_get_pp_power_profile(dev, buf, &query);
665 }
666
667 static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
668                 const char *buf,
669                 size_t count,
670                 struct amd_pp_profile *request)
671 {
672         struct drm_device *ddev = dev_get_drvdata(dev);
673         struct amdgpu_device *adev = ddev->dev_private;
674         uint32_t loop = 0;
675         char *sub_str, buf_cpy[128], *tmp_str;
676         const char delimiter[3] = {' ', '\n', '\0'};
677         long int value;
678         int ret = 0;
679
680         if (strncmp("reset", buf, strlen("reset")) == 0) {
681                 if (adev->pp_enabled)
682                         ret = amdgpu_dpm_reset_power_profile_state(
683                                         adev, request);
684                 else if (adev->pm.funcs->reset_power_profile_state)
685                         ret = adev->pm.funcs->reset_power_profile_state(
686                                         adev, request);
687                 if (ret) {
688                         count = -EINVAL;
689                         goto fail;
690                 }
691                 return count;
692         }
693
694         if (strncmp("set", buf, strlen("set")) == 0) {
695                 if (adev->pp_enabled)
696                         ret = amdgpu_dpm_set_power_profile_state(
697                                         adev, request);
698                 else if (adev->pm.funcs->set_power_profile_state)
699                         ret = adev->pm.funcs->set_power_profile_state(
700                                         adev, request);
701                 if (ret) {
702                         count = -EINVAL;
703                         goto fail;
704                 }
705                 return count;
706         }
707
708         if (count + 1 >= 128) {
709                 count = -EINVAL;
710                 goto fail;
711         }
712
713         memcpy(buf_cpy, buf, count + 1);
714         tmp_str = buf_cpy;
715
716         while (tmp_str[0]) {
717                 sub_str = strsep(&tmp_str, delimiter);
718                 ret = kstrtol(sub_str, 0, &value);
719                 if (ret) {
720                         count = -EINVAL;
721                         goto fail;
722                 }
723
724                 switch (loop) {
725                 case 0:
726                         /* input unit MHz convert to dpm table unit 10KHz*/
727                         request->min_sclk = (uint32_t)value * 100;
728                         break;
729                 case 1:
730                         /* input unit MHz convert to dpm table unit 10KHz*/
731                         request->min_mclk = (uint32_t)value * 100;
732                         break;
733                 case 2:
734                         request->activity_threshold = (uint16_t)value;
735                         break;
736                 case 3:
737                         request->up_hyst = (uint8_t)value;
738                         break;
739                 case 4:
740                         request->down_hyst = (uint8_t)value;
741                         break;
742                 default:
743                         break;
744                 }
745
746                 loop++;
747         }
748
749         if (adev->pp_enabled)
750                 ret = amdgpu_dpm_set_power_profile_state(
751                                 adev, request);
752         else if (adev->pm.funcs->set_power_profile_state)
753                 ret = adev->pm.funcs->set_power_profile_state(
754                                 adev, request);
755
756         if (ret)
757                 count = -EINVAL;
758
759 fail:
760         return count;
761 }
762
763 static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
764                 struct device_attribute *attr,
765                 const char *buf,
766                 size_t count)
767 {
768         struct amd_pp_profile request = {0};
769
770         request.type = AMD_PP_GFX_PROFILE;
771
772         return amdgpu_set_pp_power_profile(dev, buf, count, &request);
773 }
774
775 static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
776                 struct device_attribute *attr,
777                 const char *buf,
778                 size_t count)
779 {
780         struct amd_pp_profile request = {0};
781
782         request.type = AMD_PP_COMPUTE_PROFILE;
783
784         return amdgpu_set_pp_power_profile(dev, buf, count, &request);
785 }
786
/* sysfs attributes for power management control. Each pairs a show
 * handler with (where writable) a store handler defined above;
 * world-readable, owner-writable where a store handler exists. */
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
                   amdgpu_get_dpm_forced_performance_level,
                   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_gfx_power_profile,
		amdgpu_set_pp_gfx_power_profile);
static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_compute_power_profile,
		amdgpu_set_pp_compute_power_profile);
820
821 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
822                                       struct device_attribute *attr,
823                                       char *buf)
824 {
825         struct amdgpu_device *adev = dev_get_drvdata(dev);
826         struct drm_device *ddev = adev->ddev;
827         int temp;
828
829         /* Can't get temperature when the card is off */
830         if  ((adev->flags & AMD_IS_PX) &&
831              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
832                 return -EINVAL;
833
834         if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
835                 temp = 0;
836         else
837                 temp = amdgpu_dpm_get_temperature(adev);
838
839         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
840 }
841
842 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
843                                              struct device_attribute *attr,
844                                              char *buf)
845 {
846         struct amdgpu_device *adev = dev_get_drvdata(dev);
847         int hyst = to_sensor_dev_attr(attr)->index;
848         int temp;
849
850         if (hyst)
851                 temp = adev->pm.dpm.thermal.min_temp;
852         else
853                 temp = adev->pm.dpm.thermal.max_temp;
854
855         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
856 }
857
858 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
859                                             struct device_attribute *attr,
860                                             char *buf)
861 {
862         struct amdgpu_device *adev = dev_get_drvdata(dev);
863         u32 pwm_mode = 0;
864
865         if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
866                 return -EINVAL;
867
868         pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
869
870         /* never 0 (full-speed), fuse or smc-controlled always */
871         return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
872 }
873
874 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
875                                             struct device_attribute *attr,
876                                             const char *buf,
877                                             size_t count)
878 {
879         struct amdgpu_device *adev = dev_get_drvdata(dev);
880         int err;
881         int value;
882
883         if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
884                 return -EINVAL;
885
886         err = kstrtoint(buf, 10, &value);
887         if (err)
888                 return err;
889
890         switch (value) {
891         case 1: /* manual, percent-based */
892                 amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
893                 break;
894         default: /* disable */
895                 amdgpu_dpm_set_fan_control_mode(adev, 0);
896                 break;
897         }
898
899         return count;
900 }
901
902 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
903                                          struct device_attribute *attr,
904                                          char *buf)
905 {
906         return sprintf(buf, "%i\n", 0);
907 }
908
909 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
910                                          struct device_attribute *attr,
911                                          char *buf)
912 {
913         return sprintf(buf, "%i\n", 255);
914 }
915
916 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
917                                      struct device_attribute *attr,
918                                      const char *buf, size_t count)
919 {
920         struct amdgpu_device *adev = dev_get_drvdata(dev);
921         int err;
922         u32 value;
923
924         err = kstrtou32(buf, 10, &value);
925         if (err)
926                 return err;
927
928         value = (value * 100) / 255;
929
930         err = amdgpu_dpm_set_fan_speed_percent(adev, value);
931         if (err)
932                 return err;
933
934         return count;
935 }
936
937 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
938                                      struct device_attribute *attr,
939                                      char *buf)
940 {
941         struct amdgpu_device *adev = dev_get_drvdata(dev);
942         int err;
943         u32 speed;
944
945         err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
946         if (err)
947                 return err;
948
949         speed = (speed * 255) / 100;
950
951         return sprintf(buf, "%i\n", speed);
952 }
953
954 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
955                                            struct device_attribute *attr,
956                                            char *buf)
957 {
958         struct amdgpu_device *adev = dev_get_drvdata(dev);
959         int err;
960         u32 speed;
961
962         err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
963         if (err)
964                 return err;
965
966         return sprintf(buf, "%i\n", speed);
967 }
968
/* hwmon sensor attributes.  The trailing index is passed to the shared
 * show handlers: temp1_crit uses index 0 (max_temp) and temp1_crit_hyst
 * uses index 1 (min_temp) in amdgpu_hwmon_show_temp_thresh().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
977
/* All hwmon attributes this driver may expose; actual visibility is
 * decided per-attribute at registration time by
 * hwmon_attributes_visible() below.
 */
static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
        &sensor_dev_attr_pwm1.dev_attr.attr,
        &sensor_dev_attr_pwm1_enable.dev_attr.attr,
        &sensor_dev_attr_pwm1_min.dev_attr.attr,
        &sensor_dev_attr_pwm1_max.dev_attr.attr,
        &sensor_dev_attr_fan1_input.dev_attr.attr,
        NULL
};
989
/* Decide the effective mode of each hwmon attribute for this device:
 * hide limit/fan attributes when DPM is disabled or no fan is present,
 * and strip read/write permission bits for fan operations the asic's
 * pm callbacks do not implement.  Returning 0 hides the attribute.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
                                        struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        umode_t effective_mode = attr->mode;

        /* Skip limit attributes if DPM is not enabled */
        if (!adev->pm.dpm_enabled &&
            (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
             attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                return 0;

        /* with powerplay everything below is handled elsewhere; expose as-is */
        if (adev->pp_enabled)
                return effective_mode;

        /* Skip fan attributes if fan is not present */
        if (adev->pm.no_fan &&
            (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                return 0;

        /* mask fan attributes if we have no bindings for this asic to expose */
        if ((!adev->pm.funcs->get_fan_speed_percent &&
             attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
            (!adev->pm.funcs->get_fan_control_mode &&
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
                effective_mode &= ~S_IRUGO;

        if ((!adev->pm.funcs->set_fan_speed_percent &&
             attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
            (!adev->pm.funcs->set_fan_control_mode &&
             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
                effective_mode &= ~S_IWUSR;

        /* hide max/min values if we can't both query and manage the fan */
        if ((!adev->pm.funcs->set_fan_speed_percent &&
             !adev->pm.funcs->get_fan_speed_percent) &&
            (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
                return 0;

        /* requires powerplay (only reached when !pp_enabled, see above) */
        if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
                return 0;

        return effective_mode;
}
1044
/* hwmon attribute group, filtered per-device by hwmon_attributes_visible(). */
static const struct attribute_group hwmon_attrgroup = {
        .attrs = hwmon_attributes,
        .is_visible = hwmon_attributes_visible,
};

/* NULL-terminated group list passed to hwmon_device_register_with_groups(). */
static const struct attribute_group *hwmon_groups[] = {
        &hwmon_attrgroup,
        NULL
};
1054
1055 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1056 {
1057         struct amdgpu_device *adev =
1058                 container_of(work, struct amdgpu_device,
1059                              pm.dpm.thermal.work);
1060         /* switch to the thermal state */
1061         enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1062
1063         if (!adev->pm.dpm_enabled)
1064                 return;
1065
1066         if (adev->pm.funcs->get_temperature) {
1067                 int temp = amdgpu_dpm_get_temperature(adev);
1068
1069                 if (temp < adev->pm.dpm.thermal.min_temp)
1070                         /* switch back the user state */
1071                         dpm_state = adev->pm.dpm.user_state;
1072         } else {
1073                 if (adev->pm.dpm.thermal.high_to_low)
1074                         /* switch back the user state */
1075                         dpm_state = adev->pm.dpm.user_state;
1076         }
1077         mutex_lock(&adev->pm.mutex);
1078         if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1079                 adev->pm.dpm.thermal_active = true;
1080         else
1081                 adev->pm.dpm.thermal_active = false;
1082         adev->pm.dpm.state = dpm_state;
1083         mutex_unlock(&adev->pm.mutex);
1084
1085         amdgpu_pm_compute_clocks(adev);
1086 }
1087
/**
 * amdgpu_dpm_pick_power_state - select a power state matching the request
 * @adev: amdgpu device pointer
 * @dpm_state: requested power state class
 *
 * Scans adev->pm.dpm.ps[] for a state whose classification flags match
 * @dpm_state, honouring the SINGLE_DISPLAY_ONLY cap when more than one
 * crtc is active.  If nothing matches, the request is progressively
 * relaxed by the fallback switch at the bottom and the search restarts.
 * Returns the chosen state, or NULL if no fallback matched either.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
                                                     enum amd_pm_state_type dpm_state)
{
        int i;
        struct amdgpu_ps *ps;
        u32 ui_class;
        bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
                true : false;

        /* check if the vblank period is too short to adjust the mclk */
        if (single_display && adev->pm.funcs->vblank_too_short) {
                if (amdgpu_dpm_vblank_too_short(adev))
                        single_display = false;
        }

        /* certain older asics have a separate 3D performance state,
         * so try that first if the user selected performance
         */
        if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
                dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
        /* balanced states don't exist at the moment */
        if (dpm_state == POWER_STATE_TYPE_BALANCED)
                dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
        /* Pick the best power state based on current conditions */
        for (i = 0; i < adev->pm.dpm.num_ps; i++) {
                ps = &adev->pm.dpm.ps[i];
                ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
                switch (dpm_state) {
                /* user states */
                case POWER_STATE_TYPE_BATTERY:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                case POWER_STATE_TYPE_BALANCED:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                case POWER_STATE_TYPE_PERFORMANCE:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                /* internal states */
                case POWER_STATE_TYPE_INTERNAL_UVD:
                        /* the dedicated UVD state, if the asic has one */
                        if (adev->pm.dpm.uvd_ps)
                                return adev->pm.dpm.uvd_ps;
                        else
                                break;
                case POWER_STATE_TYPE_INTERNAL_UVD_SD:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_HD:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_BOOT:
                        return adev->pm.dpm.boot_ps;
                case POWER_STATE_TYPE_INTERNAL_THERMAL:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_ACPI:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_ULV:
                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_3DPERF:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                                return ps;
                        break;
                default:
                        break;
                }
        }
        /* use a fallback state if we didn't match */
        switch (dpm_state) {
        case POWER_STATE_TYPE_INTERNAL_UVD_SD:
                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
                goto restart_search;
        case POWER_STATE_TYPE_INTERNAL_UVD_HD:
        case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
        case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
                if (adev->pm.dpm.uvd_ps) {
                        return adev->pm.dpm.uvd_ps;
                } else {
                        dpm_state = POWER_STATE_TYPE_PERFORMANCE;
                        goto restart_search;
                }
        case POWER_STATE_TYPE_INTERNAL_THERMAL:
                dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
                goto restart_search;
        case POWER_STATE_TYPE_INTERNAL_ACPI:
                dpm_state = POWER_STATE_TYPE_BATTERY;
                goto restart_search;
        case POWER_STATE_TYPE_BATTERY:
        case POWER_STATE_TYPE_BALANCED:
        case POWER_STATE_TYPE_INTERNAL_3DPERF:
                dpm_state = POWER_STATE_TYPE_PERFORMANCE;
                goto restart_search;
        default:
                break;
        }

        return NULL;
}
1221
1222 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
1223 {
1224         struct amdgpu_ps *ps;
1225         enum amd_pm_state_type dpm_state;
1226         int ret;
1227         bool equal;
1228
1229         /* if dpm init failed */
1230         if (!adev->pm.dpm_enabled)
1231                 return;
1232
1233         if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
1234                 /* add other state override checks here */
1235                 if ((!adev->pm.dpm.thermal_active) &&
1236                     (!adev->pm.dpm.uvd_active))
1237                         adev->pm.dpm.state = adev->pm.dpm.user_state;
1238         }
1239         dpm_state = adev->pm.dpm.state;
1240
1241         ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
1242         if (ps)
1243                 adev->pm.dpm.requested_ps = ps;
1244         else
1245                 return;
1246
1247         if (amdgpu_dpm == 1) {
1248                 printk("switching from power state:\n");
1249                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
1250                 printk("switching to power state:\n");
1251                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
1252         }
1253
1254         /* update whether vce is active */
1255         ps->vce_active = adev->pm.dpm.vce_active;
1256
1257         amdgpu_dpm_display_configuration_changed(adev);
1258
1259         ret = amdgpu_dpm_pre_set_power_state(adev);
1260         if (ret)
1261                 return;
1262
1263         if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
1264                 equal = false;
1265
1266         if (equal)
1267                 return;
1268
1269         amdgpu_dpm_set_power_state(adev);
1270         amdgpu_dpm_post_set_power_state(adev);
1271
1272         adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
1273         adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
1274
1275         if (adev->pm.funcs->force_performance_level) {
1276                 if (adev->pm.dpm.thermal_active) {
1277                         enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
1278                         /* force low perf level for thermal */
1279                         amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
1280                         /* save the user's level */
1281                         adev->pm.dpm.forced_level = level;
1282                 } else {
1283                         /* otherwise, user selected level */
1284                         amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
1285                 }
1286         }
1287 }
1288
1289 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
1290 {
1291         if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
1292                 /* enable/disable UVD */
1293                 mutex_lock(&adev->pm.mutex);
1294                 amdgpu_dpm_powergate_uvd(adev, !enable);
1295                 mutex_unlock(&adev->pm.mutex);
1296         } else {
1297                 if (enable) {
1298                         mutex_lock(&adev->pm.mutex);
1299                         adev->pm.dpm.uvd_active = true;
1300                         adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
1301                         mutex_unlock(&adev->pm.mutex);
1302                 } else {
1303                         mutex_lock(&adev->pm.mutex);
1304                         adev->pm.dpm.uvd_active = false;
1305                         mutex_unlock(&adev->pm.mutex);
1306                 }
1307                 amdgpu_pm_compute_clocks(adev);
1308         }
1309 }
1310
/* Enable or disable VCE power management: power-gate via powerplay or the
 * asic powergate_vce callback when available, otherwise drive the VCE
 * clock/power gating states directly.  Note the mirrored ordering:
 * enable ungates CG then PG, disable gates PG then CG.
 */
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
        if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
                /* enable/disable VCE */
                mutex_lock(&adev->pm.mutex);
                amdgpu_dpm_powergate_vce(adev, !enable);
                mutex_unlock(&adev->pm.mutex);
        } else {
                if (enable) {
                        mutex_lock(&adev->pm.mutex);
                        adev->pm.dpm.vce_active = true;
                        /* XXX select vce level based on ring/task */
                        adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
                        mutex_unlock(&adev->pm.mutex);
                        amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                        AMD_CG_STATE_UNGATE);
                        amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                        AMD_PG_STATE_UNGATE);
                        amdgpu_pm_compute_clocks(adev);
                } else {
                        amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                        AMD_PG_STATE_GATE);
                        amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                        AMD_CG_STATE_GATE);
                        mutex_lock(&adev->pm.mutex);
                        adev->pm.dpm.vce_active = false;
                        mutex_unlock(&adev->pm.mutex);
                        amdgpu_pm_compute_clocks(adev);
                }

        }
}
1343
1344 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1345 {
1346         int i;
1347
1348         if (adev->pp_enabled)
1349                 /* TO DO */
1350                 return;
1351
1352         for (i = 0; i < adev->pm.dpm.num_ps; i++)
1353                 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
1354
1355 }
1356
1357 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
1358 {
1359         int ret;
1360
1361         if (adev->pm.sysfs_initialized)
1362                 return 0;
1363
1364         if (!adev->pp_enabled) {
1365                 if (adev->pm.funcs->get_temperature == NULL)
1366                         return 0;
1367         }
1368
1369         adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
1370                                                                    DRIVER_NAME, adev,
1371                                                                    hwmon_groups);
1372         if (IS_ERR(adev->pm.int_hwmon_dev)) {
1373                 ret = PTR_ERR(adev->pm.int_hwmon_dev);
1374                 dev_err(adev->dev,
1375                         "Unable to register hwmon device: %d\n", ret);
1376                 return ret;
1377         }
1378
1379         ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
1380         if (ret) {
1381                 DRM_ERROR("failed to create device file for dpm state\n");
1382                 return ret;
1383         }
1384         ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
1385         if (ret) {
1386                 DRM_ERROR("failed to create device file for dpm state\n");
1387                 return ret;
1388         }
1389
1390         if (adev->pp_enabled) {
1391                 ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
1392                 if (ret) {
1393                         DRM_ERROR("failed to create device file pp_num_states\n");
1394                         return ret;
1395                 }
1396                 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
1397                 if (ret) {
1398                         DRM_ERROR("failed to create device file pp_cur_state\n");
1399                         return ret;
1400                 }
1401                 ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
1402                 if (ret) {
1403                         DRM_ERROR("failed to create device file pp_force_state\n");
1404                         return ret;
1405                 }
1406                 ret = device_create_file(adev->dev, &dev_attr_pp_table);
1407                 if (ret) {
1408                         DRM_ERROR("failed to create device file pp_table\n");
1409                         return ret;
1410                 }
1411         }
1412
1413         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
1414         if (ret) {
1415                 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
1416                 return ret;
1417         }
1418         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
1419         if (ret) {
1420                 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
1421                 return ret;
1422         }
1423         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
1424         if (ret) {
1425                 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
1426                 return ret;
1427         }
1428         ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
1429         if (ret) {
1430                 DRM_ERROR("failed to create device file pp_sclk_od\n");
1431                 return ret;
1432         }
1433         ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
1434         if (ret) {
1435                 DRM_ERROR("failed to create device file pp_mclk_od\n");
1436                 return ret;
1437         }
1438         ret = device_create_file(adev->dev,
1439                         &dev_attr_pp_gfx_power_profile);
1440         if (ret) {
1441                 DRM_ERROR("failed to create device file "
1442                                 "pp_gfx_power_profile\n");
1443                 return ret;
1444         }
1445         ret = device_create_file(adev->dev,
1446                         &dev_attr_pp_compute_power_profile);
1447         if (ret) {
1448                 DRM_ERROR("failed to create device file "
1449                                 "pp_compute_power_profile\n");
1450                 return ret;
1451         }
1452
1453         ret = amdgpu_debugfs_pm_init(adev);
1454         if (ret) {
1455                 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1456                 return ret;
1457         }
1458
1459         adev->pm.sysfs_initialized = true;
1460
1461         return 0;
1462 }
1463
/**
 * amdgpu_pm_sysfs_fini - tear down the pm sysfs and hwmon interfaces
 * @adev: amdgpu device pointer
 *
 * Unregisters the hwmon device (if it was registered) and removes every
 * device attribute created by amdgpu_pm_sysfs_init().  device_remove_file()
 * tolerates files that were never created, so this is safe after a partial
 * init.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
        if (adev->pm.int_hwmon_dev)
                hwmon_device_unregister(adev->pm.int_hwmon_dev);
        device_remove_file(adev->dev, &dev_attr_power_dpm_state);
        device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
        /* pp_* state/table files are only created on the powerplay path */
        if (adev->pp_enabled) {
                device_remove_file(adev->dev, &dev_attr_pp_num_states);
                device_remove_file(adev->dev, &dev_attr_pp_cur_state);
                device_remove_file(adev->dev, &dev_attr_pp_force_state);
                device_remove_file(adev->dev, &dev_attr_pp_table);
        }
        device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
        device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
        device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
        device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
        device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
        device_remove_file(adev->dev,
                        &dev_attr_pp_gfx_power_profile);
        device_remove_file(adev->dev,
                        &dev_attr_pp_compute_power_profile);
}
1486
/**
 * amdgpu_pm_compute_clocks - re-evaluate clocks for the current conditions
 * @adev: amdgpu device pointer
 *
 * Updates display bandwidth, waits for all ready rings to drain, then
 * either dispatches a display-config-change event to powerplay or, on the
 * legacy dpm path, rebuilds the active-crtc mask and AC/battery status
 * under pm.mutex and triggers a power state change.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        int i = 0;

        if (!adev->pm.dpm_enabled)
                return;

        if (adev->mode_info.num_crtc)
                amdgpu_display_bandwidth_update(adev);

        /* wait for outstanding work on every ready ring before reclocking */
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (ring && ring->ready)
                        amdgpu_fence_wait_empty(ring);
        }

        if (adev->pp_enabled) {
                amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
        } else {
                mutex_lock(&adev->pm.mutex);
                adev->pm.dpm.new_active_crtcs = 0;
                adev->pm.dpm.new_active_crtc_count = 0;
                if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                        list_for_each_entry(crtc,
                                            &ddev->mode_config.crtc_list, head) {
                                amdgpu_crtc = to_amdgpu_crtc(crtc);
                                if (crtc->enabled) {
                                        adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
                                        adev->pm.dpm.new_active_crtc_count++;
                                }
                        }
                }
                /* update battery/ac status */
                if (power_supply_is_system_supplied() > 0)
                        adev->pm.dpm.ac_power = true;
                else
                        adev->pm.dpm.ac_power = false;

                amdgpu_dpm_change_power_state_locked(adev);

                mutex_unlock(&adev->pm.mutex);
        }
}
1533
1534 /*
1535  * Debugfs info
1536  */
1537 #if defined(CONFIG_DEBUG_FS)
1538
/*
 * amdgpu_debugfs_pm_info_pp - dump powerplay sensor readings to debugfs.
 *
 * Prints GFX clocks, voltages, power draw, temperature, load, and the
 * UVD/VCE power/clock state via the powerplay read_sensor callback.
 * Individual sensor reads are best-effort: a failing read simply skips
 * its line.  Returns 0 on success, -EINVAL if powerplay has no
 * read_sensor implementation.
 *
 * Note: `value` and `size` are deliberately reused between reads, so the
 * ordering of the amdgpu_dpm_read_sensor() calls below matters.
 */
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
        uint32_t value;
        struct pp_gpu_power query = {0};
        int size;

        /* sanity check PP is enabled */
        if (!(adev->powerplay.pp_funcs &&
              adev->powerplay.pp_funcs->read_sensor))
              return -EINVAL;

        /* GPU Clocks */
        /* Clock sensors report in 10 kHz units, hence /100 for MHz. */
        size = sizeof(value);
        seq_printf(m, "GFX Clocks and Power:\n");
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
                seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
                seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
                seq_printf(m, "\t%u mV (VDDGFX)\n", value);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
                seq_printf(m, "\t%u mV (VDDNB)\n", value);
        /* GPU power needs a struct-sized buffer, not a u32. */
        size = sizeof(query);
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
                /*
                 * Power fields appear to be 8.8 fixed point (integer watts in
                 * the high byte, fraction in the low byte); the fractional
                 * part is printed raw in 1/256 W units — NOTE(review):
                 * confirm against the powerplay sensor definition.
                 */
                seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
                                query.vddc_power & 0xff);
                seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
                                query.vddci_power & 0xff);
                seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
                                query.max_gpu_power & 0xff);
                seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
                                query.average_gpu_power & 0xff);
        }
        /* Restore u32 buffer size for the remaining sensors. */
        size = sizeof(value);
        seq_printf(m, "\n");

        /* GPU Temp */
        /* Temperature is reported in millidegrees C. */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
                seq_printf(m, "GPU Temperature: %u C\n", value/1000);

        /* GPU Load */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
                seq_printf(m, "GPU Load: %u %%\n", value);
        seq_printf(m, "\n");

        /* UVD clocks */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
                if (!value) {
                        seq_printf(m, "UVD: Disabled\n");
                } else {
                        seq_printf(m, "UVD: Enabled\n");
                        /* `value` is overwritten by the clock reads below. */
                        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
                                seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
                        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
                                seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
                }
        }
        seq_printf(m, "\n");

        /* VCE clocks */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
                if (!value) {
                        seq_printf(m, "VCE: Disabled\n");
                } else {
                        seq_printf(m, "VCE: Enabled\n");
                        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
                                seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
                }
        }

        return 0;
}
1611
1612 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
1613 {
1614         int i;
1615
1616         for (i = 0; clocks[i].flag; i++)
1617                 seq_printf(m, "\t%s: %s\n", clocks[i].name,
1618                            (flags & clocks[i].flag) ? "On" : "Off");
1619 }
1620
1621 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
1622 {
1623         struct drm_info_node *node = (struct drm_info_node *) m->private;
1624         struct drm_device *dev = node->minor->dev;
1625         struct amdgpu_device *adev = dev->dev_private;
1626         struct drm_device *ddev = adev->ddev;
1627         u32 flags = 0;
1628
1629         amdgpu_get_clockgating_state(adev, &flags);
1630         seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
1631         amdgpu_parse_cg_state(m, flags);
1632         seq_printf(m, "\n");
1633
1634         if (!adev->pm.dpm_enabled) {
1635                 seq_printf(m, "dpm not enabled\n");
1636                 return 0;
1637         }
1638         if  ((adev->flags & AMD_IS_PX) &&
1639              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1640                 seq_printf(m, "PX asic powered off\n");
1641         } else if (adev->pp_enabled) {
1642                 return amdgpu_debugfs_pm_info_pp(m, adev);
1643         } else {
1644                 mutex_lock(&adev->pm.mutex);
1645                 if (adev->pm.funcs->debugfs_print_current_performance_level)
1646                         adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
1647                 else
1648                         seq_printf(m, "Debugfs support not implemented for this asic\n");
1649                 mutex_unlock(&adev->pm.mutex);
1650         }
1651
1652         return 0;
1653 }
1654
/* Single debugfs entry: "amdgpu_pm_info", served by amdgpu_debugfs_pm_info. */
static const struct drm_info_list amdgpu_pm_info_list[] = {
        {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif
1659
/*
 * amdgpu_debugfs_pm_init - register the pm debugfs file.
 *
 * Registers amdgpu_pm_info_list with the drm debugfs machinery; a no-op
 * returning 0 when debugfs is not compiled in.
 */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list,
                                        ARRAY_SIZE(amdgpu_pm_info_list));
#else
        return 0;
#endif
}