/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include "hwmgr.h"
#define WIDTH_4K 3840

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

static const struct cg_flag_name clocks[] = {
        {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
        {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
        {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
        {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
        {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
        {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
        {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
        {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
        {AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},

        {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
        {0, NULL},
};

static const struct hwmon_temp_label {
        enum PP_HWMON_TEMP channel;
        const char *label;
} temp_label[] = {
        {PP_TEMP_EDGE, "edge"},
        {PP_TEMP_JUNCTION, "junction"},
        {PP_TEMP_MEM, "mem"},
};

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                if (power_supply_is_system_supplied() > 0)
                        adev->pm.ac_power = true;
                else
                        adev->pm.ac_power = false;
                if (adev->powerplay.pp_funcs->enable_bapm)
                        amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
                mutex_unlock(&adev->pm.mutex);
        }
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        int ret = 0;

        if (!data || !size)
                return -EINVAL;

        if (is_support_sw_smu(adev))
                ret = smu_read_sensor(&adev->smu, sensor, data, size);
        else {
                if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
                        ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
                                                                    sensor, data, size);
                else
                        ret = -EINVAL;
        }

        return ret;
}

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface, provided only for
 * backwards compatibility, that the amdgpu driver exposes for adjusting
 * certain power related parameters.  It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation.  Selecting battery switched to this state.  This is no
 * longer provided on newer GPUs, so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation.  Selecting balanced switched to this state.  This is no
 * longer provided on newer GPUs, so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation.  Selecting performance switched to this state.  This is no
 * longer provided on newer GPUs, so the option does nothing in that case.
 *
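 * Example (illustrative; the card index in the sysfs path varies by
 * system):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/power_dpm_state
 *      echo battery > /sys/class/drm/card0/device/power_dpm_state
 *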
 */

static ssize_t amdgpu_get_dpm_state(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type pm;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                if (adev->smu.ppt_funcs->get_current_power_state)
                        pm = smu_get_current_power_state(&adev->smu);
                else
                        pm = adev->pm.dpm.user_state;
        } else if (adev->powerplay.pp_funcs->get_current_power_state) {
                pm = amdgpu_dpm_get_current_power_state(adev);
        } else {
                pm = adev->pm.dpm.user_state;
        }

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                        (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type state;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
        else if (strncmp("balanced", buf, strlen("balanced")) == 0)
                state = POWER_STATE_TYPE_BALANCED;
        else if (strncmp("performance", buf, strlen("performance")) == 0)
                state = POWER_STATE_TYPE_PERFORMANCE;
        else
                return -EINVAL;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
                adev->pm.dpm.user_state = state;
                mutex_unlock(&adev->pm.mutex);
        } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
                amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
        } else {
                mutex_lock(&adev->pm.mutex);
                adev->pm.dpm.user_state = state;
                mutex_unlock(&adev->pm.mutex);

                amdgpu_pm_compute_clocks(adev);
        }
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters.  The file power_dpm_force_performance_level is
 * used for this.  It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating, or the resulting clock fluctuations,
 * to interfere with your results. profile_standard sets the clocks to a
 * fixed clock level which varies from asic to asic.  profile_min_sclk
 * forces the sclk to the lowest level.  profile_min_mclk forces the mclk
 * to the lowest level.  profile_peak sets all clocks (mclk, sclk, pcie)
 * to the highest levels.
 *
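 * Example (illustrative; the card index in the sysfs path varies by
 * system):
 *
 * .. code-block:: bash
 *
 *      echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *      cat /sys/class/drm/card0/device/power_dpm_force_performance_level
 *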
 */

static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_dpm_forced_level level = 0xff;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                level = smu_get_performance_level(&adev->smu);
        else if (adev->powerplay.pp_funcs->get_performance_level)
                level = amdgpu_dpm_get_performance_level(adev);
        else
                level = adev->pm.dpm.forced_level;

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                        (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
                        (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
                        (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
                        (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
                        "unknown");
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
                                                       struct device_attribute *attr,
                                                       const char *buf,
                                                       size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_dpm_forced_level level;
        enum amd_dpm_forced_level current_level = 0xff;
        int ret = 0;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
        } else if (strncmp("high", buf, strlen("high")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_HIGH;
        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_AUTO;
        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_MANUAL;
        } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
        } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
        } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
        } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
        } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
        } else {
                return -EINVAL;
        }

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                current_level = smu_get_performance_level(&adev->smu);
        else if (adev->powerplay.pp_funcs->get_performance_level)
                current_level = amdgpu_dpm_get_performance_level(adev);

        if (current_level == level) {
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
                return count;
        }

        /* profile_exit setting is valid only when current mode is in profile mode */
        if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
            AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
            AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
            AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
            (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
                pr_err("Currently not in any profile mode!\n");
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
                return -EINVAL;
        }

        if (is_support_sw_smu(adev)) {
                ret = smu_force_performance_level(&adev->smu, level);
                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return -EINVAL;
                }
        } else if (adev->powerplay.pp_funcs->force_performance_level) {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.dpm.thermal_active) {
                        mutex_unlock(&adev->pm.mutex);
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return -EINVAL;
                }
                ret = amdgpu_dpm_force_performance_level(adev, level);
                if (ret) {
                        mutex_unlock(&adev->pm.mutex);
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return -EINVAL;
                } else {
                        adev->pm.dpm.forced_level = level;
                }
                mutex_unlock(&adev->pm.mutex);
        }
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        struct pp_states_info data;
        int i, buf_len, ret;

        /* data may be left untouched below; don't report stack garbage */
        memset(&data, 0, sizeof(data));

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                ret = smu_get_power_num_states(&adev->smu, &data);
                if (ret) {
                        /* drop the runtime PM reference taken above */
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return ret;
                }
        } else if (adev->powerplay.pp_funcs->get_pp_num_states)
                amdgpu_dpm_get_pp_num_states(adev, &data);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
        for (i = 0; i < data.nums; i++)
                buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
                                (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
                                (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
                                (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
                                (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

        return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        struct pp_states_info data;
        struct smu_context *smu = &adev->smu;
        enum amd_pm_state_type pm = 0;
        int i = 0, ret = 0;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        /* data may be left untouched below; don't scan stack garbage */
        memset(&data, 0, sizeof(data));

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                pm = smu_get_current_power_state(smu);
                ret = smu_get_power_num_states(smu, &data);
                if (ret) {
                        /* drop the runtime PM reference taken above */
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return ret;
                }
        } else if (adev->powerplay.pp_funcs->get_current_power_state
                 && adev->powerplay.pp_funcs->get_pp_num_states) {
                pm = amdgpu_dpm_get_current_power_state(adev);
                amdgpu_dpm_get_pp_num_states(adev, &data);
        }

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        for (i = 0; i < data.nums; i++) {
                if (pm == data.states[i])
                        break;
        }

        if (i == data.nums)
                i = -EINVAL;

        return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        if (adev->pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
                return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type state = 0;
        unsigned long idx;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
        else if (is_support_sw_smu(adev))
                adev->pp_force_state_enabled = false;
        else if (adev->powerplay.pp_funcs->dispatch_tasks &&
                        adev->powerplay.pp_funcs->get_pp_num_states) {
                struct pp_states_info data;

                ret = kstrtoul(buf, 0, &idx);
                if (ret || idx >= ARRAY_SIZE(data.states))
                        return -EINVAL;

                idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

                amdgpu_dpm_get_pp_num_states(adev, &data);
                state = data.states[idx];

                ret = pm_runtime_get_sync(ddev->dev);
                if (ret < 0)
                        return ret;

                /* only set user selected power states */
                if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
                    state != POWER_STATE_TYPE_DEFAULT) {
                        amdgpu_dpm_dispatch_task(adev,
                                        AMD_PP_TASK_ENABLE_USER_STATE, &state);
                        adev->pp_force_state_enabled = true;
                }
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
        }

        return count;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables.  The file pp_table is used for this.  Reading the file
 * will dump the current powerplay table.  Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
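 * Example (illustrative; the card index and the modified table file are
 * placeholders):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table
 *      cat /tmp/pp_table_modified > /sys/class/drm/card0/device/pp_table
 *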
 */

static ssize_t amdgpu_get_pp_table(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        char *table = NULL;
        int size, ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
                if (size < 0)
                        return size;
        } else if (adev->powerplay.pp_funcs->get_pp_table) {
                size = amdgpu_dpm_get_pp_table(adev, &table);
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
                if (size < 0)
                        return size;
        } else {
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
                return 0;
        }

        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;

        memcpy(buf, table, size);

        return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret = 0;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return ret;
                }
        } else if (adev->powerplay.pp_funcs->set_pp_table)
                amdgpu_dpm_set_pp_table(adev, buf, count);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state.  The file pp_od_clk_voltage is
 * used for this.
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV.  When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes.  If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - maximum memory clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz
 *   and "m 1 800" will update the maximum mclk to 800 MHz.
 *
 *   For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
 *   update point 1 with a clock of 300 MHz and a voltage of 600 mV,
 *   and "vc 2 1000 1000" will update point 3 with a clock of
 *   1000 MHz and a voltage of 1000 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
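 * Example session (illustrative; the card index and the clock/voltage
 * values are placeholders):
 *
 * .. code-block:: bash
 *
 *      cd /sys/class/drm/card0/device
 *      echo manual > power_dpm_force_performance_level
 *      echo "s 1 500 820" > pp_od_clk_voltage
 *      echo "c" > pp_od_clk_voltage
 *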
 */

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t parameter_size = 0;
        long parameter[64];
        char buf_cpy[128];
        char *tmp_str;
        char *sub_str;
        const char delimiter[3] = {' ', '\n', '\0'};
        uint32_t type;

        if (amdgpu_sriov_vf(adev))
                return -EINVAL;

        if (count > 127)
                return -EINVAL;

        if (*buf == 's')
                type = PP_OD_EDIT_SCLK_VDDC_TABLE;
        else if (*buf == 'm')
                type = PP_OD_EDIT_MCLK_VDDC_TABLE;
        else if (*buf == 'r')
                type = PP_OD_RESTORE_DEFAULT_TABLE;
        else if (*buf == 'c')
                type = PP_OD_COMMIT_DPM_TABLE;
        else if (!strncmp(buf, "vc", 2))
                type = PP_OD_EDIT_VDDC_CURVE;
        else
                return -EINVAL;

        /* copy only the count bytes the caller provided and terminate,
         * rather than reading one byte past the end of the input */
        memcpy(buf_cpy, buf, count);
        buf_cpy[count] = '\0';

        tmp_str = buf_cpy;

        if (type == PP_OD_EDIT_VDDC_CURVE)
                tmp_str++;
        while (isspace(*++tmp_str));

        while (tmp_str[0]) {
                sub_str = strsep(&tmp_str, delimiter);
                ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                if (ret)
                        return -EINVAL;
                parameter_size++;

                while (isspace(*tmp_str))
                        tmp_str++;
        }

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                ret = smu_od_edit_dpm_table(&adev->smu, type,
                                            parameter, parameter_size);

                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return -EINVAL;
                }
        } else {
                if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
                        ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
                                                parameter, parameter_size);
                        if (ret) {
                                pm_runtime_mark_last_busy(ddev->dev);
                                pm_runtime_put_autosuspend(ddev->dev);
                                return -EINVAL;
                        }
                }

                if (type == PP_OD_COMMIT_DPM_TABLE) {
                        if (adev->powerplay.pp_funcs->dispatch_tasks) {
                                amdgpu_dpm_dispatch_task(adev,
                                                AMD_PP_TASK_READJUST_POWER_STATE,
                                                NULL);
                                pm_runtime_mark_last_busy(ddev->dev);
                                pm_runtime_put_autosuspend(ddev->dev);
                                return count;
                        } else {
                                pm_runtime_mark_last_busy(ddev->dev);
                                pm_runtime_put_autosuspend(ddev->dev);
                                return -EINVAL;
                        }
                }
        }
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;

        if (amdgpu_sriov_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
                size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
                size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
                size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
        } else if (adev->powerplay.pp_funcs->print_clock_levels) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
                size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
        } else {
                size = snprintf(buf, PAGE_SIZE, "\n");
        }
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it
 * is only available on Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 *
 * - Current ppfeature mask
 *
 * - List of all the supported powerplay features with their names,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature mask and write the
 * new mask back to the file.
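 *
 * Example (illustrative; the card index and the mask value are
 * placeholders):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/pp_features
 *      echo 0x0000000000003fff > /sys/class/drm/card0/device/pp_features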
 */
static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint64_t featuremask;
        int ret;

        if (amdgpu_sriov_vf(adev))
                return -EINVAL;

        ret = kstrtou64(buf, 0, &featuremask);
        if (ret)
                return -EINVAL;

        pr_debug("featuremask = 0x%llx\n", featuremask);

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return -EINVAL;
                }
        } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
                ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return -EINVAL;
                }
        }
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;

        if (amdgpu_sriov_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
        else if (adev->powerplay.pp_funcs->get_ppfeature_status)
                size = amdgpu_dpm_get_ppfeature_status(adev, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting which power levels
 * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * The pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * The pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Then enable the desired levels by writing a space-separated list of
 * level indices to the respective pp_dpm_sclk/mclk/pcie file.
 * E.g.,
 *
 * .. code-block:: bash
 *
 *      echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported.
 */

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
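 *
 * For example, an input of "4 5 6" sets bits 4, 5 and 6, producing a
 * mask of 0x70.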
 */
#define AMDGPU_MASK_BUF_MAX     (32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
        int ret;
        long level;
        char *sub_str = NULL;
        char *tmp;
        char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
        const char delimiter[3] = {' ', '\n', '\0'};
        size_t bytes;

        *mask = 0;

        bytes = min(count, sizeof(buf_cpy) - 1);
        memcpy(buf_cpy, buf, bytes);
        buf_cpy[bytes] = '\0';
        tmp = buf_cpy;
        while (tmp[0]) {
                sub_str = strsep(&tmp, delimiter);
                if (strlen(sub_str)) {
                        ret = kstrtol(sub_str, 0, &level);
                        if (ret)
                                return -EINVAL;
                        *mask |= 1 << level;
                } else
                        break;
        }

        return 0;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t mask = 0;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
        else
                ret = 0;

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
        else
                ret = 0;

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;

        if (amdgpu_sriov_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        if (amdgpu_sriov_vf(adev))
                return -EINVAL;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
        else
                ret = 0;

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        ssize_t size;
        int ret;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        uint32_t mask = 0;

        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return -EINVAL;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
        else
                ret = 0;

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t value = 0;
        int ret;

        if (amdgpu_sriov_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
        else if (adev->powerplay.pp_funcs->get_sclk_od)
                value = amdgpu_dpm_get_sclk_od(adev);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        long int value;

        if (amdgpu_sriov_vf(adev))
                return -EINVAL;

        ret = kstrtol(buf, 0, &value);

        if (ret)
                return -EINVAL;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev)) {
                value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
        } else {
                if (adev->powerplay.pp_funcs->set_sclk_od)
                        amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

                if (adev->powerplay.pp_funcs->dispatch_tasks) {
                        amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
                } else {
                        adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
                        amdgpu_pm_compute_clocks(adev);
                }
        }

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        uint32_t value = 0;
        int ret;

        if (amdgpu_sriov_vf(adev))
                return 0;

        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0)
                return ret;

        if (is_support_sw_smu(adev))
                value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
        else if (adev->powerplay.pp_funcs->get_mclk_od)
                value = amdgpu_dpm_get_mclk_od(adev);

        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);

        return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        long int value;

1508         if (amdgpu_sriov_vf(adev))
1509                 return 0;
1510
1511         ret = kstrtol(buf, 0, &value);
1512
1513         if (ret)
1514                 return -EINVAL;
1515
1516         ret = pm_runtime_get_sync(ddev->dev);
1517         if (ret < 0)
1518                 return ret;
1519
1520         if (is_support_sw_smu(adev)) {
1521                 value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
1522         } else {
1523                 if (adev->powerplay.pp_funcs->set_mclk_od)
1524                         amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1525
1526                 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1527                         amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1528                 } else {
1529                         adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1530                         amdgpu_pm_compute_clocks(adev);
1531                 }
1532         }
1533
1534         pm_runtime_mark_last_busy(ddev->dev);
1535         pm_runtime_put_autosuspend(ddev->dev);
1536
1537         return count;
1538 }
1539
1540 /**
1541  * DOC: pp_power_profile_mode
1542  *
1543  * The amdgpu driver provides a sysfs API for adjusting the heuristics
1544  * related to switching between power levels in a power state.  The file
1545  * pp_power_profile_mode is used for this.
1546  *
1547  * Reading this file outputs a list of all of the predefined power profiles
1548  * and the relevant heuristics settings for that profile.
1549  *
1550  * To select a profile or create a custom profile, first select manual using
1551  * power_dpm_force_performance_level.  Writing the number of a predefined
1552  * profile to pp_power_profile_mode will enable those heuristics.  To
1553  * create a custom set of heuristics, write a string of numbers to the file
1554  * starting with the number of the custom profile along with a setting
1555  * for each heuristic parameter.  Due to differences across asic families
1556  * the heuristic parameters vary from family to family.
1557  *
1558  */
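
As a rough illustration of the write interface described above, here is a minimal
userspace sketch. The card0 sysfs path and the profile index 2 are assumptions
for the example, not values taken from this file.

#include <stdio.h>

/* hypothetical example: force manual mode, then select predefined profile 2 */
int main(void)
{
        FILE *f;

        f = fopen("/sys/class/drm/card0/device/power_dpm_force_performance_level", "w");
        if (!f)
                return 1;
        fputs("manual\n", f);
        fclose(f);

        f = fopen("/sys/class/drm/card0/device/pp_power_profile_mode", "w");
        if (!f)
                return 1;
        fputs("2\n", f);
        fclose(f);
        return 0;
}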
1559
1560 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1561                 struct device_attribute *attr,
1562                 char *buf)
1563 {
1564         struct drm_device *ddev = dev_get_drvdata(dev);
1565         struct amdgpu_device *adev = ddev->dev_private;
1566         ssize_t size;
1567         int ret;
1568
1569         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1570                 return 0;
1571
1572         ret = pm_runtime_get_sync(ddev->dev);
1573         if (ret < 0)
1574                 return ret;
1575
1576         if (is_support_sw_smu(adev))
1577                 size = smu_get_power_profile_mode(&adev->smu, buf);
1578         else if (adev->powerplay.pp_funcs->get_power_profile_mode)
1579                 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1580         else
1581                 size = snprintf(buf, PAGE_SIZE, "\n");
1582
1583         pm_runtime_mark_last_busy(ddev->dev);
1584         pm_runtime_put_autosuspend(ddev->dev);
1585
1586         return size;
1587 }
1588
1589
1590 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1591                 struct device_attribute *attr,
1592                 const char *buf,
1593                 size_t count)
1594 {
1595         int ret = 0xff;
1596         struct drm_device *ddev = dev_get_drvdata(dev);
1597         struct amdgpu_device *adev = ddev->dev_private;
1598         uint32_t parameter_size = 0;
1599         long parameter[64];
1600         char *sub_str, buf_cpy[128];
1601         char *tmp_str;
1602         uint32_t i = 0;
1603         char tmp[2];
1604         long int profile_mode = 0;
1605         const char delimiter[3] = {' ', '\n', '\0'};
1606
1607         tmp[0] = *(buf);
1608         tmp[1] = '\0';
1609         ret = kstrtol(tmp, 0, &profile_mode);
1610         if (ret)
1611                 return -EINVAL;
1612
1613         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1614                 return -EINVAL;
1615
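        /* a custom profile is written as "<index> <param1> <param2> ...";
         * collect the heuristic parameters that follow the index */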
1616         if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1617                 if (count < 2 || count > 127)
1618                         return -EINVAL;
1619                 while (isspace(*++buf))
1620                         i++;
1621                 memcpy(buf_cpy, buf, count-i);
1622                 tmp_str = buf_cpy;
1623                 while (tmp_str[0]) {
1624                         sub_str = strsep(&tmp_str, delimiter);
1625                         ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1626                         if (ret)
1627                                 return -EINVAL;
1628                         parameter_size++;
1629                         while (isspace(*tmp_str))
1630                                 tmp_str++;
1631                 }
1632         }
1633         parameter[parameter_size] = profile_mode;
1634
1635         ret = pm_runtime_get_sync(ddev->dev);
1636         if (ret < 0)
1637                 return ret;
1638
1639         if (is_support_sw_smu(adev))
1640                 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
1641         else if (adev->powerplay.pp_funcs->set_power_profile_mode)
1642                 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1643
1644         pm_runtime_mark_last_busy(ddev->dev);
1645         pm_runtime_put_autosuspend(ddev->dev);
1646
1647         if (!ret)
1648                 return count;
1649
1650         return -EINVAL;
1651 }
1652
1653 /**
1654  * DOC: gpu_busy_percent
1655  *
1656  * The amdgpu driver provides a sysfs API for reading how busy the GPU
1657  * is as a percentage.  The file gpu_busy_percent is used for this.
1658  * The SMU firmware computes a percentage of load based on the
1659  * aggregate activity level in the IP cores.
1660  */
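
A minimal userspace read of this file might look like the following sketch; the
card0 path is an assumption for the example.

#include <stdio.h>

/* hypothetical example: print the current GPU load percentage */
int main(void)
{
        FILE *f = fopen("/sys/class/drm/card0/device/gpu_busy_percent", "r");
        int busy;

        if (!f)
                return 1;
        if (fscanf(f, "%d", &busy) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("GPU busy: %d%%\n", busy);
        return 0;
}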
1661 static ssize_t amdgpu_get_busy_percent(struct device *dev,
1662                 struct device_attribute *attr,
1663                 char *buf)
1664 {
1665         struct drm_device *ddev = dev_get_drvdata(dev);
1666         struct amdgpu_device *adev = ddev->dev_private;
1667         int r, value, size = sizeof(value);
1668
1669         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1670                 return 0;
1671
1672         r = pm_runtime_get_sync(ddev->dev);
1673         if (r < 0)
1674                 return r;
1675
1676         /* read the IP busy sensor */
1677         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1678                                    (void *)&value, &size);
1679
1680         pm_runtime_mark_last_busy(ddev->dev);
1681         pm_runtime_put_autosuspend(ddev->dev);
1682
1683         if (r)
1684                 return r;
1685
1686         return snprintf(buf, PAGE_SIZE, "%d\n", value);
1687 }
1688
1689 /**
1690  * DOC: mem_busy_percent
1691  *
1692  * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1693  * is as a percentage.  The file mem_busy_percent is used for this.
1694  * The SMU firmware computes a percentage of load based on the
1695  * aggregate activity level of the memory controller.
1696  */
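
Reading this file follows the same pattern as the gpu_busy_percent sketch above;
only the filename changes to mem_busy_percent.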
1697 static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
1698                 struct device_attribute *attr,
1699                 char *buf)
1700 {
1701         struct drm_device *ddev = dev_get_drvdata(dev);
1702         struct amdgpu_device *adev = ddev->dev_private;
1703         int r, value, size = sizeof(value);
1704
1705         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1706                 return 0;
1707
1708         r = pm_runtime_get_sync(ddev->dev);
1709         if (r < 0)
1710                 return r;
1711
1712         /* read the memory load sensor */
1713         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
1714                                    (void *)&value, &size);
1715
1716         pm_runtime_mark_last_busy(ddev->dev);
1717         pm_runtime_put_autosuspend(ddev->dev);
1718
1719         if (r)
1720                 return r;
1721
1722         return snprintf(buf, PAGE_SIZE, "%d\n", value);
1723 }
1724
1725 /**
1726  * DOC: pcie_bw
1727  *
1728  * The amdgpu driver provides a sysfs API for estimating how much data
1729  * has been received and sent by the GPU in the last second through PCIe.
1730  * The file pcie_bw is used for this.
1731  * The perf counters count the number of received and sent messages and return
1732  * those values, as well as the maximum payload size of a PCIe packet (mps).
1733  * Note that it is not possible to easily and quickly obtain the size of each
1734  * packet transmitted, so we output the max payload size (mps) to allow for
1735  * quick estimation of the PCIe bandwidth usage.
1736  */
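
Since the file only exposes message counts and the max payload size, a consumer
has to do the multiplication itself. The sketch below sums both counters and
multiplies by mps, which yields an upper bound on the bytes moved in the last
second (not every packet carries a full payload); the card0 path is an
assumption for the example.

#include <stdio.h>

/* hypothetical example: estimate PCIe bandwidth usage from pcie_bw */
int main(void)
{
        unsigned long long count0, count1;
        int mps;
        FILE *f = fopen("/sys/class/drm/card0/device/pcie_bw", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%llu %llu %d", &count0, &count1, &mps) != 3) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("up to %llu bytes over PCIe in the last second\n",
               (count0 + count1) * (unsigned long long)mps);
        return 0;
}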
1737 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1738                 struct device_attribute *attr,
1739                 char *buf)
1740 {
1741         struct drm_device *ddev = dev_get_drvdata(dev);
1742         struct amdgpu_device *adev = ddev->dev_private;
1743         uint64_t count0, count1;
1744         int ret;
1745
1746         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1747                 return 0;
1748
1749         ret = pm_runtime_get_sync(ddev->dev);
1750         if (ret < 0)
1751                 return ret;
1752
1753         amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1754
1755         pm_runtime_mark_last_busy(ddev->dev);
1756         pm_runtime_put_autosuspend(ddev->dev);
1757
1758         return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
1759                         count0, count1, pcie_get_mps(adev->pdev));
1760 }
1761
1762 /**
1763  * DOC: unique_id
1764  *
1765  * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1766  * The file unique_id is used for this.
1767  * This will provide a unique ID that will persist from machine to machine.
1768  *
1769  * NOTE: This will only work for GFX9 and newer. This file will be absent
1770  * on unsupported ASICs (GFX8 and older).
1771  */
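
The file contains a single hexadecimal value, so the read pattern from the
gpu_busy_percent sketch above applies here as well, with a "%llx" conversion
instead of "%d".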
1772 static ssize_t amdgpu_get_unique_id(struct device *dev,
1773                 struct device_attribute *attr,
1774                 char *buf)
1775 {
1776         struct drm_device *ddev = dev_get_drvdata(dev);
1777         struct amdgpu_device *adev = ddev->dev_private;
1778
1779         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1780                 return 0;
1781
1782         if (adev->unique_id)
1783                 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
1784
1785         return 0;
1786 }
1787
1788 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
1789 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
1790                    amdgpu_get_dpm_forced_performance_level,
1791                    amdgpu_set_dpm_forced_performance_level);
1792 static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
1793 static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
1794 static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
1795                 amdgpu_get_pp_force_state,
1796                 amdgpu_set_pp_force_state);
1797 static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
1798                 amdgpu_get_pp_table,
1799                 amdgpu_set_pp_table);
1800 static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
1801                 amdgpu_get_pp_dpm_sclk,
1802                 amdgpu_set_pp_dpm_sclk);
1803 static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
1804                 amdgpu_get_pp_dpm_mclk,
1805                 amdgpu_set_pp_dpm_mclk);
1806 static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
1807                 amdgpu_get_pp_dpm_socclk,
1808                 amdgpu_set_pp_dpm_socclk);
1809 static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
1810                 amdgpu_get_pp_dpm_fclk,
1811                 amdgpu_set_pp_dpm_fclk);
1812 static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
1813                 amdgpu_get_pp_dpm_dcefclk,
1814                 amdgpu_set_pp_dpm_dcefclk);
1815 static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
1816                 amdgpu_get_pp_dpm_pcie,
1817                 amdgpu_set_pp_dpm_pcie);
1818 static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
1819                 amdgpu_get_pp_sclk_od,
1820                 amdgpu_set_pp_sclk_od);
1821 static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
1822                 amdgpu_get_pp_mclk_od,
1823                 amdgpu_set_pp_mclk_od);
1824 static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
1825                 amdgpu_get_pp_power_profile_mode,
1826                 amdgpu_set_pp_power_profile_mode);
1827 static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
1828                 amdgpu_get_pp_od_clk_voltage,
1829                 amdgpu_set_pp_od_clk_voltage);
1830 static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
1831                 amdgpu_get_busy_percent, NULL);
1832 static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
1833                 amdgpu_get_memory_busy_percent, NULL);
1834 static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
1835 static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
1836                 amdgpu_get_pp_feature_status,
1837                 amdgpu_set_pp_feature_status);
1838 static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
1839
1840 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
1841                                       struct device_attribute *attr,
1842                                       char *buf)
1843 {
1844         struct amdgpu_device *adev = dev_get_drvdata(dev);
1845         int channel = to_sensor_dev_attr(attr)->index;
1846         int r, temp = 0, size = sizeof(temp);
1847
1848         if (channel >= PP_TEMP_MAX)
1849                 return -EINVAL;
1850
1851         r = pm_runtime_get_sync(adev->ddev->dev);
1852         if (r < 0)
1853                 return r;
1854
1855         switch (channel) {
1856         case PP_TEMP_JUNCTION:
1857                 /* get current junction temperature */
1858                 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1859                                            (void *)&temp, &size);
1860                 break;
1861         case PP_TEMP_EDGE:
1862                 /* get current edge temperature */
1863                 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
1864                                            (void *)&temp, &size);
1865                 break;
1866         case PP_TEMP_MEM:
1867                 /* get current memory temperature */
1868                 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
1869                                            (void *)&temp, &size);
1870                 break;
1871         default:
1872                 r = -EINVAL;
1873                 break;
1874         }
1875
1876         pm_runtime_mark_last_busy(adev->ddev->dev);
1877         pm_runtime_put_autosuspend(adev->ddev->dev);
1878
1879         if (r)
1880                 return r;
1881
1882         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1883 }
1884
1885 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
1886                                              struct device_attribute *attr,
1887                                              char *buf)
1888 {
1889         struct amdgpu_device *adev = dev_get_drvdata(dev);
1890         int hyst = to_sensor_dev_attr(attr)->index;
1891         int temp;
1892
1893         if (hyst)
1894                 temp = adev->pm.dpm.thermal.min_temp;
1895         else
1896                 temp = adev->pm.dpm.thermal.max_temp;
1897
1898         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1899 }
1900
1901 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
1902                                              struct device_attribute *attr,
1903                                              char *buf)
1904 {
1905         struct amdgpu_device *adev = dev_get_drvdata(dev);
1906         int hyst = to_sensor_dev_attr(attr)->index;
1907         int temp;
1908
1909         if (hyst)
1910                 temp = adev->pm.dpm.thermal.min_hotspot_temp;
1911         else
1912                 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
1913
1914         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1915 }
1916
1917 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
1918                                              struct device_attribute *attr,
1919                                              char *buf)
1920 {
1921         struct amdgpu_device *adev = dev_get_drvdata(dev);
1922         int hyst = to_sensor_dev_attr(attr)->index;
1923         int temp;
1924
1925         if (hyst)
1926                 temp = adev->pm.dpm.thermal.min_mem_temp;
1927         else
1928                 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
1929
1930         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1931 }
1932
1933 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
1934                                              struct device_attribute *attr,
1935                                              char *buf)
1936 {
1937         int channel = to_sensor_dev_attr(attr)->index;
1938
1939         if (channel >= PP_TEMP_MAX)
1940                 return -EINVAL;
1941
1942         return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
1943 }
1944
1945 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
1946                                              struct device_attribute *attr,
1947                                              char *buf)
1948 {
1949         struct amdgpu_device *adev = dev_get_drvdata(dev);
1950         int channel = to_sensor_dev_attr(attr)->index;
1951         int temp = 0;
1952
1953         if (channel >= PP_TEMP_MAX)
1954                 return -EINVAL;
1955
1956         switch (channel) {
1957         case PP_TEMP_JUNCTION:
1958                 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
1959                 break;
1960         case PP_TEMP_EDGE:
1961                 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
1962                 break;
1963         case PP_TEMP_MEM:
1964                 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
1965                 break;
1966         }
1967
1968         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1969 }
1970
1971 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
1972                                             struct device_attribute *attr,
1973                                             char *buf)
1974 {
1975         struct amdgpu_device *adev = dev_get_drvdata(dev);
1976         u32 pwm_mode = 0;
1977         int ret;
1978
1979         ret = pm_runtime_get_sync(adev->ddev->dev);
1980         if (ret < 0)
1981                 return ret;
1982
1983         if (is_support_sw_smu(adev)) {
1984                 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1985         } else {
1986                 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
1987                         pm_runtime_mark_last_busy(adev->ddev->dev);
1988                         pm_runtime_put_autosuspend(adev->ddev->dev);
1989                         return -EINVAL;
1990                 }
1991
1992                 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1993         }
1994
1995         pm_runtime_mark_last_busy(adev->ddev->dev);
1996         pm_runtime_put_autosuspend(adev->ddev->dev);
1997
1998         return sprintf(buf, "%i\n", pwm_mode);
1999 }
2000
2001 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2002                                             struct device_attribute *attr,
2003                                             const char *buf,
2004                                             size_t count)
2005 {
2006         struct amdgpu_device *adev = dev_get_drvdata(dev);
2007         int err, ret;
2008         int value;
2009
2010         err = kstrtoint(buf, 10, &value);
2011         if (err)
2012                 return err;
2013
2014         ret = pm_runtime_get_sync(adev->ddev->dev);
2015         if (ret < 0)
2016                 return ret;
2017
2018         if (is_support_sw_smu(adev)) {
2019                 smu_set_fan_control_mode(&adev->smu, value);
2020         } else {
2021                 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2022                         pm_runtime_mark_last_busy(adev->ddev->dev);
2023                         pm_runtime_put_autosuspend(adev->ddev->dev);
2024                         return -EINVAL;
2025                 }
2026
2027                 amdgpu_dpm_set_fan_control_mode(adev, value);
2028         }
2029
2030         pm_runtime_mark_last_busy(adev->ddev->dev);
2031         pm_runtime_put_autosuspend(adev->ddev->dev);
2032
2033         return count;
2034 }
2035
2036 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2037                                          struct device_attribute *attr,
2038                                          char *buf)
2039 {
2040         return sprintf(buf, "%i\n", 0);
2041 }
2042
2043 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2044                                          struct device_attribute *attr,
2045                                          char *buf)
2046 {
2047         return sprintf(buf, "%i\n", 255);
2048 }
2049
2050 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2051                                      struct device_attribute *attr,
2052                                      const char *buf, size_t count)
2053 {
2054         struct amdgpu_device *adev = dev_get_drvdata(dev);
2055         int err;
2056         u32 value;
2057         u32 pwm_mode;
2058
2059         err = pm_runtime_get_sync(adev->ddev->dev);
2060         if (err < 0)
2061                 return err;
2062
2063         if (is_support_sw_smu(adev))
2064                 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2065         else
2066                 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2067
2068         if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2069                 pr_info("manual fan speed control should be enabled first\n");
2070                 pm_runtime_mark_last_busy(adev->ddev->dev);
2071                 pm_runtime_put_autosuspend(adev->ddev->dev);
2072                 return -EINVAL;
2073         }
2074
2075         err = kstrtou32(buf, 10, &value);
2076         if (err) {
2077                 pm_runtime_mark_last_busy(adev->ddev->dev);
2078                 pm_runtime_put_autosuspend(adev->ddev->dev);
2079                 return err;
2080         }
2081
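        /* sysfs pwm values run 0-255; scale to the percentage the fan
         * interfaces below expect */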
2082         value = (value * 100) / 255;
2083
2084         if (is_support_sw_smu(adev))
2085                 err = smu_set_fan_speed_percent(&adev->smu, value);
2086         else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
2087                 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
2088         else
2089                 err = -EINVAL;
2090
2091         pm_runtime_mark_last_busy(adev->ddev->dev);
2092         pm_runtime_put_autosuspend(adev->ddev->dev);
2093
2094         if (err)
2095                 return err;
2096
2097         return count;
2098 }
2099
2100 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2101                                      struct device_attribute *attr,
2102                                      char *buf)
2103 {
2104         struct amdgpu_device *adev = dev_get_drvdata(dev);
2105         int err;
2106         u32 speed = 0;
2107
2108         err = pm_runtime_get_sync(adev->ddev->dev);
2109         if (err < 0)
2110                 return err;
2111
2112         if (is_support_sw_smu(adev))
2113                 err = smu_get_fan_speed_percent(&adev->smu, &speed);
2114         else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
2115                 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
2116         else
2117                 err = -EINVAL;
2118
2119         pm_runtime_mark_last_busy(adev->ddev->dev);
2120         pm_runtime_put_autosuspend(adev->ddev->dev);
2121
2122         if (err)
2123                 return err;
2124
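        /* the driver reports fan speed as a percentage; map it back to
         * the 0-255 pwm scale */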
2125         speed = (speed * 255) / 100;
2126
2127         return sprintf(buf, "%i\n", speed);
2128 }
2129
2130 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2131                                            struct device_attribute *attr,
2132                                            char *buf)
2133 {
2134         struct amdgpu_device *adev = dev_get_drvdata(dev);
2135         int err;
2136         u32 speed = 0;
2137
2138         err = pm_runtime_get_sync(adev->ddev->dev);
2139         if (err < 0)
2140                 return err;
2141
2142         if (is_support_sw_smu(adev))
2143                 err = smu_get_fan_speed_rpm(&adev->smu, &speed);
2144         else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2145                 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2146         else
2147                 err = -EINVAL;
2148
2149         pm_runtime_mark_last_busy(adev->ddev->dev);
2150         pm_runtime_put_autosuspend(adev->ddev->dev);
2151
2152         if (err)
2153                 return err;
2154
2155         return sprintf(buf, "%i\n", speed);
2156 }
2157
2158 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2159                                          struct device_attribute *attr,
2160                                          char *buf)
2161 {
2162         struct amdgpu_device *adev = dev_get_drvdata(dev);
2163         u32 min_rpm = 0;
2164         u32 size = sizeof(min_rpm);
2165         int r;
2166
2167         r = pm_runtime_get_sync(adev->ddev->dev);
2168         if (r < 0)
2169                 return r;
2170
2171         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2172                                    (void *)&min_rpm, &size);
2173
2174         pm_runtime_mark_last_busy(adev->ddev->dev);
2175         pm_runtime_put_autosuspend(adev->ddev->dev);
2176
2177         if (r)
2178                 return r;
2179
2180         return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
2181 }
2182
2183 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2184                                          struct device_attribute *attr,
2185                                          char *buf)
2186 {
2187         struct amdgpu_device *adev = dev_get_drvdata(dev);
2188         u32 max_rpm = 0;
2189         u32 size = sizeof(max_rpm);
2190         int r;
2191
2192         r = pm_runtime_get_sync(adev->ddev->dev);
2193         if (r < 0)
2194                 return r;
2195
2196         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2197                                    (void *)&max_rpm, &size);
2198
2199         pm_runtime_mark_last_busy(adev->ddev->dev);
2200         pm_runtime_put_autosuspend(adev->ddev->dev);
2201
2202         if (r)
2203                 return r;
2204
2205         return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
2206 }
2207
2208 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2209                                            struct device_attribute *attr,
2210                                            char *buf)
2211 {
2212         struct amdgpu_device *adev = dev_get_drvdata(dev);
2213         int err;
2214         u32 rpm = 0;
2215
2216         err = pm_runtime_get_sync(adev->ddev->dev);
2217         if (err < 0)
2218                 return err;
2219
2220         if (is_support_sw_smu(adev))
2221                 err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
2222         else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2223                 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2224         else
2225                 err = -EINVAL;
2226
2227         pm_runtime_mark_last_busy(adev->ddev->dev);
2228         pm_runtime_put_autosuspend(adev->ddev->dev);
2229
2230         if (err)
2231                 return err;
2232
2233         return sprintf(buf, "%i\n", rpm);
2234 }
2235
2236 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2237                                      struct device_attribute *attr,
2238                                      const char *buf, size_t count)
2239 {
2240         struct amdgpu_device *adev = dev_get_drvdata(dev);
2241         int err;
2242         u32 value;
2243         u32 pwm_mode;
2244
2245         err = pm_runtime_get_sync(adev->ddev->dev);
2246         if (err < 0)
2247                 return err;
2248
2249         if (is_support_sw_smu(adev))
2250                 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2251         else
2252                 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2253
2254         if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2255                 pm_runtime_mark_last_busy(adev->ddev->dev);
2256                 pm_runtime_put_autosuspend(adev->ddev->dev);
2257                 return -ENODATA;
2258         }
2259
2260         err = kstrtou32(buf, 10, &value);
2261         if (err) {
2262                 pm_runtime_mark_last_busy(adev->ddev->dev);
2263                 pm_runtime_put_autosuspend(adev->ddev->dev);
2264                 return err;
2265         }
2266
2267         if (is_support_sw_smu(adev))
2268                 err = smu_set_fan_speed_rpm(&adev->smu, value);
2269         else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2270                 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2271         else
2272                 err = -EINVAL;
2273
2274         pm_runtime_mark_last_busy(adev->ddev->dev);
2275         pm_runtime_put_autosuspend(adev->ddev->dev);
2276
2277         if (err)
2278                 return err;
2279
2280         return count;
2281 }
2282
2283 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2284                                             struct device_attribute *attr,
2285                                             char *buf)
2286 {
2287         struct amdgpu_device *adev = dev_get_drvdata(dev);
2288         u32 pwm_mode = 0;
2289         int ret;
2290
2291         ret = pm_runtime_get_sync(adev->ddev->dev);
2292         if (ret < 0)
2293                 return ret;
2294
2295         if (is_support_sw_smu(adev)) {
2296                 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2297         } else {
2298                 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2299                         pm_runtime_mark_last_busy(adev->ddev->dev);
2300                         pm_runtime_put_autosuspend(adev->ddev->dev);
2301                         return -EINVAL;
2302                 }
2303
2304                 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2305         }
2306
2307         pm_runtime_mark_last_busy(adev->ddev->dev);
2308         pm_runtime_put_autosuspend(adev->ddev->dev);
2309
2310         return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2311 }
2312
2313 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2314                                             struct device_attribute *attr,
2315                                             const char *buf,
2316                                             size_t count)
2317 {
2318         struct amdgpu_device *adev = dev_get_drvdata(dev);
2319         int err;
2320         int value;
2321         u32 pwm_mode;
2322
2323         err = kstrtoint(buf, 10, &value);
2324         if (err)
2325                 return err;
2326
2327         if (value == 0)
2328                 pwm_mode = AMD_FAN_CTRL_AUTO;
2329         else if (value == 1)
2330                 pwm_mode = AMD_FAN_CTRL_MANUAL;
2331         else
2332                 return -EINVAL;
2333
2334         err = pm_runtime_get_sync(adev->ddev->dev);
2335         if (err < 0)
2336                 return err;
2337
2338         if (is_support_sw_smu(adev)) {
2339                 smu_set_fan_control_mode(&adev->smu, pwm_mode);
2340         } else {
2341                 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2342                         pm_runtime_mark_last_busy(adev->ddev->dev);
2343                         pm_runtime_put_autosuspend(adev->ddev->dev);
2344                         return -EINVAL;
2345                 }
2346                 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2347         }
2348
2349         pm_runtime_mark_last_busy(adev->ddev->dev);
2350         pm_runtime_put_autosuspend(adev->ddev->dev);
2351
2352         return count;
2353 }
2354
2355 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2356                                         struct device_attribute *attr,
2357                                         char *buf)
2358 {
2359         struct amdgpu_device *adev = dev_get_drvdata(dev);
2360         u32 vddgfx;
2361         int r, size = sizeof(vddgfx);
2362
2363         r = pm_runtime_get_sync(adev->ddev->dev);
2364         if (r < 0)
2365                 return r;
2366
2367         /* get the voltage */
2368         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2369                                    (void *)&vddgfx, &size);
2370
2371         pm_runtime_mark_last_busy(adev->ddev->dev);
2372         pm_runtime_put_autosuspend(adev->ddev->dev);
2373
2374         if (r)
2375                 return r;
2376
2377         return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
2378 }
2379
2380 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2381                                               struct device_attribute *attr,
2382                                               char *buf)
2383 {
2384         return snprintf(buf, PAGE_SIZE, "vddgfx\n");
2385 }
2386
2387 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2388                                        struct device_attribute *attr,
2389                                        char *buf)
2390 {
2391         struct amdgpu_device *adev = dev_get_drvdata(dev);
2392         u32 vddnb;
2393         int r, size = sizeof(vddnb);
2394
2395         /* only APUs have vddnb */
2396         if (!(adev->flags & AMD_IS_APU))
2397                 return -EINVAL;
2398
2399         r = pm_runtime_get_sync(adev->ddev->dev);
2400         if (r < 0)
2401                 return r;
2402
2403         /* get the voltage */
2404         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2405                                    (void *)&vddnb, &size);
2406
2407         pm_runtime_mark_last_busy(adev->ddev->dev);
2408         pm_runtime_put_autosuspend(adev->ddev->dev);
2409
2410         if (r)
2411                 return r;
2412
2413         return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
2414 }
2415
2416 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2417                                               struct device_attribute *attr,
2418                                               char *buf)
2419 {
2420         return snprintf(buf, PAGE_SIZE, "vddnb\n");
2421 }
2422
2423 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2424                                            struct device_attribute *attr,
2425                                            char *buf)
2426 {
2427         struct amdgpu_device *adev = dev_get_drvdata(dev);
2428         u32 query = 0;
2429         int r, size = sizeof(u32);
2430         unsigned uw;
2431
2432         r = pm_runtime_get_sync(adev->ddev->dev);
2433         if (r < 0)
2434                 return r;
2435
2436         /* get the power */
2437         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2438                                    (void *)&query, &size);
2439
2440         pm_runtime_mark_last_busy(adev->ddev->dev);
2441         pm_runtime_put_autosuspend(adev->ddev->dev);
2442
2443         if (r)
2444                 return r;
2445
2446         /* convert to microwatts */
2447         uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2448
2449         return snprintf(buf, PAGE_SIZE, "%u\n", uw);
2450 }
2451
2452 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2453                                          struct device_attribute *attr,
2454                                          char *buf)
2455 {
2456         return sprintf(buf, "%i\n", 0);
2457 }
2458
2459 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2460                                          struct device_attribute *attr,
2461                                          char *buf)
2462 {
2463         struct amdgpu_device *adev = dev_get_drvdata(dev);
2464         uint32_t limit = 0;
2465         ssize_t size;
2466         int r;
2467
2468         r = pm_runtime_get_sync(adev->ddev->dev);
2469         if (r < 0)
2470                 return r;
2471
2472         if (is_support_sw_smu(adev)) {
2473                 smu_get_power_limit(&adev->smu, &limit, true, true);
2474                 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2475         } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2476                 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
2477                 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2478         } else {
2479                 size = snprintf(buf, PAGE_SIZE, "\n");
2480         }
2481
2482         pm_runtime_mark_last_busy(adev->ddev->dev);
2483         pm_runtime_put_autosuspend(adev->ddev->dev);
2484
2485         return size;
2486 }
2487
2488 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2489                                          struct device_attribute *attr,
2490                                          char *buf)
2491 {
2492         struct amdgpu_device *adev = dev_get_drvdata(dev);
2493         uint32_t limit = 0;
2494         ssize_t size;
2495         int r;
2496
2497         r = pm_runtime_get_sync(adev->ddev->dev);
2498         if (r < 0)
2499                 return r;
2500
2501         if (is_support_sw_smu(adev)) {
2502                 smu_get_power_limit(&adev->smu, &limit, false, true);
2503                 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2504         } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2505                 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
2506                 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2507         } else {
2508                 size = snprintf(buf, PAGE_SIZE, "\n");
2509         }
2510
2511         pm_runtime_mark_last_busy(adev->ddev->dev);
2512         pm_runtime_put_autosuspend(adev->ddev->dev);
2513
2514         return size;
2515 }
2516
2517
2518 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2519                 struct device_attribute *attr,
2520                 const char *buf,
2521                 size_t count)
2522 {
2523         struct amdgpu_device *adev = dev_get_drvdata(dev);
2524         int err;
2525         u32 value;
2526
2527         if (amdgpu_sriov_vf(adev))
2528                 return -EINVAL;
2529
2530         err = kstrtou32(buf, 10, &value);
2531         if (err)
2532                 return err;
2533
2534         value = value / 1000000; /* convert to Watt */
2535
2536
2537         err = pm_runtime_get_sync(adev->ddev->dev);
2538         if (err < 0)
2539                 return err;
2540
2541         if (is_support_sw_smu(adev))
2542                 err = smu_set_power_limit(&adev->smu, value);
2543         else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
2544                 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
2545         else
2546                 err = -EINVAL;
2547
2548         pm_runtime_mark_last_busy(adev->ddev->dev);
2549         pm_runtime_put_autosuspend(adev->ddev->dev);
2550
2551         if (err)
2552                 return err;
2553
2554         return count;
2555 }
2556
2557 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2558                                       struct device_attribute *attr,
2559                                       char *buf)
2560 {
2561         struct amdgpu_device *adev = dev_get_drvdata(dev);
2562         uint32_t sclk;
2563         int r, size = sizeof(sclk);
2564
2565         r = pm_runtime_get_sync(adev->ddev->dev);
2566         if (r < 0)
2567                 return r;
2568
2569         /* get the sclk */
2570         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2571                                    (void *)&sclk, &size);
2572
2573         pm_runtime_mark_last_busy(adev->ddev->dev);
2574         pm_runtime_put_autosuspend(adev->ddev->dev);
2575
2576         if (r)
2577                 return r;
2578
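        /* the sensor reports the clock in 10 kHz units; convert to Hz,
         * which is what the hwmon freq*_input convention expects */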
2579         return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
2580 }
2581
2582 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2583                                             struct device_attribute *attr,
2584                                             char *buf)
2585 {
2586         return snprintf(buf, PAGE_SIZE, "sclk\n");
2587 }
2588
2589 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2590                                       struct device_attribute *attr,
2591                                       char *buf)
2592 {
2593         struct amdgpu_device *adev = dev_get_drvdata(dev);
2594         uint32_t mclk;
2595         int r, size = sizeof(mclk);
2596
2597         r = pm_runtime_get_sync(adev->ddev->dev);
2598         if (r < 0)
2599                 return r;
2600
2601         /* get the mclk */
2602         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
2603                                    (void *)&mclk, &size);
2604
2605         pm_runtime_mark_last_busy(adev->ddev->dev);
2606         pm_runtime_put_autosuspend(adev->ddev->dev);
2607
2608         if (r)
2609                 return r;
2610
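        /* as with sclk, the sensor value is in 10 kHz units; convert to Hz */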
2611         return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
2612 }
2613
2614 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
2615                                             struct device_attribute *attr,
2616                                             char *buf)
2617 {
2618         return snprintf(buf, PAGE_SIZE, "mclk\n");
2619 }
2620
2621 /**
2622  * DOC: hwmon
2623  *
2624  * The amdgpu driver exposes the following sensor interfaces:
2625  *
2626  * - GPU temperature (via the on-die sensor)
2627  *
2628  * - GPU voltage
2629  *
2630  * - Northbridge voltage (APUs only)
2631  *
2632  * - GPU power
2633  *
2634  * - GPU fan
2635  *
2636  * - GPU gfx/compute engine clock
2637  *
2638  * - GPU memory clock (dGPU only)
2639  *
2640  * hwmon interfaces for GPU temperature:
2641  *
2642  * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
2643  *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
2644  *
2645  * - temp[1-3]_label: temperature channel label
2646  *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
2647  *
2648  * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
2649  *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
2650  *
2651  * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
2652  *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
2653  *
2654  * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
2655  *   - these are supported on SOC15 dGPUs only
2656  *
2657  * hwmon interfaces for GPU voltage:
2658  *
2659  * - in0_input: the voltage on the GPU in millivolts
2660  *
2661  * - in1_input: the voltage on the Northbridge in millivolts
2662  *
2663  * hwmon interfaces for GPU power:
2664  *
2665  * - power1_average: average power used by the GPU in microWatts
2666  *
2667  * - power1_cap_min: minimum cap supported in microWatts
2668  *
2669  * - power1_cap_max: maximum cap supported in microWatts
2670  *
2671  * - power1_cap: selected power cap in microWatts
2672  *
2673  * hwmon interfaces for GPU fan:
2674  *
2675  * - pwm1: pulse width modulation fan level (0-255)
2676  *
2677  * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
2678  *
2679  * - pwm1_min: pulse width modulation fan control minimum level (0)
2680  *
2681  * - pwm1_max: pulse width modulation fan control maximum level (255)
2682  *
2683  * - fan1_min: the minimum fan speed. Unit: revolution/min (RPM)
2684  *
2685  * - fan1_max: the maximum fan speed. Unit: revolution/min (RPM)
2686  *
2687  * - fan1_input: fan speed in RPM
2688  *
2689  * - fan[1-\*]_target: Desired fan speed. Unit: revolution/min (RPM)
2690  *
2691  * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
2692  *
2693  * hwmon interfaces for GPU clocks:
2694  *
2695  * - freq1_input: the gfx/compute clock in hertz
2696  *
2697  * - freq2_input: the memory clock in hertz
2698  *
2699  * You can use hwmon tools like sensors to view this information on your system.
2700  *
2701  */
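
Putting the fan interfaces above together, a minimal userspace sketch for
manual fan control might look like this. The hwmon0 instance and the ~50%
target are assumptions for the example; the actual hwmon index varies per
system, and the writes require sufficient privileges.

#include <stdio.h>

/* hypothetical example: switch to manual fan control and set ~50% duty */
static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fputs(val, f);
        return fclose(f);
}

int main(void)
{
        const char *base = "/sys/class/hwmon/hwmon0";
        char path[256];

        snprintf(path, sizeof(path), "%s/pwm1_enable", base);
        if (write_str(path, "1\n"))              /* 1 = manual control */
                return 1;
        snprintf(path, sizeof(path), "%s/pwm1", base);
        return write_str(path, "128\n") ? 1 : 0; /* 128/255 ~ 50% */
}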
2702
2703 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
2704 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
2705 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
2706 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
2707 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
2708 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
2709 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
2710 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
2711 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
2712 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
2713 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
2714 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
2715 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
2716 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
2717 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
2718 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
2719 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
2720 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
2721 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
2722 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
2723 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
2724 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
2725 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
2726 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
2727 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
2728 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
2729 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
2730 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
2731 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
2732 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
2733 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
2734 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
2735 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
2736 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
2737 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
2738 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
2739
2740 static struct attribute *hwmon_attributes[] = {
2741         &sensor_dev_attr_temp1_input.dev_attr.attr,
2742         &sensor_dev_attr_temp1_crit.dev_attr.attr,
2743         &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
2744         &sensor_dev_attr_temp2_input.dev_attr.attr,
2745         &sensor_dev_attr_temp2_crit.dev_attr.attr,
2746         &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
2747         &sensor_dev_attr_temp3_input.dev_attr.attr,
2748         &sensor_dev_attr_temp3_crit.dev_attr.attr,
2749         &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
2750         &sensor_dev_attr_temp1_emergency.dev_attr.attr,
2751         &sensor_dev_attr_temp2_emergency.dev_attr.attr,
2752         &sensor_dev_attr_temp3_emergency.dev_attr.attr,
2753         &sensor_dev_attr_temp1_label.dev_attr.attr,
2754         &sensor_dev_attr_temp2_label.dev_attr.attr,
2755         &sensor_dev_attr_temp3_label.dev_attr.attr,
2756         &sensor_dev_attr_pwm1.dev_attr.attr,
2757         &sensor_dev_attr_pwm1_enable.dev_attr.attr,
2758         &sensor_dev_attr_pwm1_min.dev_attr.attr,
2759         &sensor_dev_attr_pwm1_max.dev_attr.attr,
2760         &sensor_dev_attr_fan1_input.dev_attr.attr,
2761         &sensor_dev_attr_fan1_min.dev_attr.attr,
2762         &sensor_dev_attr_fan1_max.dev_attr.attr,
2763         &sensor_dev_attr_fan1_target.dev_attr.attr,
2764         &sensor_dev_attr_fan1_enable.dev_attr.attr,
2765         &sensor_dev_attr_in0_input.dev_attr.attr,
2766         &sensor_dev_attr_in0_label.dev_attr.attr,
2767         &sensor_dev_attr_in1_input.dev_attr.attr,
2768         &sensor_dev_attr_in1_label.dev_attr.attr,
2769         &sensor_dev_attr_power1_average.dev_attr.attr,
2770         &sensor_dev_attr_power1_cap_max.dev_attr.attr,
2771         &sensor_dev_attr_power1_cap_min.dev_attr.attr,
2772         &sensor_dev_attr_power1_cap.dev_attr.attr,
2773         &sensor_dev_attr_freq1_input.dev_attr.attr,
2774         &sensor_dev_attr_freq1_label.dev_attr.attr,
2775         &sensor_dev_attr_freq2_input.dev_attr.attr,
2776         &sensor_dev_attr_freq2_label.dev_attr.attr,
2777         NULL
2778 };
2779
2780 static umode_t hwmon_attributes_visible(struct kobject *kobj,
2781                                         struct attribute *attr, int index)
2782 {
2783         struct device *dev = kobj_to_dev(kobj);
2784         struct amdgpu_device *adev = dev_get_drvdata(dev);
2785         umode_t effective_mode = attr->mode;
2786
2787         /* under multi-vf mode, none of the hwmon attributes are supported */
2788         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2789                 return 0;
2790
2791         /* there is no fan under pp one vf mode */
2792         if (amdgpu_sriov_is_pp_one_vf(adev) &&
2793             (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2794              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2795              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2796              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2797              attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2798              attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2799              attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2800              attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2801              attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2802                 return 0;
2803
2804         /* Skip fan attributes if fan is not present */
2805         if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2806             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2807             attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2808             attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2809             attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2810             attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2811             attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2812             attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2813             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2814                 return 0;
2815
2816         /* Skip fan attributes on APU */
2817         if ((adev->flags & AMD_IS_APU) &&
2818             (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2819              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2820              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2821              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2822              attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2823              attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2824              attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2825              attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2826              attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2827                 return 0;
2828
2829         /* Skip limit attributes if DPM is not enabled */
2830         if (!adev->pm.dpm_enabled &&
2831             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
2832              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
2833              attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2834              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2835              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2836              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2837              attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2838              attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2839              attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2840              attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2841              attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2842                 return 0;
2843
2844         if (!is_support_sw_smu(adev)) {
2845                 /* mask fan attributes if we have no bindings for this asic to expose */
2846                 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
2847                      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
2848                     (!adev->powerplay.pp_funcs->get_fan_control_mode &&
2849                      attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
2850                         effective_mode &= ~S_IRUGO;
2851
2852                 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
2853                      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
2854                     (!adev->powerplay.pp_funcs->set_fan_control_mode &&
2855                      attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
2856                         effective_mode &= ~S_IWUSR;
2857         }
2858
2859         if (((adev->flags & AMD_IS_APU) ||
2860              adev->family == AMDGPU_FAMILY_SI ||        /* not implemented yet */
2861              adev->family == AMDGPU_FAMILY_KV) &&       /* not implemented yet */
2862             (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
2863              attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
             attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
2865              attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
2866                 return 0;
2867
2868         if (!is_support_sw_smu(adev)) {
2869                 /* hide max/min values if we can't both query and manage the fan */
2870                 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
2871                      !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
2872                      (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
2873                      !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
2874                     (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2875                      attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
2876                         return 0;
2877
2878                 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
2879                      !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
2880                     (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2881                      attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
2882                         return 0;
2883         }
2884
2885         if ((adev->family == AMDGPU_FAMILY_SI ||        /* not implemented yet */
2886              adev->family == AMDGPU_FAMILY_KV) &&       /* not implemented yet */
2887             (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
2888              attr == &sensor_dev_attr_in0_label.dev_attr.attr))
2889                 return 0;
2890
2891         /* only APUs have vddnb */
2892         if (!(adev->flags & AMD_IS_APU) &&
2893             (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
2894              attr == &sensor_dev_attr_in1_label.dev_attr.attr))
2895                 return 0;
2896
2897         /* no mclk on APUs */
2898         if ((adev->flags & AMD_IS_APU) &&
2899             (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
2900              attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
2901                 return 0;
2902
2903         /* only SOC15 dGPUs support hotspot and mem temperatures */
2904         if (((adev->flags & AMD_IS_APU) ||
2905              adev->asic_type < CHIP_VEGA10) &&
2906             (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
2907              attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
2908              attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
2909              attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
2910              attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
2911              attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
2912              attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
2913              attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
2914              attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
2915              attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
2916              attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
2917                 return 0;
2918
2919         return effective_mode;
2920 }
2921
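/*
 * All hwmon attributes live in a single group; the hwmon core consults
 * hwmon_attributes_visible() above for each of them, so sensors and fan
 * controls that a given asic (or SR-IOV mode) does not support are
 * simply hidden instead of being registered unconditionally.
 */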
2922 static const struct attribute_group hwmon_attrgroup = {
2923         .attrs = hwmon_attributes,
2924         .is_visible = hwmon_attributes_visible,
2925 };
2926
2927 static const struct attribute_group *hwmon_groups[] = {
2928         &hwmon_attrgroup,
2929         NULL
2930 };
2931
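/**
 * amdgpu_dpm_thermal_work_handler - worker for handling a thermal event
 *
 * @work: work struct embedded in amdgpu_device (pm.dpm.thermal.work)
 *
 * Reads the current GPU temperature and switches to the internal
 * thermal power state if the limits are exceeded; once the temperature
 * falls back below the minimum, restores the user-selected state.
 * Ends by recomputing the clocks for whichever state was chosen.
 */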
2932 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
2933 {
2934         struct amdgpu_device *adev =
2935                 container_of(work, struct amdgpu_device,
2936                              pm.dpm.thermal.work);
2937         /* switch to the thermal state */
2938         enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
2939         int temp, size = sizeof(temp);
2940
2941         if (!adev->pm.dpm_enabled)
2942                 return;
2943
2944         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
2945                                     (void *)&temp, &size)) {
2946                 if (temp < adev->pm.dpm.thermal.min_temp)
2947                         /* switch back the user state */
2948                         dpm_state = adev->pm.dpm.user_state;
2949         } else {
2950                 if (adev->pm.dpm.thermal.high_to_low)
2951                         /* switch back the user state */
2952                         dpm_state = adev->pm.dpm.user_state;
2953         }
2954         mutex_lock(&adev->pm.mutex);
2955         if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
2956                 adev->pm.dpm.thermal_active = true;
2957         else
2958                 adev->pm.dpm.thermal_active = false;
2959         adev->pm.dpm.state = dpm_state;
2960         mutex_unlock(&adev->pm.mutex);
2961
2962         amdgpu_pm_compute_clocks(adev);
2963 }
2964
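/**
 * amdgpu_dpm_pick_power_state - select the best matching power state
 *
 * @adev: amdgpu_device pointer
 * @dpm_state: requested power state class
 *
 * Scans the power state table for a state whose classification matches
 * the request, honoring single-display-only states, and falls back
 * through progressively more generic classes (e.g. UVD SD -> HD,
 * thermal -> ACPI -> battery -> performance) until one matches.
 *
 * Returns the chosen state, or NULL if nothing matched.
 */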
2965 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
2966                                                      enum amd_pm_state_type dpm_state)
2967 {
2968         int i;
2969         struct amdgpu_ps *ps;
2970         u32 ui_class;
        bool single_display = adev->pm.dpm.new_active_crtc_count < 2;
2973
2974         /* check if the vblank period is too short to adjust the mclk */
2975         if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
2976                 if (amdgpu_dpm_vblank_too_short(adev))
2977                         single_display = false;
2978         }
2979
        /* certain older asics have a separate 3D performance state,
2981          * so try that first if the user selected performance
2982          */
2983         if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
2984                 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
2985         /* balanced states don't exist at the moment */
2986         if (dpm_state == POWER_STATE_TYPE_BALANCED)
2987                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2988
2989 restart_search:
2990         /* Pick the best power state based on current conditions */
2991         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
2992                 ps = &adev->pm.dpm.ps[i];
2993                 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
2994                 switch (dpm_state) {
2995                 /* user states */
2996                 case POWER_STATE_TYPE_BATTERY:
2997                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
2998                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2999                                         if (single_display)
3000                                                 return ps;
3001                                 } else
3002                                         return ps;
3003                         }
3004                         break;
3005                 case POWER_STATE_TYPE_BALANCED:
3006                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
3007                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
3008                                         if (single_display)
3009                                                 return ps;
3010                                 } else
3011                                         return ps;
3012                         }
3013                         break;
3014                 case POWER_STATE_TYPE_PERFORMANCE:
3015                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
3016                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
3017                                         if (single_display)
3018                                                 return ps;
3019                                 } else
3020                                         return ps;
3021                         }
3022                         break;
3023                 /* internal states */
3024                 case POWER_STATE_TYPE_INTERNAL_UVD:
3025                         if (adev->pm.dpm.uvd_ps)
3026                                 return adev->pm.dpm.uvd_ps;
3027                         else
3028                                 break;
3029                 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
3030                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
3031                                 return ps;
3032                         break;
3033                 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
3034                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
3035                                 return ps;
3036                         break;
3037                 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
3038                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
3039                                 return ps;
3040                         break;
3041                 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
3042                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
3043                                 return ps;
3044                         break;
3045                 case POWER_STATE_TYPE_INTERNAL_BOOT:
3046                         return adev->pm.dpm.boot_ps;
3047                 case POWER_STATE_TYPE_INTERNAL_THERMAL:
3048                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
3049                                 return ps;
3050                         break;
3051                 case POWER_STATE_TYPE_INTERNAL_ACPI:
3052                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
3053                                 return ps;
3054                         break;
3055                 case POWER_STATE_TYPE_INTERNAL_ULV:
3056                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
3057                                 return ps;
3058                         break;
3059                 case POWER_STATE_TYPE_INTERNAL_3DPERF:
3060                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
3061                                 return ps;
3062                         break;
3063                 default:
3064                         break;
3065                 }
3066         }
3067         /* use a fallback state if we didn't match */
3068         switch (dpm_state) {
3069         case POWER_STATE_TYPE_INTERNAL_UVD_SD:
3070                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
3071                 goto restart_search;
3072         case POWER_STATE_TYPE_INTERNAL_UVD_HD:
3073         case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
3074         case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
3075                 if (adev->pm.dpm.uvd_ps) {
3076                         return adev->pm.dpm.uvd_ps;
3077                 } else {
3078                         dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3079                         goto restart_search;
3080                 }
3081         case POWER_STATE_TYPE_INTERNAL_THERMAL:
3082                 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
3083                 goto restart_search;
3084         case POWER_STATE_TYPE_INTERNAL_ACPI:
3085                 dpm_state = POWER_STATE_TYPE_BATTERY;
3086                 goto restart_search;
3087         case POWER_STATE_TYPE_BATTERY:
3088         case POWER_STATE_TYPE_BALANCED:
3089         case POWER_STATE_TYPE_INTERNAL_3DPERF:
3090                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3091                 goto restart_search;
3092         default:
3093                 break;
3094         }
3095
3096         return NULL;
3097 }
3098
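/**
 * amdgpu_dpm_change_power_state_locked - switch to the requested state
 *
 * @adev: amdgpu_device pointer
 *
 * Resolves the effective DPM state (thermal and UVD overrides win over
 * the user selection), picks a matching power state, and programs it
 * unless the hardware reports that the new state equals the current
 * one.  Re-applies the forced performance level afterwards, dropping to
 * the low level while a thermal event is active.  Expects the caller to
 * hold adev->pm.mutex.
 */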
3099 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
3100 {
3101         struct amdgpu_ps *ps;
3102         enum amd_pm_state_type dpm_state;
3103         int ret;
3104         bool equal = false;
3105
3106         /* if dpm init failed */
3107         if (!adev->pm.dpm_enabled)
3108                 return;
3109
3110         if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
3111                 /* add other state override checks here */
3112                 if ((!adev->pm.dpm.thermal_active) &&
3113                     (!adev->pm.dpm.uvd_active))
3114                         adev->pm.dpm.state = adev->pm.dpm.user_state;
3115         }
3116         dpm_state = adev->pm.dpm.state;
3117
3118         ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
3119         if (ps)
3120                 adev->pm.dpm.requested_ps = ps;
3121         else
3122                 return;
3123
3124         if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
                printk(KERN_INFO "switching from power state:\n");
                amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
                printk(KERN_INFO "switching to power state:\n");
3128                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
3129         }
3130
3131         /* update whether vce is active */
3132         ps->vce_active = adev->pm.dpm.vce_active;
3133         if (adev->powerplay.pp_funcs->display_configuration_changed)
3134                 amdgpu_dpm_display_configuration_changed(adev);
3135
3136         ret = amdgpu_dpm_pre_set_power_state(adev);
3137         if (ret)
3138                 return;
3139
3140         if (adev->powerplay.pp_funcs->check_state_equal) {
                if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
3142                         equal = false;
3143         }
3144
3145         if (equal)
3146                 return;
3147
3148         amdgpu_dpm_set_power_state(adev);
3149         amdgpu_dpm_post_set_power_state(adev);
3150
3151         adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
3152         adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
3153
3154         if (adev->powerplay.pp_funcs->force_performance_level) {
3155                 if (adev->pm.dpm.thermal_active) {
3156                         enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
3157                         /* force low perf level for thermal */
3158                         amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
3159                         /* save the user's level */
3160                         adev->pm.dpm.forced_level = level;
3161                 } else {
3162                         /* otherwise, user selected level */
3163                         amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
3164                 }
3165         }
3166 }
3167
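/**
 * amdgpu_dpm_enable_uvd - power gate/ungate the UVD block
 *
 * @adev: amdgpu_device pointer
 * @enable: true to ungate (power up), false to gate (power down)
 *
 * Requests the gating change from the SMU.  On Stoney, 4K decode also
 * forces the low memory pstate off while UVD is in use (see the
 * update_nbdpm_pstate call below).
 */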
3168 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
3169 {
3170         int ret = 0;
3171
3172         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
3173         if (ret)
                DRM_ERROR("DPM %s UVD failed, ret = %d\n",
3175                           enable ? "enable" : "disable", ret);
3176
3177         /* enable/disable Low Memory PState for UVD (4k videos) */
3178         if (adev->asic_type == CHIP_STONEY &&
            adev->uvd.decode_image_width >= WIDTH_4K) {
3180                 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
3181
3182                 if (hwmgr && hwmgr->hwmgr_func &&
3183                     hwmgr->hwmgr_func->update_nbdpm_pstate)
3184                         hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
3185                                                                !enable,
3186                                                                true);
3187         }
3188 }
3189
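/**
 * amdgpu_dpm_enable_vce - power gate/ungate the VCE block
 *
 * @adev: amdgpu_device pointer
 * @enable: true to ungate (power up), false to gate (power down)
 */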
3190 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
3191 {
3192         int ret = 0;
3193
3194         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
3195         if (ret)
                DRM_ERROR("DPM %s VCE failed, ret = %d\n",
3197                           enable ? "enable" : "disable", ret);
3198 }
3199
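/**
 * amdgpu_pm_print_power_states - dump the power state table
 *
 * @adev: amdgpu_device pointer
 *
 * Prints every power state through the asic-specific print_power_state
 * callback, when one is registered.
 */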
3200 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
3201 {
3202         int i;
3203
        if (!adev->powerplay.pp_funcs->print_power_state)
3205                 return;
3206
3207         for (i = 0; i < adev->pm.dpm.num_ps; i++)
3208                 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
3210 }
3211
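/**
 * amdgpu_dpm_enable_jpeg - power gate/ungate the JPEG block
 *
 * @adev: amdgpu_device pointer
 * @enable: true to ungate (power up), false to gate (power down)
 */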
3212 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
3213 {
3214         int ret = 0;
3215
3216         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
3217         if (ret)
                DRM_ERROR("DPM %s JPEG failed, ret = %d\n",
3219                           enable ? "enable" : "disable", ret);
3220 }
3221
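/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware if required
 *
 * @adev: amdgpu_device pointer
 * @smu_version: pointer filled with the loaded firmware version
 *
 * Calls the powerplay load_firmware hook when one is registered and
 * reports the resulting firmware version back to the caller.
 *
 * Returns 0 on success (or when no loader is registered), negative
 * error code on failure.
 */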
3222 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
3223 {
3224         int r;
3225
3226         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
3227                 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
3228                 if (r) {
3229                         pr_err("smu firmware loading failed\n");
3230                         return r;
3231                 }
3232                 *smu_version = adev->pm.fw_version;
3233         }
3234         return 0;
3235 }
3236
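/**
 * amdgpu_pm_sysfs_init - register the power management sysfs interface
 *
 * @adev: amdgpu_device pointer
 *
 * Registers the hwmon device plus the power_dpm_state,
 * power_dpm_force_performance_level and pp_* attributes under the
 * device's sysfs directory, skipping attributes the asic does not
 * support (e.g. standalone mclk/socclk/fclk setting on Arcturus, or
 * mem_busy_percent and pcie_bw on APUs), then registers the debugfs
 * entry.  Once this succeeds, userspace can query the interface, e.g.
 * (the exact card number depends on the system):
 *
 *   cat /sys/class/drm/card0/device/power_dpm_state
 *
 * Returns 0 on success, negative error code on failure.
 */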
3237 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3238 {
3239         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
3240         int ret;
3241
3242         if (adev->pm.sysfs_initialized)
3243                 return 0;
3244
        if (!adev->pm.dpm_enabled)
3246                 return 0;
3247
3248         adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3249                                                                    DRIVER_NAME, adev,
3250                                                                    hwmon_groups);
3251         if (IS_ERR(adev->pm.int_hwmon_dev)) {
3252                 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3253                 dev_err(adev->dev,
3254                         "Unable to register hwmon device: %d\n", ret);
3255                 return ret;
3256         }
3257
3258         ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
3259         if (ret) {
3260                 DRM_ERROR("failed to create device file for dpm state\n");
3261                 return ret;
3262         }
3263         ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
3264         if (ret) {
                DRM_ERROR("failed to create device file for dpm force performance level\n");
3266                 return ret;
3267         }
3268
3270         ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
3271         if (ret) {
3272                 DRM_ERROR("failed to create device file pp_num_states\n");
3273                 return ret;
3274         }
3275         ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
3276         if (ret) {
3277                 DRM_ERROR("failed to create device file pp_cur_state\n");
3278                 return ret;
3279         }
3280         ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
3281         if (ret) {
3282                 DRM_ERROR("failed to create device file pp_force_state\n");
3283                 return ret;
3284         }
3285         ret = device_create_file(adev->dev, &dev_attr_pp_table);
3286         if (ret) {
3287                 DRM_ERROR("failed to create device file pp_table\n");
3288                 return ret;
3289         }
3290
3291         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
3292         if (ret) {
3293                 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
3294                 return ret;
3295         }
3296
3297         /* Arcturus does not support standalone mclk/socclk/fclk level setting */
3298         if (adev->asic_type == CHIP_ARCTURUS) {
3299                 dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
3300                 dev_attr_pp_dpm_mclk.store = NULL;
3301
3302                 dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
3303                 dev_attr_pp_dpm_socclk.store = NULL;
3304
3305                 dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
3306                 dev_attr_pp_dpm_fclk.store = NULL;
3307         }
3308
3309         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
3310         if (ret) {
3311                 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
3312                 return ret;
3313         }
3314         if (adev->asic_type >= CHIP_VEGA10) {
3315                 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
3316                 if (ret) {
3317                         DRM_ERROR("failed to create device file pp_dpm_socclk\n");
3318                         return ret;
3319                 }
3320                 if (adev->asic_type != CHIP_ARCTURUS) {
3321                         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
3322                         if (ret) {
3323                                 DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
3324                                 return ret;
3325                         }
3326                 }
3327         }
3328         if (adev->asic_type >= CHIP_VEGA20) {
3329                 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
3330                 if (ret) {
3331                         DRM_ERROR("failed to create device file pp_dpm_fclk\n");
3332                         return ret;
3333                 }
3334         }
3335         if (adev->asic_type != CHIP_ARCTURUS) {
3336                 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
3337                 if (ret) {
3338                         DRM_ERROR("failed to create device file pp_dpm_pcie\n");
3339                         return ret;
3340                 }
3341         }
3342         ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
3343         if (ret) {
3344                 DRM_ERROR("failed to create device file pp_sclk_od\n");
3345                 return ret;
3346         }
3347         ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
3348         if (ret) {
3349                 DRM_ERROR("failed to create device file pp_mclk_od\n");
3350                 return ret;
3351         }
3352         ret = device_create_file(adev->dev,
3353                         &dev_attr_pp_power_profile_mode);
3354         if (ret) {
                DRM_ERROR("failed to create device file pp_power_profile_mode\n");
3357                 return ret;
3358         }
3359         if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
3360             (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
3361                 ret = device_create_file(adev->dev,
3362                                 &dev_attr_pp_od_clk_voltage);
3363                 if (ret) {
                        DRM_ERROR("failed to create device file pp_od_clk_voltage\n");
3366                         return ret;
3367                 }
3368         }
3369         ret = device_create_file(adev->dev,
3370                         &dev_attr_gpu_busy_percent);
3371         if (ret) {
                DRM_ERROR("failed to create device file gpu_busy_percent\n");
3374                 return ret;
3375         }
3376         /* APU does not have its own dedicated memory */
3377         if (!(adev->flags & AMD_IS_APU) &&
3378              (adev->asic_type != CHIP_VEGA10)) {
3379                 ret = device_create_file(adev->dev,
3380                                 &dev_attr_mem_busy_percent);
3381                 if (ret) {
                        DRM_ERROR("failed to create device file mem_busy_percent\n");
3384                         return ret;
3385                 }
3386         }
3387         /* PCIe Perf counters won't work on APU nodes */
3388         if (!(adev->flags & AMD_IS_APU)) {
3389                 ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
3390                 if (ret) {
3391                         DRM_ERROR("failed to create device file pcie_bw\n");
3392                         return ret;
3393                 }
3394         }
        if (adev->unique_id) {
                ret = device_create_file(adev->dev, &dev_attr_unique_id);
                if (ret) {
                        DRM_ERROR("failed to create device file unique_id\n");
                        return ret;
                }
        }
3401         ret = amdgpu_debugfs_pm_init(adev);
3402         if (ret) {
3403                 DRM_ERROR("Failed to register debugfs file for dpm!\n");
3404                 return ret;
3405         }
3406
3407         if ((adev->asic_type >= CHIP_VEGA10) &&
3408             !(adev->flags & AMD_IS_APU)) {
3409                 ret = device_create_file(adev->dev,
3410                                 &dev_attr_pp_features);
3411                 if (ret) {
                        DRM_ERROR("failed to create device file pp_features\n");
3414                         return ret;
3415                 }
3416         }
3417
3418         adev->pm.sysfs_initialized = true;
3419
3420         return 0;
3421 }
3422
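/**
 * amdgpu_pm_sysfs_fini - tear down the power management sysfs interface
 *
 * @adev: amdgpu_device pointer
 *
 * Removes every attribute created by amdgpu_pm_sysfs_init(), mirroring
 * the same asic-specific conditions, and unregisters the hwmon device.
 */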
3423 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3424 {
3425         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
3426
        if (!adev->pm.dpm_enabled)
3428                 return;
3429
3430         if (adev->pm.int_hwmon_dev)
3431                 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3432         device_remove_file(adev->dev, &dev_attr_power_dpm_state);
3433         device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
3434
3435         device_remove_file(adev->dev, &dev_attr_pp_num_states);
3436         device_remove_file(adev->dev, &dev_attr_pp_cur_state);
3437         device_remove_file(adev->dev, &dev_attr_pp_force_state);
3438         device_remove_file(adev->dev, &dev_attr_pp_table);
3439
3440         device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
3441         device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
3442         if (adev->asic_type >= CHIP_VEGA10) {
3443                 device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
3444                 if (adev->asic_type != CHIP_ARCTURUS)
3445                         device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
3446         }
3447         if (adev->asic_type != CHIP_ARCTURUS)
3448                 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
3449         if (adev->asic_type >= CHIP_VEGA20)
3450                 device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
3451         device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
3452         device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
3453         device_remove_file(adev->dev,
3454                         &dev_attr_pp_power_profile_mode);
3455         if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
3456             (!is_support_sw_smu(adev) && hwmgr->od_enabled))
3457                 device_remove_file(adev->dev,
3458                                 &dev_attr_pp_od_clk_voltage);
3459         device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
3460         if (!(adev->flags & AMD_IS_APU) &&
3461              (adev->asic_type != CHIP_VEGA10))
3462                 device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
3463         if (!(adev->flags & AMD_IS_APU))
3464                 device_remove_file(adev->dev, &dev_attr_pcie_bw);
3465         if (adev->unique_id)
3466                 device_remove_file(adev->dev, &dev_attr_unique_id);
3467         if ((adev->asic_type >= CHIP_VEGA10) &&
3468             !(adev->flags & AMD_IS_APU))
3469                 device_remove_file(adev->dev, &dev_attr_pp_features);
3470 }
3471
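/**
 * amdgpu_pm_compute_clocks - re-evaluate clocking for the current load
 *
 * @adev: amdgpu_device pointer
 *
 * Updates the display bandwidth requirements, waits for the rings to
 * drain, then lets the SMU (or, on the legacy paths, the powerplay
 * dispatcher or the direct DPM code) pick clocks for the new display
 * configuration.  Called whenever displays or power states change.
 */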
3472 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
3473 {
3474         int i = 0;
3475
3476         if (!adev->pm.dpm_enabled)
3477                 return;
3478
3479         if (adev->mode_info.num_crtc)
3480                 amdgpu_display_bandwidth_update(adev);
3481
3482         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3483                 struct amdgpu_ring *ring = adev->rings[i];
3484                 if (ring && ring->sched.ready)
3485                         amdgpu_fence_wait_empty(ring);
3486         }
3487
3488         if (is_support_sw_smu(adev)) {
3489                 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
3490                 smu_handle_task(&adev->smu,
3491                                 smu_dpm->dpm_level,
3492                                 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
3493                                 true);
3494         } else {
3495                 if (adev->powerplay.pp_funcs->dispatch_tasks) {
3496                         if (!amdgpu_device_has_dc_support(adev)) {
3497                                 mutex_lock(&adev->pm.mutex);
3498                                 amdgpu_dpm_get_active_displays(adev);
3499                                 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
3500                                 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
3501                                 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
3502                                 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
3503                                 if (adev->pm.pm_display_cfg.vrefresh > 120)
3504                                         adev->pm.pm_display_cfg.min_vblank_time = 0;
3505                                 if (adev->powerplay.pp_funcs->display_configuration_change)
3506                                         adev->powerplay.pp_funcs->display_configuration_change(
3507                                                                         adev->powerplay.pp_handle,
3508                                                                         &adev->pm.pm_display_cfg);
3509                                 mutex_unlock(&adev->pm.mutex);
3510                         }
3511                         amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
3512                 } else {
3513                         mutex_lock(&adev->pm.mutex);
3514                         amdgpu_dpm_get_active_displays(adev);
3515                         amdgpu_dpm_change_power_state_locked(adev);
3516                         mutex_unlock(&adev->pm.mutex);
3517                 }
3518         }
3519 }
3520
3521 /*
3522  * Debugfs info
3523  */
3524 #if defined(CONFIG_DEBUG_FS)
3525
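/*
 * Query the sensor interface for clocks, voltages, power, temperature
 * and load, plus the UVD/VCE (or VCN on newer asics) clock state, and
 * pretty-print the results into the amdgpu_pm_info debugfs file.
 */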
3526 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3527 {
3528         uint32_t value;
3529         uint64_t value64;
3530         uint32_t query = 0;
3531         int size;
3532
3533         /* GPU Clocks */
3534         size = sizeof(value);
3535         seq_printf(m, "GFX Clocks and Power:\n");
3536         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3537                 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3538         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3539                 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3540         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3541                 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3542         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3543                 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3544         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3545                 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3546         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3547                 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3548         size = sizeof(uint32_t);
3549         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3550                 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3551         size = sizeof(value);
3552         seq_printf(m, "\n");
3553
3554         /* GPU Temp */
3555         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3556                 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3557
3558         /* GPU Load */
3559         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3560                 seq_printf(m, "GPU Load: %u %%\n", value);
3561         /* MEM Load */
3562         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3563                 seq_printf(m, "MEM Load: %u %%\n", value);
3564
3565         seq_printf(m, "\n");
3566
3567         /* SMC feature mask */
3568         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3569                 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3570
3571         if (adev->asic_type > CHIP_VEGA20) {
3572                 /* VCN clocks */
3573                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3574                         if (!value) {
3575                                 seq_printf(m, "VCN: Disabled\n");
3576                         } else {
3577                                 seq_printf(m, "VCN: Enabled\n");
3578                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3579                                         seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3580                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3581                                         seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3582                         }
3583                 }
3584                 seq_printf(m, "\n");
3585         } else {
3586                 /* UVD clocks */
3587                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3588                         if (!value) {
3589                                 seq_printf(m, "UVD: Disabled\n");
3590                         } else {
3591                                 seq_printf(m, "UVD: Enabled\n");
3592                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3593                                         seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3594                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3595                                         seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3596                         }
3597                 }
3598                 seq_printf(m, "\n");
3599
3600                 /* VCE clocks */
3601                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3602                         if (!value) {
3603                                 seq_printf(m, "VCE: Disabled\n");
3604                         } else {
3605                                 seq_printf(m, "VCE: Enabled\n");
3606                                 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3607                                         seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3608                         }
3609                 }
3610         }
3611
3612         return 0;
3613 }
3614
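/*
 * Print the on/off state of every known clockgating feature in the
 * flags mask, using the clocks[] flag-to-name table at the top of this
 * file.
 */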
3615 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3616 {
3617         int i;
3618
3619         for (i = 0; clocks[i].flag; i++)
3620                 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3621                            (flags & clocks[i].flag) ? "On" : "Off");
3622 }
3623
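/*
 * Top-level amdgpu_pm_info debugfs callback: takes a runtime PM
 * reference so the GPU is awake, dumps the clockgating flags and then
 * either the asic-specific performance level info or the generic
 * powerplay telemetry, and finally drops the runtime PM reference.
 */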
3624 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
3625 {
3626         struct drm_info_node *node = (struct drm_info_node *) m->private;
3627         struct drm_device *dev = node->minor->dev;
3628         struct amdgpu_device *adev = dev->dev_private;
3629         u32 flags = 0;
3630         int r;
3631
3632         r = pm_runtime_get_sync(dev->dev);
3633         if (r < 0)
3634                 return r;
3635
3636         amdgpu_device_ip_get_clockgating_state(adev, &flags);
3637         seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3638         amdgpu_parse_cg_state(m, flags);
3639         seq_printf(m, "\n");
3640
3641         if (!adev->pm.dpm_enabled) {
3642                 seq_printf(m, "dpm not enabled\n");
3643                 pm_runtime_mark_last_busy(dev->dev);
3644                 pm_runtime_put_autosuspend(dev->dev);
3645                 return 0;
3646         }
3647
3648         if (!is_support_sw_smu(adev) &&
3649             adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3650                 mutex_lock(&adev->pm.mutex);
                /* the outer check guarantees the callback exists */
                adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
3655                 mutex_unlock(&adev->pm.mutex);
3656                 r = 0;
3657         } else {
3658                 r = amdgpu_debugfs_pm_info_pp(m, adev);
3659         }
3660
3661         pm_runtime_mark_last_busy(dev->dev);
3662         pm_runtime_put_autosuspend(dev->dev);
3663
3664         return r;
3665 }
3666
3667 static const struct drm_info_list amdgpu_pm_info_list[] = {
3668         {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
3669 };
3670 #endif
3671
3672 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3673 {
3674 #if defined(CONFIG_DEBUG_FS)
3675         return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
3676 #else
3677         return 0;
3678 #endif
3679 }