/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

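/* Handle an ACPI AC/DC power-source change event: refresh the cached
 * ac_power flag and, when the asic implements it, notify the SMC via BAPM.
 */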
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pp_enabled)
		/* TODO */
		return;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
							struct device_attribute *attr,
							char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
				(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
				(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
							struct device_attribute *attr,
							const char *buf,
							size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len;

	if (adev->pp_enabled)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i;

	if (adev->pp_force_state_enabled && adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;

		return snprintf(buf, PAGE_SIZE, "%d\n", i);
	} else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (adev->pp_enabled) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states)) {
			count = -EINVAL;
			goto fail;
		}

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];
		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
			adev->pp_force_state_enabled = true;
		}
	}
fail:
	return count;
}

static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, i;

	if (adev->pp_enabled)
		size = amdgpu_dpm_get_pp_table(adev, &table);
	else
		return 0;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	for (i = 0; i < size; i++) {
		sprintf(buf + i, "%02x", table[i]);
	}
	sprintf(buf + i, "\n");

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_enabled)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf) - 1; i++) {
		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf) - 1; i++) {
		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf) - 1; i++) {
		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);
		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
fail:
	return count;
}

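/* sysfs files exposed in the device directory: power_dpm_state,
 * power_dpm_force_performance_level and the pp_* files wired to the
 * getters/setters above.
 */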
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);

static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}

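/* hwmon interface: temperature limits and fan control exposed through the
 * standard hwmon sysfs names (temp1_*, pwm1*).
 */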
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	NULL
};

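/* Decide which hwmon attributes to expose for this asic: hide limit and fan
 * files when DPM is disabled, when there is no fan, or when the callbacks
 * needed to query/manage them are not implemented.
 */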
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

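/* Thermal worker: move the GPU onto the internal thermal power state while
 * the temperature is out of range and restore the user state otherwise,
 * then recompute clocks.
 */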
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

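/* Walk the power state table and return the best match for the requested
 * state class, falling back through related classes (e.g. UVD SD -> HD ->
 * performance) when no state of the requested class exists.
 */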
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

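/* Called with pm.mutex held: pick the power state for the current request,
 * skip the reprogram when nothing relevant changed, otherwise drain the
 * rings and program the new state through the dpm callbacks.
 */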
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != adev->pm.dpm.vce_active)
			goto force;
		if (adev->flags & AMD_IS_APU) {
			/* for APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (adev->pm.dpm.new_active_crtcs ==
			    adev->pm.dpm.current_active_crtcs) {
				return;
			} else if ((adev->pm.dpm.current_active_crtc_count > 1) &&
				   (adev->pm.dpm.new_active_crtc_count > 1)) {
				/* update display watermarks based on new power state */
				amdgpu_display_bandwidth_update(adev);
				/* update displays */
				amdgpu_dpm_display_configuration_changed(adev);
				adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
				adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
			}
			return;
		}
	}

force:
	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* update display watermarks based on new power state */
	amdgpu_display_bandwidth_update(adev);

	/* wait for the rings to drain */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* program the new power state */
	amdgpu_dpm_set_power_state(adev);

	/* update current power state */
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps;

	amdgpu_dpm_post_set_power_state(adev);

	/* update displays */
	amdgpu_dpm_display_configuration_changed(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_uvd(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_uvd) {
			mutex_lock(&adev->pm.mutex);
			/* enable/disable UVD */
			amdgpu_dpm_powergate_uvd(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = true;
				adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.uvd_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled)
		amdgpu_dpm_powergate_vce(adev, !enable);
	else {
		if (adev->pm.funcs->powergate_vce) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_powergate_vce(adev, !enable);
			mutex_unlock(&adev->pm.mutex);
		} else {
			if (enable) {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = true;
				/* XXX select vce level based on ring/task */
				adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
				mutex_unlock(&adev->pm.mutex);
			} else {
				mutex_lock(&adev->pm.mutex);
				adev->pm.dpm.vce_active = false;
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_pm_compute_clocks(adev);
		}
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		/* TO DO */
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}

	if (adev->pp_enabled) {
		ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
		if (ret) {
			DRM_ERROR("failed to create device file pp_num_states\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_cur_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_force_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_table);
		if (ret) {
			DRM_ERROR("failed to create device file pp_table\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_sclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_mclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_pcie\n");
			return ret;
		}
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (adev->pp_enabled) {
		device_remove_file(adev->dev, &dev_attr_pp_num_states);
		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
		device_remove_file(adev->dev, &dev_attr_pp_force_state);
		device_remove_file(adev->dev, &dev_attr_pp_table);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	}
}

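/* Re-evaluate clocks after a display or power-source change: with powerplay
 * this dispatches a display-config-change task, otherwise it recounts the
 * active crtcs, refreshes the AC/DC status and triggers a locked power-state
 * change.
 */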
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		int i = 0;

		amdgpu_display_bandwidth_update(adev);
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}