/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("mvc ");
	}
	printk("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus: ");
	if (rps == adev->pm.dpm.current_ps)
		printk("c ");
	if (rps == adev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == adev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

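/*
 * Vblank time is derived from the current hw mode of the first enabled
 * CRTC: the number of pixels scanned out during the vertical blanking
 * period (htotal * (vblank_end - vdisplay + 2 * v_border)) divided by
 * the pixel clock (in kHz), scaled to microseconds.  With no active
 * displays the time is reported as 0xffffffff (effectively unlimited).
 */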
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

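/*
 * Helper used by the SMC/DPM code to split a percentage-scaled value into
 * a power-of-four exponent (*u) and a mantissa (*p): i_c is right-shifted
 * by p_b to find the highest set bit, *u is derived from that bit
 * position, and *p is i_c divided by 4^(*u).
 */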
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

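/*
 * Splits the window h around the target value t into a low part (al) and
 * a high part (ah), weighted by the ratio of the high and low clock
 * frequencies fh/fl, and returns the resulting low/high thresholds
 * through *tl and *th.  Returns -EINVAL if the clocks are zero or
 * inverted.
 */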
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

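/*
 * The PowerPlay data in the VBIOS is versioned: the same master table
 * offset may hold any of the table or fan-table revisions below, so they
 * are overlaid in unions and the parser selects the right view based on
 * the table size and format fields.
 */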
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

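/*
 * Each ATOM dependency record stores its clock as a 16-bit low word plus
 * an 8-bit high byte, so the 24-bit clock is reassembled as
 * le16_to_cpu(usClockLow) | (ucClockHigh << 16) when copied into the
 * driver's own dependency tables.
 */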
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
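		/*
		 * The VCE/UVD/SAMU/ACP sub-tables below begin with a one byte
		 * revision, followed by a counted array of clock-info records
		 * and, for VCE/UVD, voltage-limit/state tables packed directly
		 * behind it; that is what the manual "+ 1" and array-size
		 * arithmetic in the pointer casts accounts for.
		 */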
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

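/*
 * Releases every dynamic-state table allocated by
 * amdgpu_parse_extended_power_table().  All pointers are simply handed to
 * kfree(), which ignores NULL, so this is also safe to call on a
 * partially parsed table (it is used above as the error-path cleanup).
 */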
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

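/*
 * Reads the thermal controller descriptor out of the PowerPlay table,
 * records the fan characteristics (tach pulses per revolution, min/max
 * RPM), and either selects the matching internal sensor type or registers
 * an i2c device for an external controller chip.
 */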
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

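/*
 * If the ASIC already reports a specific PCIe gen, that value wins;
 * otherwise fall back to the fastest speed that both the system mask and
 * the requested default allow, degrading from gen3 to gen2 to gen1.
 */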
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

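/*
 * Maps a PCIe lane count (1/2/4/8/12/16) to the encoded field value used
 * by the DPM link-width programming; unsupported counts, and anything
 * above 16 lanes, encode as 0.
 */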
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}

struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
{
	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}