/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
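/* Local register definitions: the MP0 MISC offsets used by the DRM clock
 * gating and light sleep helpers below, and the HDP power-control masks
 * that were renamed on Vega20.
 */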
#define mmMP0_MISC_CGTT_CTRL0				0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX			0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL			0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX		0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL				0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX			0
/*
 * Indirect registers accessor
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u64 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* read low 32 bits */
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);

	/* read high 32 bits */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	r |= ((u64)RREG32(data) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* write low 32 bits */
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, (u32)(v & 0xffffffffULL));
	(void)RREG32(data);

	/* write high 32 bits */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	WREG32(data, (u32)(v >> 32));
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}
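/* Mode1 reset: full ASIC reset through the PSP. Bus mastering is disabled
 * and PCI config state is saved/restored around the reset, then we wait for
 * the memory controller to report a valid size before continuing.
 */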
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = &adev->smu;

		*cap = smu_baco_is_support(smu);
		return 0;
	} else {
		void *pp_handle = adev->powerplay.pp_handle;
		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

		if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
			*cap = false;
			return -ENOENT;
		}

		return pp_funcs->get_asic_baco_capability(pp_handle, cap);
	}
}
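/* BACO (Bus Active, Chip Off) reset, driven either through the SW SMU or the
 * powerplay set_asic_baco_state() handlers. Doorbell interrupts are masked
 * around the reset when RAS is enabled so the NBIF does not get stuck during
 * RAS recovery.
 */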
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	/* avoid NBIF getting stuck during RAS recovery in BACO reset */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	dev_info(adev->dev, "GPU BACO reset\n");

	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = &adev->smu;

		if (smu_baco_reset(smu))
			return -EIO;
	} else {
		void *pp_handle = adev->powerplay.pp_handle;
		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

		if (!pp_funcs || !pp_funcs->get_asic_baco_state ||
		    !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		if (pp_funcs->set_asic_baco_state(pp_handle, 1))
			return -EIO;

		/* exit BACO state */
		if (pp_funcs->set_asic_baco_state(pp_handle, 0))
			return -EIO;
	}

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
static int soc15_mode2_reset(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev))
		return smu_mode2_reset(&adev->smu);
	if (!adev->powerplay.pp_funcs ||
	    !adev->powerplay.pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
}
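/* Pick the reset method for this ASIC: mode2 for the APUs, BACO where the
 * firmware reports support for it, and mode1 as the fallback.
 */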
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		soc15_asic_get_baco_capability(adev, &baco_reset);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			soc15_asic_get_baco_capability(adev, &baco_reset);

		/* Do not use BACO on a Vega20 that is part of an XGMI hive
		 * or has RAS enabled; fall back to mode1 reset instead.
		 */
		if (baco_reset) {
			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
			struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

			if (hive || (ras && ras->supported))
				baco_reset = false;
		}
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		return soc15_mode2_reset(adev);
	default:
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		return soc15_asic_mode1_reset(adev);
	}
}
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}
static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
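/* Register the per-ASIC IP blocks (common, GMC, IH, PSP, GFX, SDMA, display
 * and multimedia) in their init order, after selecting the NBIO/DF callbacks
 * and the register base layout for the detected chip.
 */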
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df_funcs = &df_v3_6_funcs;
	else
		adev->df_funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP need to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			if (is_support_sw_smu(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		if (is_support_sw_smu(adev))
			amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}
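/* Estimate PCIE utilization by sampling two TX perf counters (received
 * messages and posted requests sent) over a one second window.
 */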
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				  uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}
static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}
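/* ASIC callback tables. vega20_asic_funcs differs from soc15_asic_funcs only
 * in the doorbell index init and the PCIE usage counter helpers.
 */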
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.reset_method = &soc15_asic_reset_method,
};
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
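	/* Per-ASIC clock-gating (cg_flags) and power-gating (pg_flags) feature
	 * masks, plus the user-visible external revision id.
	 */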
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->rev_id >= 0x8)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->pdev->device == 0x15d8)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->rev_id >= 0x8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else if (adev->pdev->device == 0x15d8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x91;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->nbio.funcs->ras_late_init)
		r = adev->nbio.funcs->ras_late_init(adev);

	return r;
}
static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df_funcs->sw_init(adev);

	return 0;
}
static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_nbio_ras_fini(adev);
	adev->df_funcs->sw_fini(adev);
	return 0;
}
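/* Program the SDMA and IH doorbell apertures on bare metal; under SR-IOV the
 * hypervisor owns these ranges.
 */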
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* sdma/ih doorbell range are programmed by hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						    adev->irq.ih.doorbell_index);
	}
}
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writing not
	 * in SDMA/IH/MM/ACV range will be routed to CP. So
	 * we need to init SDMA/IH/MM/ACV doorbell range prior
	 * to CP ip block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}
static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
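/* HDP memory light sleep: Vega20 and Arcturus use the HDP_MEM_POWER_CTRL
 * register, other SOC15 parts use HDP_MEM_POWER_LS.
 */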
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 | 0x02000000 | 0x04000000 | 0x08000000 |
			  0x10000000 | 0x20000000 | 0x40000000 | 0x80000000);
	else
		data |= (0x01000000 | 0x02000000 | 0x04000000 | 0x08000000 |
			 0x10000000 | 0x20000000 | 0x40000000 | 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}
static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
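/* Dispatch clockgating enable/disable to the NBIO/DF callbacks and the SOC15
 * helpers above, based on ASIC type; SR-IOV guests skip it entirely.
 */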
static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->df_funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_ARCTURUS:
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}
static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df_funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}
const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};