/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "dce_virtual.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0				0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX			0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL			0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX		0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL				0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX			0

/*
 * Indirect registers accessor
 */
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address);
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address);
        WREG32(data, v);
        (void)RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u64 r;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        /* read low 32 bits */
        WREG32(address, reg);
        (void)RREG32(address);
        r = RREG32(data);

        /* read high 32 bits */
        WREG32(address, reg + 4);
        (void)RREG32(address);
        r |= ((u64)RREG32(data) << 32);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
        unsigned long flags, address, data;

        address = adev->nbio_funcs->get_pcie_index_offset(adev);
        data = adev->nbio_funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        /* write low 32 bits */
        WREG32(address, reg);
        (void)RREG32(address);
        WREG32(data, (u32)(v & 0xffffffffULL));
        (void)RREG32(data);

        /* write high 32 bits */
        WREG32(address, reg + 4);
        (void)RREG32(address);
        WREG32(data, (u32)(v >> 32));
        (void)RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(address, ((reg) & 0x1ff));
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
        return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(address, ((reg) & 0x1ff));
        WREG32(data, (v));
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, (reg));
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, (reg));
        WREG32(data, (v));
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
        WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
        r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
        spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
        return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
        WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
        WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
        spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
        WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
        r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
        spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
        return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
        WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
        WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
        spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
        return adev->nbio_funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
        return adev->clock.spll.reference_freq;
}

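/* Route subsequent GRBM register accesses to the given micro engine (me),
 * pipe, queue and VMID by programming GRBM_GFX_CNTL through the RLC shadow.
 */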
void soc15_grbm_select(struct amdgpu_device *adev,
                       u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 grbm_gfx_cntl = 0;
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

        WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
        /* todo */
        return false;
}

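/* Copy the VBIOS image into the caller-supplied buffer by reading the SMUIO
 * ROM data port one dword at a time. Not applicable to APUs, whose VBIOS is
 * part of the system BIOS image.
 */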
static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
                                     u8 *bios, u32 length_bytes)
{
        u32 *dw_ptr;
        u32 i, length_dw;

        if (bios == NULL)
                return false;
        if (length_bytes == 0)
                return false;
        /* APU vbios image is part of sbios image */
        if (adev->flags & AMD_IS_APU)
                return false;

        dw_ptr = (u32 *)bios;
        length_dw = ALIGN(length_bytes, 4) / 4;

        /* set rom index to 0 */
        WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
        /* read out the rom data */
        for (i = 0; i < length_dw; i++)
                dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

        return true;
}

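/* Whitelist of registers that soc15_read_register() is allowed to read back;
 * any other offset is rejected with -EINVAL.
 */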
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
        { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
        { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
        { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                            u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
                                         bool indexed, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        if (indexed) {
                return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
        } else {
                if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
                        return adev->gfx.config.gb_addr_config;
                else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
                        return adev->gfx.config.db_debug2;
                return RREG32(reg_offset);
        }
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
                               u32 sh_num, u32 reg_offset, u32 *value)
{
        uint32_t i;
        struct soc15_allowed_register_entry *en;

        *value = 0;
        for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
                en = &soc15_allowed_read_registers[i];
                if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
                                        + en->reg_offset))
                        continue;

                *value = soc15_get_register_value(adev,
                                soc15_allowed_read_registers[i].grbm_indexed,
                                se_num, sh_num, reg_offset);
                return 0;
        }
        return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
                                     const struct soc15_reg_golden *regs,
                                     const u32 array_size)
{
        const struct soc15_reg_golden *entry;
        u32 tmp, reg;
        int i;

        for (i = 0; i < array_size; ++i) {
                entry = &regs[i];
                reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

                if (entry->and_mask == 0xffffffff) {
                        tmp = entry->or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~(entry->and_mask);
                        tmp |= (entry->or_mask & entry->and_mask);
                }

                if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
                    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
                    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
                    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
                        WREG32_RLC(reg, tmp);
                else
                        WREG32(reg, tmp);
        }
}

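/* Full chip (mode1) reset requested through the PSP. Bus mastering is
 * disabled and the PCI config space is saved/restored around the reset,
 * then the memory size register is polled until the ASIC comes back.
 */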
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
        u32 i;
        int ret = 0;

        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        dev_info(adev->dev, "GPU mode1 reset\n");

        /* disable bus mastering */
        pci_clear_master(adev->pdev);

        pci_save_state(adev->pdev);

        ret = psp_gpu_reset(adev);
        if (ret)
                dev_err(adev->dev, "GPU mode1 reset failed\n");

        pci_restore_state(adev->pdev);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = adev->nbio_funcs->get_memsize(adev);

                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return ret;
}

static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
                *cap = false;
                return -ENOENT;
        }

        return pp_funcs->get_asic_baco_capability(pp_handle, cap);
}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        /* enter BACO state */
        if (pp_funcs->set_asic_baco_state(pp_handle, 1))
                return -EIO;

        /* exit BACO state */
        if (pp_funcs->set_asic_baco_state(pp_handle, 0))
                return -EIO;

        dev_info(adev->dev, "GPU BACO reset\n");

        adev->in_baco_reset = 1;

        return 0;
}

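/* Pick a reset method: mode2 for Raven APUs, BACO where the SMU reports it
 * as supported (and it is safe to use on the given configuration), otherwise
 * fall back to a full mode1 reset.
 */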
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
        bool baco_reset = false;

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                return AMD_RESET_METHOD_MODE2;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
                soc15_asic_get_baco_capability(adev, &baco_reset);
                break;
        case CHIP_VEGA20:
                if (adev->psp.sos_fw_version >= 0x80067)
                        soc15_asic_get_baco_capability(adev, &baco_reset);
                else
                        baco_reset = false;
                if (baco_reset) {
                        struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
                        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

                        if (hive || (ras && ras->supported))
                                baco_reset = false;
                }
                break;
        default:
                baco_reset = false;
                break;
        }

        if (baco_reset)
                return AMD_RESET_METHOD_BACO;
        else
                return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
        int ret;

        if (soc15_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
                ret = soc15_asic_baco_reset(adev);
        else
                ret = soc15_asic_mode1_reset(adev);

        return ret;
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
                        u32 cntl_reg, u32 status_reg)
{
        return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        /*int r;

        r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
        if (r)
                return r;

        r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
        */
        return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */

        return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
        if (pci_is_root_bus(adev->pdev->bus))
                return;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (adev->flags & AMD_IS_APU)
                return;

        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
                return;

        /* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
                                           bool enable)
{
        adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
        adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_COMMON,
        .major = 2,
        .minor = 0,
        .rev = 0,
        .funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
        return adev->nbio_funcs->get_rev_id(adev);
}

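/* Register the IP blocks (common, GMC, IH, PSP, GFX, SDMA, display,
 * multimedia, ...) that make up each soc15 ASIC, after selecting the
 * per-ASIC register base and the NBIO/DF callback tables.
 */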
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
        /* Set IP register base before any HW register access */
        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                vega10_reg_base_init(adev);
                break;
        case CHIP_VEGA20:
                vega20_reg_base_init(adev);
                break;
        case CHIP_ARCTURUS:
                arct_reg_base_init(adev);
                break;
        default:
                return -EINVAL;
        }

        if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
                adev->gmc.xgmi.supported = true;

        if (adev->flags & AMD_IS_APU)
                adev->nbio_funcs = &nbio_v7_0_funcs;
        else if (adev->asic_type == CHIP_VEGA20 ||
                 adev->asic_type == CHIP_ARCTURUS)
                adev->nbio_funcs = &nbio_v7_4_funcs;
        else
                adev->nbio_funcs = &nbio_v6_1_funcs;

        if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
                adev->df_funcs = &df_v3_6_funcs;
        else
                adev->df_funcs = &df_v1_7_funcs;

        adev->rev_id = soc15_get_rev_id(adev);
        adev->nbio_funcs->detect_hw_virt(adev);

        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_ai_virt_ops;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

                /* For Vega10 SR-IOV, PSP needs to be initialized before IH */
                if (amdgpu_sriov_vf(adev)) {
                        if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
                                if (adev->asic_type == CHIP_VEGA20)
                                        amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                                else
                                        amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
                        }
                        amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
                } else {
                        amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
                        if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
                                if (adev->asic_type == CHIP_VEGA20)
                                        amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                                else
                                        amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
                        }
                }
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
                if (!amdgpu_sriov_vf(adev)) {
                        if (is_support_sw_smu(adev))
                                amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                        else
                                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                }
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
                        amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
                        amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
                }
                break;
        case CHIP_RAVEN:
                amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
                if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                        amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
                break;
        case CHIP_ARCTURUS:
                amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
                amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
                break;
        case CHIP_RENOIR:
                amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
                if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                        amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
        adev->nbio_funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
                                 struct amdgpu_ring *ring)
{
        if (!ring || !ring->funcs->emit_wreg)
                WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
        else
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                        HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
        /* change this when we implement soft reset */
        return true;
}

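/* Sample the PCIe message/request perf counters over a fixed window so
 * callers can estimate PCIe usage. Vega20 uses the separate TXCLK3 counter
 * block, handled by vega20_get_pcie_usage() below.
 */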
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
                                 uint64_t *count1)
{
        uint32_t perfctr = 0;
        uint64_t cnt0_of, cnt1_of;
        int tmp;

        /* This reports 0 on APUs, so return to avoid writing/reading registers
         * that may or may not be different from their GPU counterparts
         */
        if (adev->flags & AMD_IS_APU)
                return;

        /* Set the 2 events that we wish to watch, defined above */
        /* Reg 40 is # received msgs */
        /* Reg 104 is # of posted requests sent */
        perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
        perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

        /* Write to enable desired perf counters */
        WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
        /* Zero out and enable the perf counters
         * Write 0x5:
         * Bit 0 = Start all counters(1)
         * Bit 2 = Global counter reset enable(1)
         */
        WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

        /* let the counters accumulate */
        msleep(1000);

        /* Load the shadow and disable the perf counters
         * Write 0x2:
         * Bit 0 = Stop counters(0)
         * Bit 1 = Load the shadow counters(1)
         */
        WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

        /* Read register values to get any >32bit overflow */
        tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
        cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
        cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

        /* Get the values and add the overflow */
        *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
        *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
                                  uint64_t *count1)
{
        uint32_t perfctr = 0;
        uint64_t cnt0_of, cnt1_of;
        int tmp;

        /* This reports 0 on APUs, so return to avoid writing/reading registers
         * that may or may not be different from their GPU counterparts
         */
        if (adev->flags & AMD_IS_APU)
                return;

        /* Set the 2 events that we wish to watch, defined above */
        /* Reg 40 is # received msgs */
        /* Reg 108 is # of posted requests sent on VG20 */
        perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
                                EVENT0_SEL, 40);
        perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
                                EVENT1_SEL, 108);

        /* Write to enable desired perf counters */
        WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
        /* Zero out and enable the perf counters
         * Write 0x5:
         * Bit 0 = Start all counters(1)
         * Bit 2 = Global counter reset enable(1)
         */
        WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

        /* let the counters accumulate */
        msleep(1000);

        /* Load the shadow and disable the perf counters
         * Write 0x2:
         * Bit 0 = Stop counters(0)
         * Bit 1 = Load the shadow counters(1)
         */
        WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

        /* Read register values to get any >32bit overflow */
        tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
        cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
        cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

        /* Get the values and add the overflow */
        *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
        *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
        u32 sol_reg;

        /* Just return false for soc15 GPUs.  Reset does not seem to
         * be necessary.
         */
        if (!amdgpu_passthrough(adev))
                return false;

        if (adev->flags & AMD_IS_APU)
                return false;

        /* Check sOS sign of life register to confirm sys driver and sOS
         * have already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
        if (sol_reg)
                return true;

        return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
        uint64_t nak_r, nak_g;

        /* Get the number of NAKs received and generated */
        nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
        nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

        /* Add the total number of NAKs, i.e. the number of replays */
        return (nak_r + nak_g);
}

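/* ASIC callback tables. Vega20 (and Arcturus) use a separate table for the
 * vega20 doorbell layout and the TXCLK3-based PCIe usage counters.
 */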
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
        .read_disabled_bios = &soc15_read_disabled_bios,
        .read_bios_from_rom = &soc15_read_bios_from_rom,
        .read_register = &soc15_read_register,
        .reset = &soc15_asic_reset,
        .reset_method = &soc15_asic_reset_method,
        .set_vga_state = &soc15_vga_set_state,
        .get_xclk = &soc15_get_xclk,
        .set_uvd_clocks = &soc15_set_uvd_clocks,
        .set_vce_clocks = &soc15_set_vce_clocks,
        .get_config_memsize = &soc15_get_config_memsize,
        .flush_hdp = &soc15_flush_hdp,
        .invalidate_hdp = &soc15_invalidate_hdp,
        .need_full_reset = &soc15_need_full_reset,
        .init_doorbell_index = &vega10_doorbell_index_init,
        .get_pcie_usage = &soc15_get_pcie_usage,
        .need_reset_on_init = &soc15_need_reset_on_init,
        .get_pcie_replay_count = &soc15_get_pcie_replay_count,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
        .read_disabled_bios = &soc15_read_disabled_bios,
        .read_bios_from_rom = &soc15_read_bios_from_rom,
        .read_register = &soc15_read_register,
        .reset = &soc15_asic_reset,
        .set_vga_state = &soc15_vga_set_state,
        .get_xclk = &soc15_get_xclk,
        .set_uvd_clocks = &soc15_set_uvd_clocks,
        .set_vce_clocks = &soc15_set_vce_clocks,
        .get_config_memsize = &soc15_get_config_memsize,
        .flush_hdp = &soc15_flush_hdp,
        .invalidate_hdp = &soc15_invalidate_hdp,
        .need_full_reset = &soc15_need_full_reset,
        .init_doorbell_index = &vega20_doorbell_index_init,
        .get_pcie_usage = &vega20_get_pcie_usage,
        .need_reset_on_init = &soc15_need_reset_on_init,
        .get_pcie_replay_count = &soc15_get_pcie_replay_count,
        .reset_method = &soc15_asic_reset_method,
};

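/* Common IP early init: hook up the indirect register accessors, the remapped
 * HDP register hole, and the per-ASIC clock/power gating flags and external
 * revision id.
 */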
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
        adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &soc15_pcie_rreg;
        adev->pcie_wreg = &soc15_pcie_wreg;
        adev->pcie_rreg64 = &soc15_pcie_rreg64;
        adev->pcie_wreg64 = &soc15_pcie_wreg64;
        adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
        adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
        adev->didt_rreg = &soc15_didt_rreg;
        adev->didt_wreg = &soc15_didt_wreg;
        adev->gc_cac_rreg = &soc15_gc_cac_rreg;
        adev->gc_cac_wreg = &soc15_gc_cac_wreg;
        adev->se_cac_rreg = &soc15_se_cac_rreg;
        adev->se_cac_wreg = &soc15_se_cac_wreg;

        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                adev->asic_funcs = &soc15_asic_funcs;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_RLC_LS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_DRM_MGCG |
                        AMD_CG_SUPPORT_DRM_LS |
                        AMD_CG_SUPPORT_ROM_MGCG |
                        AMD_CG_SUPPORT_DF_MGCG |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS;
                adev->pg_flags = 0;
                adev->external_rev_id = 0x1;
                break;
        case CHIP_VEGA12:
                adev->asic_funcs = &soc15_asic_funcs;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_ROM_MGCG |
                        AMD_CG_SUPPORT_VCE_MGCG |
                        AMD_CG_SUPPORT_UVD_MGCG;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x14;
                break;
        case CHIP_VEGA20:
                adev->asic_funcs = &vega20_asic_funcs;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_ROM_MGCG |
                        AMD_CG_SUPPORT_VCE_MGCG |
                        AMD_CG_SUPPORT_UVD_MGCG;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x28;
                break;
        case CHIP_RAVEN:
                adev->asic_funcs = &soc15_asic_funcs;
                if (adev->rev_id >= 0x8)
                        adev->external_rev_id = adev->rev_id + 0x79;
                else if (adev->pdev->device == 0x15d8)
                        adev->external_rev_id = adev->rev_id + 0x41;
                else if (adev->rev_id == 1)
                        adev->external_rev_id = adev->rev_id + 0x20;
                else
                        adev->external_rev_id = adev->rev_id + 0x01;

                if (adev->rev_id >= 0x8) {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
                                AMD_CG_SUPPORT_GFX_3D_CGCG |
                                AMD_CG_SUPPORT_GFX_3D_CGLS |
                                AMD_CG_SUPPORT_GFX_CGCG |
                                AMD_CG_SUPPORT_GFX_CGLS |
                                AMD_CG_SUPPORT_BIF_LS |
                                AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_ROM_MGCG |
                                AMD_CG_SUPPORT_MC_MGCG |
                                AMD_CG_SUPPORT_MC_LS |
                                AMD_CG_SUPPORT_SDMA_MGCG |
                                AMD_CG_SUPPORT_SDMA_LS |
                                AMD_CG_SUPPORT_VCN_MGCG;

                        adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
                } else if (adev->pdev->device == 0x15d8) {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
                                AMD_CG_SUPPORT_GFX_3D_CGCG |
                                AMD_CG_SUPPORT_GFX_3D_CGLS |
                                AMD_CG_SUPPORT_GFX_CGCG |
                                AMD_CG_SUPPORT_GFX_CGLS |
                                AMD_CG_SUPPORT_BIF_LS |
                                AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_ROM_MGCG |
                                AMD_CG_SUPPORT_MC_MGCG |
                                AMD_CG_SUPPORT_MC_LS |
                                AMD_CG_SUPPORT_SDMA_MGCG |
                                AMD_CG_SUPPORT_SDMA_LS;

                        adev->pg_flags = AMD_PG_SUPPORT_SDMA |
                                AMD_PG_SUPPORT_MMHUB |
                                AMD_PG_SUPPORT_VCN |
                                AMD_PG_SUPPORT_VCN_DPG;
                } else {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_RLC_LS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
                                AMD_CG_SUPPORT_GFX_3D_CGCG |
                                AMD_CG_SUPPORT_GFX_3D_CGLS |
                                AMD_CG_SUPPORT_GFX_CGCG |
                                AMD_CG_SUPPORT_GFX_CGLS |
                                AMD_CG_SUPPORT_BIF_MGCG |
                                AMD_CG_SUPPORT_BIF_LS |
                                AMD_CG_SUPPORT_HDP_MGCG |
                                AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_DRM_MGCG |
                                AMD_CG_SUPPORT_DRM_LS |
                                AMD_CG_SUPPORT_ROM_MGCG |
                                AMD_CG_SUPPORT_MC_MGCG |
                                AMD_CG_SUPPORT_MC_LS |
                                AMD_CG_SUPPORT_SDMA_MGCG |
                                AMD_CG_SUPPORT_SDMA_LS |
                                AMD_CG_SUPPORT_VCN_MGCG;

                        adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
                }

                if (adev->pm.pp_feature & PP_GFXOFF_MASK)
                        adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_RLC_SMU_HS;
                break;
        case CHIP_ARCTURUS:
                adev->asic_funcs = &vega20_asic_funcs;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x32;
                break;
        case CHIP_RENOIR:
                adev->asic_funcs = &soc15_asic_funcs;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x91;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_setting(adev);
                xgpu_ai_mailbox_set_irq_funcs(adev);
        }

        return 0;
}

static int soc15_common_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_ai_mailbox_get_irq(adev);

        return 0;
}

static int soc15_common_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_ai_mailbox_add_irq_id(adev);

        adev->df_funcs->sw_init(adev);

        return 0;
}

static int soc15_common_sw_fini(void *handle)
{
        return 0;
}

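/* Route the SDMA and IH doorbells through the NBIO doorbell apertures.
 * Called from hw_init, before the CP IP block init and ring tests.
 */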
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
        int i;
        struct amdgpu_ring *ring;

        /* sdma/ih doorbell range are programmed by hypervisor */
        if (!amdgpu_sriov_vf(adev)) {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        ring = &adev->sdma.instance[i].ring;
                        adev->nbio_funcs->sdma_doorbell_range(adev, i,
                                ring->use_doorbell, ring->doorbell_index,
                                adev->doorbell_index.sdma_doorbell_range);
                }

                adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
                                                    adev->irq.ih.doorbell_index);
        }
}

static int soc15_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* enable pcie gen2/3 link */
        soc15_pcie_gen3_enable(adev);
        /* enable aspm */
        soc15_program_aspm(adev);
        /* setup nbio registers */
        adev->nbio_funcs->init_registers(adev);
        /* remap HDP registers to a hole in mmio space
         * so that they can be exposed to process space
         */
        if (adev->nbio_funcs->remap_hdp_registers)
                adev->nbio_funcs->remap_hdp_registers(adev);

        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
        /* HW doorbell routing policy: doorbell writes not in the
         * SDMA/IH/MM/ACV ranges are routed to CP, so the SDMA/IH/MM/ACV
         * doorbell ranges must be initialized prior to CP ip block init
         * and ring test.
         */
        soc15_doorbell_range_init(adev);

        return 0;
}

static int soc15_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, false);
        if (amdgpu_sriov_vf(adev))
                xgpu_ai_mailbox_put_irq(adev);

        return 0;
}

static int soc15_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
        return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
        return 0;
}

static int soc15_common_soft_reset(void *handle)
{
        return 0;
}

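/* HDP memory light sleep: Vega20 and Arcturus use the per-memory
 * HDP_MEM_POWER_CTRL register, older soc15 ASICs use HDP_MEM_POWER_LS.
 */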
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
        uint32_t def, data;

        if (adev->asic_type == CHIP_VEGA20 ||
            adev->asic_type == CHIP_ARCTURUS) {
                def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
                        data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
                                HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
                                HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
                                HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
                else
                        data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
                                HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
                                HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
                                HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

                if (def != data)
                        WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
        } else {
                def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
                        data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
                else
                        data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

                if (def != data)
                        WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
        }
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t def, data;

        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
                data &= ~(0x01000000 | 0x02000000 | 0x04000000 | 0x08000000 |
                          0x10000000 | 0x20000000 | 0x40000000 | 0x80000000);
        else
                data |= (0x01000000 | 0x02000000 | 0x04000000 | 0x08000000 |
                         0x10000000 | 0x20000000 | 0x40000000 | 0x80000000);

        if (def != data)
                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
        uint32_t def, data;

        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
                data |= 1;
        else
                data &= ~1;

        if (def != data)
                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;

        def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
                data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
                        CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
        else
                data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
                        CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

        if (def != data)
                WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

static int soc15_common_set_clockgating_state(void *handle,
                                              enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
                adev->nbio_funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                adev->nbio_funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_drm_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_drm_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_rom_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                adev->df_funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                break;
        case CHIP_RAVEN:
        case CHIP_RENOIR:
                adev->nbio_funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                adev->nbio_funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_drm_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_drm_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_rom_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                break;
        case CHIP_ARCTURUS:
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                break;
        default:
                break;
        }
        return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int data;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        adev->nbio_funcs->get_clockgating_state(adev, flags);

        /* AMD_CG_SUPPORT_HDP_LS */
        data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
        if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_LS;

        /* AMD_CG_SUPPORT_DRM_MGCG */
        data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
        if (!(data & 0x01000000))
                *flags |= AMD_CG_SUPPORT_DRM_MGCG;

        /* AMD_CG_SUPPORT_DRM_LS */
        data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
        if (data & 0x1)
                *flags |= AMD_CG_SUPPORT_DRM_LS;

        /* AMD_CG_SUPPORT_ROM_MGCG */
        data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
        if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
                *flags |= AMD_CG_SUPPORT_ROM_MGCG;

        adev->df_funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
                                              enum amd_powergating_state state)
{
        /* todo */
        return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
        .name = "soc15_common",
        .early_init = soc15_common_early_init,
        .late_init = soc15_common_late_init,
        .sw_init = soc15_common_sw_init,
        .sw_fini = soc15_common_sw_fini,
        .hw_init = soc15_common_hw_init,
        .hw_fini = soc15_common_hw_fini,
        .suspend = soc15_common_suspend,
        .resume = soc15_common_resume,
        .is_idle = soc15_common_is_idle,
        .wait_for_idle = soc15_common_wait_for_idle,
        .soft_reset = soc15_common_soft_reset,
        .set_clockgating_state = soc15_common_set_clockgating_state,
        .set_powergating_state = soc15_common_set_powergating_state,
        .get_clockgating_state = soc15_common_get_clockgating_state,
};