]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/amd/amdgpu/soc15.c
drm/amdgpu: Add PCIe replay count sysfs file
[linux.git] / drivers / gpu / drm / amd / amdgpu / soc15.c
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <drm/drmP.h>
27 #include "amdgpu.h"
28 #include "amdgpu_atombios.h"
29 #include "amdgpu_ih.h"
30 #include "amdgpu_uvd.h"
31 #include "amdgpu_vce.h"
32 #include "amdgpu_ucode.h"
33 #include "amdgpu_psp.h"
34 #include "atom.h"
35 #include "amd_pcie.h"
36
37 #include "uvd/uvd_7_0_offset.h"
38 #include "gc/gc_9_0_offset.h"
39 #include "gc/gc_9_0_sh_mask.h"
40 #include "sdma0/sdma0_4_0_offset.h"
41 #include "sdma1/sdma1_4_0_offset.h"
42 #include "hdp/hdp_4_0_offset.h"
43 #include "hdp/hdp_4_0_sh_mask.h"
44 #include "smuio/smuio_9_0_offset.h"
45 #include "smuio/smuio_9_0_sh_mask.h"
46 #include "nbio/nbio_7_0_default.h"
47 #include "nbio/nbio_7_0_offset.h"
48 #include "nbio/nbio_7_0_sh_mask.h"
49 #include "nbio/nbio_7_0_smn.h"
50 #include "mp/mp_9_0_offset.h"
51
52 #include "soc15.h"
53 #include "soc15_common.h"
54 #include "gfx_v9_0.h"
55 #include "gmc_v9_0.h"
56 #include "gfxhub_v1_0.h"
57 #include "mmhub_v1_0.h"
58 #include "df_v1_7.h"
59 #include "df_v3_6.h"
60 #include "vega10_ih.h"
61 #include "sdma_v4_0.h"
62 #include "uvd_v7_0.h"
63 #include "vce_v4_0.h"
64 #include "vcn_v1_0.h"
65 #include "dce_virtual.h"
66 #include "mxgpu_ai.h"
67 #include "amdgpu_smu.h"
68 #include <uapi/linux/kfd_ioctl.h>
69
70 #define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
71 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
72 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
73 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
74
75 /* for Vega20 register name change */
76 #define mmHDP_MEM_POWER_CTRL    0x00d4
77 #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK  0x00000001L
78 #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK    0x00000002L
79 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK   0x00010000L
80 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK             0x00020000L
81 #define mmHDP_MEM_POWER_CTRL_BASE_IDX   0
82 /*
83  * Indirect registers accessor
84  */
85 static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
86 {
87         unsigned long flags, address, data;
88         u32 r;
89         address = adev->nbio_funcs->get_pcie_index_offset(adev);
90         data = adev->nbio_funcs->get_pcie_data_offset(adev);
91
92         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
93         WREG32(address, reg);
94         (void)RREG32(address);
95         r = RREG32(data);
96         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
97         return r;
98 }
99
100 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
101 {
102         unsigned long flags, address, data;
103
104         address = adev->nbio_funcs->get_pcie_index_offset(adev);
105         data = adev->nbio_funcs->get_pcie_data_offset(adev);
106
107         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
108         WREG32(address, reg);
109         (void)RREG32(address);
110         WREG32(data, v);
111         (void)RREG32(data);
112         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
113 }
114
115 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
116 {
117         unsigned long flags, address, data;
118         u32 r;
119
120         address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
121         data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
122
123         spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
124         WREG32(address, ((reg) & 0x1ff));
125         r = RREG32(data);
126         spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
127         return r;
128 }
129
130 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
131 {
132         unsigned long flags, address, data;
133
134         address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
135         data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
136
137         spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
138         WREG32(address, ((reg) & 0x1ff));
139         WREG32(data, (v));
140         spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
141 }
142
143 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
144 {
145         unsigned long flags, address, data;
146         u32 r;
147
148         address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
149         data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
150
151         spin_lock_irqsave(&adev->didt_idx_lock, flags);
152         WREG32(address, (reg));
153         r = RREG32(data);
154         spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
155         return r;
156 }
157
158 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
159 {
160         unsigned long flags, address, data;
161
162         address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
163         data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
164
165         spin_lock_irqsave(&adev->didt_idx_lock, flags);
166         WREG32(address, (reg));
167         WREG32(data, (v));
168         spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
169 }
170
171 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
172 {
173         unsigned long flags;
174         u32 r;
175
176         spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
177         WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
178         r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
179         spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
180         return r;
181 }
182
183 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
184 {
185         unsigned long flags;
186
187         spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
188         WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
189         WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
190         spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
191 }
192
193 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
194 {
195         unsigned long flags;
196         u32 r;
197
198         spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
199         WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
200         r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
201         spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
202         return r;
203 }
204
205 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
206 {
207         unsigned long flags;
208
209         spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
210         WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
211         WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
212         spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
213 }
214
/* Report the configured memory size as seen by NBIO. */
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}
219
/* Return the SPLL reference clock frequency (the asic's xclk). */
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
224
225
226 void soc15_grbm_select(struct amdgpu_device *adev,
227                      u32 me, u32 pipe, u32 queue, u32 vmid)
228 {
229         u32 grbm_gfx_cntl = 0;
230         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
231         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
232         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
233         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
234
235         WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
236 }
237
/* Toggle VGA state; not implemented for SOC15 yet. */
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
242
/* Read the vbios while the asic is posted-disabled; not implemented,
 * so always reports failure.
 */
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
248
249 static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
250                                      u8 *bios, u32 length_bytes)
251 {
252         u32 *dw_ptr;
253         u32 i, length_dw;
254
255         if (bios == NULL)
256                 return false;
257         if (length_bytes == 0)
258                 return false;
259         /* APU vbios image is part of sbios image */
260         if (adev->flags & AMD_IS_APU)
261                 return false;
262
263         dw_ptr = (u32 *)bios;
264         length_dw = ALIGN(length_bytes, 4) / 4;
265
266         /* set rom index to 0 */
267         WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
268         /* read out the rom data */
269         for (i = 0; i < length_dw; i++)
270                 dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
271
272         return true;
273 }
274
/* One entry of the register read whitelist used by soc15_read_register().
 * hwip/inst/seg index into adev->reg_offset[][][] to resolve the base.
 */
struct soc15_allowed_register_entry {
	uint32_t hwip;		/* hardware IP index */
	uint32_t inst;		/* IP instance */
	uint32_t seg;		/* register segment within the IP */
	uint32_t reg_offset;	/* offset within the segment */
	bool grbm_indexed;	/* true if the read needs SE/SH selection */
};
282
283
/* Whitelist of registers that soc15_read_register() will service. */
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
305
306 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
307                                          u32 sh_num, u32 reg_offset)
308 {
309         uint32_t val;
310
311         mutex_lock(&adev->grbm_idx_mutex);
312         if (se_num != 0xffffffff || sh_num != 0xffffffff)
313                 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
314
315         val = RREG32(reg_offset);
316
317         if (se_num != 0xffffffff || sh_num != 0xffffffff)
318                 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
319         mutex_unlock(&adev->grbm_idx_mutex);
320         return val;
321 }
322
323 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
324                                          bool indexed, u32 se_num,
325                                          u32 sh_num, u32 reg_offset)
326 {
327         if (indexed) {
328                 return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
329         } else {
330                 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
331                         return adev->gfx.config.gb_addr_config;
332                 else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
333                         return adev->gfx.config.db_debug2;
334                 return RREG32(reg_offset);
335         }
336 }
337
338 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
339                             u32 sh_num, u32 reg_offset, u32 *value)
340 {
341         uint32_t i;
342         struct soc15_allowed_register_entry  *en;
343
344         *value = 0;
345         for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
346                 en = &soc15_allowed_read_registers[i];
347                 if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
348                                         + en->reg_offset))
349                         continue;
350
351                 *value = soc15_get_register_value(adev,
352                                                   soc15_allowed_read_registers[i].grbm_indexed,
353                                                   se_num, sh_num, reg_offset);
354                 return 0;
355         }
356         return -EINVAL;
357 }
358
359
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */

void soc15_program_register_sequence(struct amdgpu_device *adev,
					     const struct soc15_reg_golden *regs,
					     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg =  adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			/* full mask: plain write, no read-modify-write needed */
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= entry->or_mask;
		}
		WREG32(reg, tmp);
	}

}
394
/* Perform a mode1 (full asic) reset through the PSP.
 *
 * Bus mastering is disabled and PCI config space saved/restored around
 * the reset; afterwards the memsize register is polled until the asic
 * responds again (it reads 0xffffffff while still in reset).
 * Returns the psp_gpu_reset() status.
 */
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
428
429 static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
430 {
431         void *pp_handle = adev->powerplay.pp_handle;
432         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
433
434         if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
435                 *cap = false;
436                 return -ENOENT;
437         }
438
439         return pp_funcs->get_asic_baco_capability(pp_handle, cap);
440 }
441
442 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
443 {
444         void *pp_handle = adev->powerplay.pp_handle;
445         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
446
447         if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
448                 return -ENOENT;
449
450         /* enter BACO state */
451         if (pp_funcs->set_asic_baco_state(pp_handle, 1))
452                 return -EIO;
453
454         /* exit BACO state */
455         if (pp_funcs->set_asic_baco_state(pp_handle, 0))
456                 return -EIO;
457
458         dev_info(adev->dev, "GPU BACO reset\n");
459
460         adev->in_baco_reset = 1;
461
462         return 0;
463 }
464
465 static int soc15_asic_reset(struct amdgpu_device *adev)
466 {
467         int ret;
468         bool baco_reset;
469
470         switch (adev->asic_type) {
471         case CHIP_VEGA10:
472         case CHIP_VEGA12:
473                 soc15_asic_get_baco_capability(adev, &baco_reset);
474                 break;
475         case CHIP_VEGA20:
476                 if (adev->psp.sos_fw_version >= 0x80067)
477                         soc15_asic_get_baco_capability(adev, &baco_reset);
478                 else
479                         baco_reset = false;
480                 break;
481         default:
482                 baco_reset = false;
483                 break;
484         }
485
486         if (baco_reset)
487                 ret = soc15_asic_baco_reset(adev);
488         else
489                 ret = soc15_asic_mode1_reset(adev);
490
491         return ret;
492 }
493
494 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
495                         u32 cntl_reg, u32 status_reg)
496 {
497         return 0;
498 }*/
499
500 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
501 {
502         /*int r;
503
504         r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
505         if (r)
506                 return r;
507
508         r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
509         */
510         return 0;
511 }
512
/* Set the VCE evclk/ecclk; currently an unimplemented stub that
 * reports success.
 */
static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}
519
520 static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
521 {
522         if (pci_is_root_bus(adev->pdev->bus))
523                 return;
524
525         if (amdgpu_pcie_gen2 == 0)
526                 return;
527
528         if (adev->flags & AMD_IS_APU)
529                 return;
530
531         if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
532                                         CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
533                 return;
534
535         /* todo */
536 }
537
/* Program ASPM (PCIe link power management).  Honors the amdgpu_aspm=0
 * module parameter; the programming itself is still a todo.
 */
static void soc15_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
546
/* Enable or disable both the doorbell aperture and the doorbell
 * self-ring aperture through the NBIO callbacks.
 */
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}
553
/* Common IP block descriptor registered first for every SOC15 asic
 * in soc15_set_ip_blocks().
 */
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
562
/* Read the asic revision id via the NBIO callback. */
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}
567
/* Register the per-IP block drivers for this asic.
 *
 * Also initializes the per-IP register base offsets (which must happen
 * before any HW register access) and picks the NBIO/DF callback tables.
 * Returns 0 on success, -EINVAL for an unknown asic type.
 */
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->asic_type == CHIP_VEGA20)
		adev->gmc.xgmi.supported = true;

	/* APUs use NBIO 7.0, Vega20 uses 7.4, other dGPUs 6.1 */
	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else if (adev->asic_type == CHIP_VEGA20)
		adev->nbio_funcs = &nbio_v7_4_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	if (adev->asic_type == CHIP_VEGA20)
		adev->df_funcs = &df_v3_6_funcs;
	else
		adev->df_funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	/* NOTE(review): block add order determines init order — keep it */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			if (adev->asic_type == CHIP_VEGA20)
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			if (is_support_sw_smu(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
			else
				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		/* no UVD/VCE under SR-IOV on Vega20 */
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
664
/* Flush the HDP cache via the NBIO hdp_flush callback. */
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}
669
670 static void soc15_invalidate_hdp(struct amdgpu_device *adev,
671                                  struct amdgpu_ring *ring)
672 {
673         if (!ring || !ring->funcs->emit_wreg)
674                 WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
675         else
676                 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
677                         HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
678 }
679
/* SOC15 has no soft reset yet, so a full reset is always required. */
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}
685 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
686                                  uint64_t *count1)
687 {
688         uint32_t perfctr = 0;
689         uint64_t cnt0_of, cnt1_of;
690         int tmp;
691
692         /* This reports 0 on APUs, so return to avoid writing/reading registers
693          * that may or may not be different from their GPU counterparts
694          */
695          if (adev->flags & AMD_IS_APU)
696                  return;
697
698         /* Set the 2 events that we wish to watch, defined above */
699         /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
700         perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
701         perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
702
703         /* Write to enable desired perf counters */
704         WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
705         /* Zero out and enable the perf counters
706          * Write 0x5:
707          * Bit 0 = Start all counters(1)
708          * Bit 2 = Global counter reset enable(1)
709          */
710         WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
711
712         msleep(1000);
713
714         /* Load the shadow and disable the perf counters
715          * Write 0x2:
716          * Bit 0 = Stop counters(0)
717          * Bit 1 = Load the shadow counters(1)
718          */
719         WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
720
721         /* Read register values to get any >32bit overflow */
722         tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
723         cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
724         cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
725
726         /* Get the values and add the overflow */
727         *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
728         *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
729 }
730
731 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
732 {
733         u32 sol_reg;
734
735         if (adev->flags & AMD_IS_APU)
736                 return false;
737
738         /* Check sOS sign of life register to confirm sys driver and sOS
739          * are already been loaded.
740          */
741         sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
742         if (sol_reg)
743                 return true;
744
745         return false;
746 }
747
748 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
749 {
750         uint64_t nak_r, nak_g;
751
752         /* Get the number of NAKs received and generated */
753         nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
754         nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
755
756         /* Add the total number of NAKs, i.e the number of replays */
757         return (nak_r + nak_g);
758 }
759
/* asic callbacks installed for Vega10/Vega12 in soc15_common_early_init(). */
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
779
/* asic callbacks for Vega20; identical to soc15_asic_funcs except for
 * init_doorbell_index (vega20_doorbell_index_init).
 */
static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
};
799
/* Early init for the SOC15 "common" IP block.
 *
 * Installs the indirect register-access callbacks (PCIE, UVD context,
 * DIDT, GC_CAC, SE_CAC), sets up the HDP register remap window, and
 * programs the per-ASIC clockgating (cg_flags) / powergating (pg_flags)
 * feature masks plus the userspace-visible external revision id.
 *
 * Returns 0 on success, -EINVAL for an unsupported asic_type.
 */
static int soc15_common_early_init(void *handle)
{
/* HDP remap window: the last page below the 512KB MMIO boundary. */
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
        adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
        /* No SMC read/write callbacks are installed on SOC15. */
        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &soc15_pcie_rreg;
        adev->pcie_wreg = &soc15_pcie_wreg;
        adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
        adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
        adev->didt_rreg = &soc15_didt_rreg;
        adev->didt_wreg = &soc15_didt_wreg;
        adev->gc_cac_rreg = &soc15_gc_cac_rreg;
        adev->gc_cac_wreg = &soc15_gc_cac_wreg;
        adev->se_cac_rreg = &soc15_se_cac_rreg;
        adev->se_cac_wreg = &soc15_se_cac_wreg;


        /* 0xFF = "unknown revision" sentinel, overwritten per ASIC below. */
        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                adev->asic_funcs = &soc15_asic_funcs;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_RLC_LS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_DRM_MGCG |
                        AMD_CG_SUPPORT_DRM_LS |
                        AMD_CG_SUPPORT_ROM_MGCG |
                        AMD_CG_SUPPORT_DF_MGCG |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS;
                /* No powergating features on Vega10. */
                adev->pg_flags = 0;
                adev->external_rev_id = 0x1;
                break;
        case CHIP_VEGA12:
                adev->asic_funcs = &soc15_asic_funcs;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_ROM_MGCG |
                        AMD_CG_SUPPORT_VCE_MGCG |
                        AMD_CG_SUPPORT_UVD_MGCG;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x14;
                break;
        case CHIP_VEGA20:
                /* Vega20 has its own asic_funcs (different doorbell init etc.). */
                adev->asic_funcs = &vega20_asic_funcs;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_ROM_MGCG |
                        AMD_CG_SUPPORT_VCE_MGCG |
                        AMD_CG_SUPPORT_UVD_MGCG;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x28;
                break;
        case CHIP_RAVEN:
                adev->asic_funcs = &soc15_asic_funcs;
                /* Three Raven variants are distinguished below by rev_id /
                 * PCI device id; each gets its own external_rev_id offset
                 * and feature masks.
                 * NOTE(review): 0x15d8 is presumably the Picasso PCI device
                 * id — confirm against the PCI id table.
                 */
                if (adev->rev_id >= 0x8)
                        adev->external_rev_id = adev->rev_id + 0x79;
                else if (adev->pdev->device == 0x15d8)
                        adev->external_rev_id = adev->rev_id + 0x41;
                else if (adev->rev_id == 1)
                        adev->external_rev_id = adev->rev_id + 0x20;
                else
                        adev->external_rev_id = adev->rev_id + 0x01;

                if (adev->rev_id >= 0x8) {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
                                AMD_CG_SUPPORT_GFX_3D_CGCG |
                                AMD_CG_SUPPORT_GFX_3D_CGLS |
                                AMD_CG_SUPPORT_GFX_CGCG |
                                AMD_CG_SUPPORT_GFX_CGLS |
                                AMD_CG_SUPPORT_BIF_LS |
                                AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_ROM_MGCG |
                                AMD_CG_SUPPORT_MC_MGCG |
                                AMD_CG_SUPPORT_MC_LS |
                                AMD_CG_SUPPORT_SDMA_MGCG |
                                AMD_CG_SUPPORT_SDMA_LS |
                                AMD_CG_SUPPORT_VCN_MGCG;

                        adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
                } else if (adev->pdev->device == 0x15d8) {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
                                AMD_CG_SUPPORT_GFX_3D_CGCG |
                                AMD_CG_SUPPORT_GFX_3D_CGLS |
                                AMD_CG_SUPPORT_GFX_CGCG |
                                AMD_CG_SUPPORT_GFX_CGLS |
                                AMD_CG_SUPPORT_BIF_LS |
                                AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_ROM_MGCG |
                                AMD_CG_SUPPORT_MC_MGCG |
                                AMD_CG_SUPPORT_MC_LS |
                                AMD_CG_SUPPORT_SDMA_MGCG |
                                AMD_CG_SUPPORT_SDMA_LS;

                        adev->pg_flags = AMD_PG_SUPPORT_SDMA |
                                AMD_PG_SUPPORT_MMHUB |
                                AMD_PG_SUPPORT_VCN |
                                AMD_PG_SUPPORT_VCN_DPG;
                } else {
                        adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                                AMD_CG_SUPPORT_GFX_MGLS |
                                AMD_CG_SUPPORT_GFX_RLC_LS |
                                AMD_CG_SUPPORT_GFX_CP_LS |
                                AMD_CG_SUPPORT_GFX_3D_CGCG |
                                AMD_CG_SUPPORT_GFX_3D_CGLS |
                                AMD_CG_SUPPORT_GFX_CGCG |
                                AMD_CG_SUPPORT_GFX_CGLS |
                                AMD_CG_SUPPORT_BIF_MGCG |
                                AMD_CG_SUPPORT_BIF_LS |
                                AMD_CG_SUPPORT_HDP_MGCG |
                                AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_DRM_MGCG |
                                AMD_CG_SUPPORT_DRM_LS |
                                AMD_CG_SUPPORT_ROM_MGCG |
                                AMD_CG_SUPPORT_MC_MGCG |
                                AMD_CG_SUPPORT_MC_LS |
                                AMD_CG_SUPPORT_SDMA_MGCG |
                                AMD_CG_SUPPORT_SDMA_LS |
                                AMD_CG_SUPPORT_VCN_MGCG;

                        adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
                }

                /* When GFXOFF is enabled via pp_feature, also allow GFX,
                 * CP and RLC-SMU-handshake powergating on Raven.
                 */
                if (adev->pm.pp_feature & PP_GFXOFF_MASK)
                        adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_RLC_SMU_HS;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        /* SR-IOV virtual functions need their own settings and the
         * AI mailbox IRQ callbacks for host/guest signalling.
         */
        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_setting(adev);
                xgpu_ai_mailbox_set_irq_funcs(adev);
        }

        return 0;
}
984
/* Late init: for an SR-IOV VF, acquire the host mailbox interrupt. */
static int soc15_common_late_init(void *handle)
{
        struct amdgpu_device *adev = handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_ai_mailbox_get_irq(adev);

        return 0;
}
994
/* SW init: for an SR-IOV VF, register the mailbox interrupt source id. */
static int soc15_common_sw_init(void *handle)
{
        struct amdgpu_device *adev = handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_ai_mailbox_add_irq_id(adev);

        return 0;
}
1004
/* SW fini: nothing set up in sw_init needs tearing down here. */
static int soc15_common_sw_fini(void *handle)
{
        return 0;
}
1009
1010 static void soc15_doorbell_range_init(struct amdgpu_device *adev)
1011 {
1012         int i;
1013         struct amdgpu_ring *ring;
1014
1015         for (i = 0; i < adev->sdma.num_instances; i++) {
1016                 ring = &adev->sdma.instance[i].ring;
1017                 adev->nbio_funcs->sdma_doorbell_range(adev, i,
1018                         ring->use_doorbell, ring->doorbell_index,
1019                         adev->doorbell_index.sdma_doorbell_range);
1020         }
1021
1022         adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
1023                                                 adev->irq.ih.doorbell_index);
1024 }
1025
/* Common SOC15 hardware init: PCIe link/ASPM, NBIO register setup, HDP
 * register remap and doorbell aperture/range programming.  Runs before
 * the per-IP hw_init callbacks, which rely on the doorbell setup below.
 */
static int soc15_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* enable pcie gen2/3 link */
        soc15_pcie_gen3_enable(adev);
        /* enable aspm */
        soc15_program_aspm(adev);
        /* setup nbio registers */
        adev->nbio_funcs->init_registers(adev);
        /* remap HDP registers into a hole in MMIO space so they can be
         * exposed to user process space (callback is optional per NBIO
         * version)
         */
        if (adev->nbio_funcs->remap_hdp_registers)
                adev->nbio_funcs->remap_hdp_registers(adev);
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
        /* HW doorbell routing policy: doorbell writing not
         * in SDMA/IH/MM/ACV range will be routed to CP. So
         * we need to init SDMA/IH/MM/ACV doorbell range prior
         * to CP ip block init and ring test.
         */
        soc15_doorbell_range_init(adev);

        return 0;
}
1053
1054 static int soc15_common_hw_fini(void *handle)
1055 {
1056         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1057
1058         /* disable the doorbell aperture */
1059         soc15_enable_doorbell_aperture(adev, false);
1060         if (amdgpu_sriov_vf(adev))
1061                 xgpu_ai_mailbox_put_irq(adev);
1062
1063         return 0;
1064 }
1065
/* Suspend is identical to a common hw teardown. */
static int soc15_common_suspend(void *handle)
{
        return soc15_common_hw_fini(handle);
}
1072
/* Resume is identical to a common hw bring-up. */
static int soc15_common_resume(void *handle)
{
        return soc15_common_hw_init(handle);
}
1079
/* The common block has no busy state to poll; always report idle. */
static bool soc15_common_is_idle(void *handle)
{
        return true;
}
1084
/* Nothing to wait for — the common block is always idle. */
static int soc15_common_wait_for_idle(void *handle)
{
        return 0;
}
1089
/* No soft-reset action is implemented for the common block. */
static int soc15_common_soft_reset(void *handle)
{
        return 0;
}
1094
/* Enable/disable HDP memory light sleep (AMD_CG_SUPPORT_HDP_LS).
 *
 * Vega20 uses the newer HDP_MEM_POWER_CTRL register with separate
 * IPH/RC memory power controls; other SOC15 parts use the single
 * LS_ENABLE bit in HDP_MEM_POWER_LS.  Read-modify-write, with the
 * write skipped when the value is unchanged.
 */
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
        uint32_t def, data;

        if (adev->asic_type == CHIP_VEGA20) {
                def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

                /* Gate only when LS is both requested and supported. */
                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
                        data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
                                HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
                                HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
                                HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
                else
                        data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
                                HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
                                HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
                                HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

                if (def != data)
                        WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
        } else {
                def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
                        data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
                else
                        data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

                if (def != data)
                        WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
        }
}
1127
1128 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1129 {
1130         uint32_t def, data;
1131
1132         def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1133
1134         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1135                 data &= ~(0x01000000 |
1136                           0x02000000 |
1137                           0x04000000 |
1138                           0x08000000 |
1139                           0x10000000 |
1140                           0x20000000 |
1141                           0x40000000 |
1142                           0x80000000);
1143         else
1144                 data |= (0x01000000 |
1145                          0x02000000 |
1146                          0x04000000 |
1147                          0x08000000 |
1148                          0x10000000 |
1149                          0x20000000 |
1150                          0x40000000 |
1151                          0x80000000);
1152
1153         if (def != data)
1154                 WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1155 }
1156
1157 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1158 {
1159         uint32_t def, data;
1160
1161         def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1162
1163         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1164                 data |= 1;
1165         else
1166                 data &= ~1;
1167
1168         if (def != data)
1169                 WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1170 }
1171
1172 static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1173                                                        bool enable)
1174 {
1175         uint32_t def, data;
1176
1177         def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1178
1179         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1180                 data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1181                         CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1182         else
1183                 data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1184                         CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1185
1186         if (def != data)
1187                 WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
1188 }
1189
1190 static int soc15_common_set_clockgating_state(void *handle,
1191                                             enum amd_clockgating_state state)
1192 {
1193         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1194
1195         if (amdgpu_sriov_vf(adev))
1196                 return 0;
1197
1198         switch (adev->asic_type) {
1199         case CHIP_VEGA10:
1200         case CHIP_VEGA12:
1201         case CHIP_VEGA20:
1202                 adev->nbio_funcs->update_medium_grain_clock_gating(adev,
1203                                 state == AMD_CG_STATE_GATE ? true : false);
1204                 adev->nbio_funcs->update_medium_grain_light_sleep(adev,
1205                                 state == AMD_CG_STATE_GATE ? true : false);
1206                 soc15_update_hdp_light_sleep(adev,
1207                                 state == AMD_CG_STATE_GATE ? true : false);
1208                 soc15_update_drm_clock_gating(adev,
1209                                 state == AMD_CG_STATE_GATE ? true : false);
1210                 soc15_update_drm_light_sleep(adev,
1211                                 state == AMD_CG_STATE_GATE ? true : false);
1212                 soc15_update_rom_medium_grain_clock_gating(adev,
1213                                 state == AMD_CG_STATE_GATE ? true : false);
1214                 adev->df_funcs->update_medium_grain_clock_gating(adev,
1215                                 state == AMD_CG_STATE_GATE ? true : false);
1216                 break;
1217         case CHIP_RAVEN:
1218                 adev->nbio_funcs->update_medium_grain_clock_gating(adev,
1219                                 state == AMD_CG_STATE_GATE ? true : false);
1220                 adev->nbio_funcs->update_medium_grain_light_sleep(adev,
1221                                 state == AMD_CG_STATE_GATE ? true : false);
1222                 soc15_update_hdp_light_sleep(adev,
1223                                 state == AMD_CG_STATE_GATE ? true : false);
1224                 soc15_update_drm_clock_gating(adev,
1225                                 state == AMD_CG_STATE_GATE ? true : false);
1226                 soc15_update_drm_light_sleep(adev,
1227                                 state == AMD_CG_STATE_GATE ? true : false);
1228                 soc15_update_rom_medium_grain_clock_gating(adev,
1229                                 state == AMD_CG_STATE_GATE ? true : false);
1230                 break;
1231         default:
1232                 break;
1233         }
1234         return 0;
1235 }
1236
1237 static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
1238 {
1239         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1240         int data;
1241
1242         if (amdgpu_sriov_vf(adev))
1243                 *flags = 0;
1244
1245         adev->nbio_funcs->get_clockgating_state(adev, flags);
1246
1247         /* AMD_CG_SUPPORT_HDP_LS */
1248         data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1249         if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1250                 *flags |= AMD_CG_SUPPORT_HDP_LS;
1251
1252         /* AMD_CG_SUPPORT_DRM_MGCG */
1253         data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1254         if (!(data & 0x01000000))
1255                 *flags |= AMD_CG_SUPPORT_DRM_MGCG;
1256
1257         /* AMD_CG_SUPPORT_DRM_LS */
1258         data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1259         if (data & 0x1)
1260                 *flags |= AMD_CG_SUPPORT_DRM_LS;
1261
1262         /* AMD_CG_SUPPORT_ROM_MGCG */
1263         data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1264         if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1265                 *flags |= AMD_CG_SUPPORT_ROM_MGCG;
1266
1267         adev->df_funcs->get_clockgating_state(adev, flags);
1268 }
1269
1270 static int soc15_common_set_powergating_state(void *handle,
1271                                             enum amd_powergating_state state)
1272 {
1273         /* todo */
1274         return 0;
1275 }
1276
1277 const struct amd_ip_funcs soc15_common_ip_funcs = {
1278         .name = "soc15_common",
1279         .early_init = soc15_common_early_init,
1280         .late_init = soc15_common_late_init,
1281         .sw_init = soc15_common_sw_init,
1282         .sw_fini = soc15_common_sw_fini,
1283         .hw_init = soc15_common_hw_init,
1284         .hw_fini = soc15_common_hw_fini,
1285         .suspend = soc15_common_suspend,
1286         .resume = soc15_common_resume,
1287         .is_idle = soc15_common_is_idle,
1288         .wait_for_idle = soc15_common_wait_for_idle,
1289         .soft_reset = soc15_common_soft_reset,
1290         .set_clockgating_state = soc15_common_set_clockgating_state,
1291         .set_powergating_state = soc15_common_set_powergating_state,
1292         .get_clockgating_state= soc15_common_get_clockgating_state,
1293 };