/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/HDP/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
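
/*
 * Reading aid (derived from the masks above, not from the PWR register
 * database): PWR_GFXOFF_STATUS occupies bits [2:1], so a raw
 * PWR_MISC_CNTL_STATUS value decodes as
 *
 *	status = (val & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) >>
 *		 PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT;
 *
 * pwr_10_0_gfxip_control_over_cgpg() below writes 2 into this field while
 * enabling GFXIP control over CGPG.
 */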
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
};
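
/*
 * Usage sketch: the table above bundles the four per-VMID GDS registers so
 * callers can index by VMID and pick a member, e.g.
 *
 *	amdgpu_gds_reg_offset[vmid].mem_base
 *
 * as gfx_v9_0_ngg_init() and gfx_v9_0_ngg_en() below do for VMID 0.
 */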
static const u32 golden_settings_gc_9_0[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
	SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
	SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};
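
/*
 * Format note for the golden-setting tables (a reading aid, not new
 * behaviour): each entry is an (offset, and_mask, or_value) triplet, and
 * amdgpu_program_register_sequence() applies roughly
 *
 *	tmp = RREG32(offset);
 *	tmp &= ~and_mask;
 *	tmp |= or_value;
 *	WREG32(offset, tmp);
 *
 * so e.g. the mmDB_DEBUG2 entry above clears the bits in 0xf00fffff and
 * then sets 0x00000420 in what remains.
 */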
static const u32 golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
};
static const u32 golden_settings_gc_9_1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};
static const u32 golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
static const u32 golden_settings_gc_9_x_common[] =
{
	SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0,
						 ARRAY_SIZE(golden_settings_gc_9_0));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0_vg10,
						 ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1,
						 ARRAY_SIZE(golden_settings_gc_9_1));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1_rv1,
						 ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
					 (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
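
/*
 * gfx_v9_0_write_data_to_reg() below emits one five-dword
 * PACKET3_WRITE_DATA packet: header, control word (engine select,
 * destination select 0 = register, optional write confirm), destination
 * register offset, an unused high address dword, and the value.  The "3"
 * in PACKET3(PACKET3_WRITE_DATA, 3) is the standard PM4 count field: the
 * number of dwords following the header, minus one.
 */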
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
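
/*
 * gfx_v9_0_wait_reg_mem() below emits PACKET3_WAIT_REG_MEM, which stalls
 * the selected engine until (*addr & mask) == ref; WAIT_REG_MEM_FUNCTION(3)
 * hard-codes the "equal" comparison and the last dword is the poll
 * interval.  addr0/addr1 carry a memory address when mem_space is 1 and a
 * register pair when it is 0.
 */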
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
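
/*
 * Both ring tests below use the same handshake: the CPU seeds a scratch
 * register with 0xCAFEDEAD, asks the CP to overwrite it with 0xDEADBEEF
 * via a SET_UCONFIG_REG packet (pushed directly on the ring, or through an
 * IB), then polls the register.  Reading back 0xDEADBEEF proves the ring
 * or IB path is actually fetching and executing packets.
 */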
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}
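
/*
 * gfx_v9_0_init_microcode() below fetches one blob per CP/RLC block using
 * the "amdgpu/<chip>_<block>.bin" names declared with MODULE_FIRMWARE at
 * the top of the file, validates each header and caches the version and
 * feature numbers.  When the PSP front-door loader is in use it also
 * registers every blob in adev->firmware.ucode[] (with the MEC jump tables
 * split out as separate entries) so the PSP path can upload them later.
 * MEC2 firmware is optional: a failed request simply leaves mec2_fw NULL.
 */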
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
		le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
		le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
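
/*
 * The clear-state buffer written below matches the size computed above:
 * a PREAMBLE begin pair, a three-dword CONTEXT_CONTROL packet, one
 * SET_CONTEXT_REG run per extent of gfx9_cs_data (two header dwords plus
 * reg_count values), a PREAMBLE end pair, and a trailing two-dword
 * CLEAR_STATE packet.  The buffer address and length are later handed to
 * the RLC through the RLC_CSIB registers in gfx_v9_0_init_csb().
 */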
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
	WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);
}
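
/*
 * Reading aid for the REG_SET_FIELD() idiom used above and throughout this
 * file (semantics per the soc15 register helpers, sketched from memory):
 *
 *	REG_SET_FIELD(orig, REG, FIELD, val)
 *		roughly == (orig & ~REG__FIELD_MASK) |
 *			   ((val << REG__FIELD__SHIFT) & REG__FIELD_MASK)
 *
 * so building a value from 0, field by field, as done for RLC_LB_PARAMS
 * above, places each field at its bit position without touching the rest.
 */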
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i ++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
				r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_err(adev->dev,
				"(%d) failed to create cp table bo\n", r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		gfx_v9_0_init_lbpw(adev);
	}

	return 0;
}
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}
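
/*
 * The wave helpers below go through the SQ_IND_INDEX/SQ_IND_DATA indirect
 * register pair: the index write selects SIMD, wave, optional thread and a
 * starting dword, and every subsequent SQ_IND_DATA read returns one dword,
 * auto-incrementing the index when AUTO_INCR is set.  Combined with
 * gfx_v9_0_select_se_sh() this is what the debugfs wave-dump interface
 * uses to capture wavefront state.
 */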
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t thread,
				     uint32_t start, uint32_t size,
				     uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));
}
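
/*
 * Worked example for the decode above (field layout assumed per
 * gc_9_0_sh_mask.h): the Vega10 golden GB_ADDR_CONFIG 0x2a114042 carries
 * NUM_PIPES = 2 in its low bits, giving num_pipes = 1 << 2 = 4.  Every
 * field is stored as a log2 value and expanded with a shift;
 * PIPE_INTERLEAVE_SIZE gets an extra +8 because the hardware encodes it
 * relative to 256 bytes.
 */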
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}
static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}
static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}
static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data, base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size,
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size, 0);

	amdgpu_ring_commit(ring);

	return 0;
}
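
/*
 * Bookkeeping note for gfx_v9_0_compute_ring_init() below: the doorbell
 * index is (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1 since SOC15 doorbell
 * slots are 64-bit while the index space is counted in 32-bit units, each
 * ring gets its own GFX9_MEC_HPD_SIZE slice of the shared EOP buffer, and
 * the EOP interrupt source is derived from (me, pipe) so completion
 * interrupts route back to the right pipe.
 */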
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}
static int gfx_v9_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		adev->gfx.mec.num_mec = 2;
		break;
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		if (!i)
			sprintf(ring->name, "gfx");
		else
			sprintf(ring->name, "gfx_%d", i);
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v9_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
	if (r)
		return r;

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v9_0_gpu_early_init(adev);

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}
static int gfx_v9_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_compute_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);
	amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);

	gfx_v9_0_mec_fini(adev);
	gfx_v9_0_ngg_fini(adev);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
				&adev->gfx.rlc.clear_state_gpu_addr,
				(void **)&adev->gfx.rlc.cs_ptr);
	if (adev->asic_type == CHIP_RAVEN) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
				&adev->gfx.rlc.cp_table_gpu_addr,
				(void **)&adev->gfx.rlc.cp_table_ptr);
	}
	gfx_v9_0_free_microcode(adev);

	return 0;
}
static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}
static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
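
/*
 * Aperture arithmetic above, spelled out: DEFAULT_SH_MEM_BASES (0x6000)
 * is replicated into both 16-bit halves of SH_MEM_BASES
 * (0x6000 | 0x6000 << 16 == 0x60006000), which is what places the LDS and
 * scratch apertures at the 0x6000_000x'0000_0000 addresses listed in the
 * comment, for each of the compute VMIDs 8..15.
 */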
static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v9_0_tiling_mode_table_init(adev);

	gfx_v9_0_setup_rb(adev);
	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		tmp = 0;
		tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
				    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_0_init_compute_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_prim_fifo_size_backend <<
			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
	mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_0_select_se_sh(adev, 0xffffffff,
						      0xffffffff, 0xffffffff);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
			adev->gfx.rlc.clear_state_size);
}
static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
				int indirect_offset,
				int list_size,
				int *unique_indirect_regs,
				int *unique_indirect_reg_count,
				int max_indirect_reg_count,
				int *indirect_start_offsets,
				int *indirect_start_offsets_count,
				int max_indirect_start_offsets_count)
{
	int idx;
	bool new_entry = true;

	for (; indirect_offset < list_size; indirect_offset++) {

		if (new_entry) {
			new_entry = false;
			indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
			*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
			BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
		}

		if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
			new_entry = true;
			continue;
		}

		indirect_offset += 2;

		/* look for the matching indice */
		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
			if (unique_indirect_regs[idx] ==
				register_list_format[indirect_offset])
				break;
		}

		if (idx >= *unique_indirect_reg_count) {
			unique_indirect_regs[*unique_indirect_reg_count] =
				register_list_format[indirect_offset];
			idx = *unique_indirect_reg_count;
			*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
			BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
		}

		register_list_format[indirect_offset] = idx;
	}
}
1744 static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
1746 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1747 int unique_indirect_reg_count = 0;
1749 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1750 int indirect_start_offsets_count = 0;
1756 u32 *register_list_format =
1757 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
1758 if (!register_list_format)
1760 memcpy(register_list_format, adev->gfx.rlc.register_list_format,
1761 adev->gfx.rlc.reg_list_format_size_bytes);
1763 /* setup unique_indirect_regs array and indirect_start_offsets array */
1764 gfx_v9_0_parse_ind_reg_list(register_list_format,
1765 GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
1766 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
1767 unique_indirect_regs,
1768 &unique_indirect_reg_count,
1769 ARRAY_SIZE(unique_indirect_regs),
1770 indirect_start_offsets,
1771 &indirect_start_offsets_count,
1772 ARRAY_SIZE(indirect_start_offsets));
1774 /* enable auto inc in case it is disabled */
1775 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1776 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1777 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1779 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
1780 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
1781 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
1782 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1783 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1784 adev->gfx.rlc.register_restore[i]);
1786 /* load direct register */
1787 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
1788 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1789 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1790 adev->gfx.rlc.register_restore[i]);
1792 /* load indirect register */
1793 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1794 adev->gfx.rlc.reg_list_format_start);
1795 for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
1796 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1797 register_list_format[i]);
1799 /* set save/restore list size */
1800 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
1801 list_size = list_size >> 1;
1802 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1803 adev->gfx.rlc.reg_restore_list_size);
1804 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
1806 /* write the starting offsets to RLC scratch ram */
1807 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1808 adev->gfx.rlc.starting_offsets_start);
1809 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
1810 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1811 indirect_start_offsets[i]);
1813 /* load unique indirect regs */
1814 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
1815 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
1816 unique_indirect_regs[i] & 0x3FFFF);
1817 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
1818 unique_indirect_regs[i] >> 20);
1821 kfree(register_list_format);
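/* Once the lists are loaded, the save/restore machine (SRM) can be
 * enabled so the RLC saves and restores the listed registers around
 * power-gating transitions.
 */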
1825 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
1827 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
1830 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
1834 uint32_t default_data = 0;
1836 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
1837 if (enable) {
1838 /* enable GFXIP control over CGPG */
1839 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1840 if (default_data != data)
1841 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1844 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
1845 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
1846 if (default_data != data)
1847 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1849 /* restore GFXIP control over CGPG */
1850 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1851 if (default_data != data)
1852 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
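/* One-time gfx power-gating setup: idle poll count, RLC power-up/down
 * and SERDES delays, the auto-PG idle threshold, and finally handing
 * CGPG control to GFXIP via the PWR block above.
 */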
1856 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
1860 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1861 AMD_PG_SUPPORT_GFX_SMG |
1862 AMD_PG_SUPPORT_GFX_DMG)) {
1863 /* init IDLE_POLL_COUNT = 0x60 */
1864 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
1865 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
1866 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
1867 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
1869 /* init RLC PG Delay */
1871 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
1872 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
1873 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
1874 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
1875 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
1877 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
1878 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
1879 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
1880 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
1882 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
1883 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
1884 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
1885 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
1887 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
1888 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
1890 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
1891 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
1892 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
1894 pwr_10_0_gfxip_control_over_cgpg(adev, true);
1898 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
1902 uint32_t default_data = 0;
1904 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1905 data = REG_SET_FIELD(data, RLC_PG_CNTL,
1906 SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
1908 if (default_data != data)
1909 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1912 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
1916 uint32_t default_data = 0;
1918 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1919 data = REG_SET_FIELD(data, RLC_PG_CNTL,
1920 SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
1922 if (default_data != data)
1923 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1926 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
1930 uint32_t default_data = 0;
1932 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1933 data = REG_SET_FIELD(data, RLC_PG_CNTL,
1936 if (default_data != data)
1937 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1940 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
1943 uint32_t data, default_data;
1945 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1946 data = REG_SET_FIELD(data, RLC_PG_CNTL,
1947 GFX_POWER_GATING_ENABLE,
1949 if (default_data != data)
1950 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1953 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
1956 uint32_t data, default_data;
1958 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1959 data = REG_SET_FIELD(data, RLC_PG_CNTL,
1960 GFX_PIPELINE_PG_ENABLE,
1962 if (default_data != data)
1963 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1966 /* read any GFX register to wake up GFX */
1967 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
1970 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
1973 uint32_t data, default_data;
1975 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1976 data = REG_SET_FIELD(data, RLC_PG_CNTL,
1977 STATIC_PER_CU_PG_ENABLE,
1979 if (default_data != data)
1980 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1983 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
1986 uint32_t data, default_data;
1988 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1989 data = REG_SET_FIELD(data, RLC_PG_CNTL,
1990 DYN_PER_CU_PG_ENABLE,
1992 if (default_data != data)
1993 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1996 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
1998 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1999 AMD_PG_SUPPORT_GFX_SMG |
2000 AMD_PG_SUPPORT_GFX_DMG |
2002 AMD_PG_SUPPORT_GDS |
2003 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2004 gfx_v9_0_init_csb(adev);
2005 gfx_v9_0_init_rlc_save_restore_list(adev);
2006 gfx_v9_0_enable_save_restore_machine(adev);
2008 if (adev->asic_type == CHIP_RAVEN) {
2009 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2010 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2011 gfx_v9_0_init_gfx_power_gating(adev);
2013 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
2014 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
2015 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
2017 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
2018 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
2021 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
2022 gfx_v9_0_enable_cp_power_gating(adev, true);
2024 gfx_v9_0_enable_cp_power_gating(adev, false);
2029 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2031 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2032 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2033 gfx_v9_0_wait_for_rlc_serdes(adev);
2036 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2038 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2040 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2044 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2046 #ifdef AMDGPU_RLC_DEBUG_RETRY
2050 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2052 /* carrizo enables the cp interrupt only after cp has been initialized */
2053 if (!(adev->flags & AMD_IS_APU))
2054 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2058 #ifdef AMDGPU_RLC_DEBUG_RETRY
2059 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2060 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2061 if (rlc_ucode_ver == 0x108) {
2062 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
2063 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2064 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2065 * default is 0x9C4 to create a 100us interval */
2066 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2067 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2068 * to disable the page fault retry interrupts, default is
* 0x100 (256) */
2070 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
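/* Legacy (non-PSP) RLC microcode load below: ucode words are streamed
 * through the RLC_GPM_UCODE_ADDR/DATA register pair starting at
 * RLCG_UCODE_LOADING_START_ADDRESS, and the fw version is written to
 * the ADDR register at the end of the sequence.
 */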
2075 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2077 const struct rlc_firmware_header_v2_0 *hdr;
2078 const __le32 *fw_data;
2079 unsigned i, fw_size;
2081 if (!adev->gfx.rlc_fw)
2084 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2085 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2087 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2088 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2089 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2091 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2092 RLCG_UCODE_LOADING_START_ADDRESS);
2093 for (i = 0; i < fw_size; i++)
2094 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2095 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2100 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2104 if (amdgpu_sriov_vf(adev)) {
2105 gfx_v9_0_init_csb(adev);
2109 gfx_v9_0_rlc_stop(adev);
2112 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2115 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
2117 gfx_v9_0_rlc_reset(adev);
2119 gfx_v9_0_init_pg(adev);
2121 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2122 /* legacy rlc firmware loading */
2123 r = gfx_v9_0_rlc_load_microcode(adev);
2128 if (adev->asic_type == CHIP_RAVEN) {
2129 if (amdgpu_lbpw != 0)
2130 gfx_v9_0_enable_lbpw(adev, true);
2132 gfx_v9_0_enable_lbpw(adev, false);
2135 gfx_v9_0_rlc_start(adev);
2140 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2143 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2145 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2146 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2147 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2149 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2150 adev->gfx.gfx_ring[i].ready = false;
2152 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
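/* Legacy CP gfx microcode load: with PFP/CE/ME halted via
 * gfx_v9_0_cp_gfx_enable(adev, false), each image is written one dword
 * at a time to its UCODE_DATA register, then the fw version is written
 * to the corresponding ADDR/WADDR register.
 */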
2156 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2158 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2159 const struct gfx_firmware_header_v1_0 *ce_hdr;
2160 const struct gfx_firmware_header_v1_0 *me_hdr;
2161 const __le32 *fw_data;
2162 unsigned i, fw_size;
2164 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2167 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2168 adev->gfx.pfp_fw->data;
2169 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2170 adev->gfx.ce_fw->data;
2171 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2172 adev->gfx.me_fw->data;
2174 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2175 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2176 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2178 gfx_v9_0_cp_gfx_enable(adev, false);
2181 fw_data = (const __le32 *)
2182 (adev->gfx.pfp_fw->data +
2183 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2184 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2185 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2186 for (i = 0; i < fw_size; i++)
2187 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2188 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2191 fw_data = (const __le32 *)
2192 (adev->gfx.ce_fw->data +
2193 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2194 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2195 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2196 for (i = 0; i < fw_size; i++)
2197 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2198 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2201 fw_data = (const __le32 *)
2202 (adev->gfx.me_fw->data +
2203 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2204 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2205 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2206 for (i = 0; i < fw_size; i++)
2207 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2208 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
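/* gfx_v9_0_cp_gfx_start() primes ring 0 with the golden context: the
 * clear-state buffer is replayed between PREAMBLE_BEGIN/END_CLEAR_STATE
 * packets so every new GPU context starts from known register values.
 */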
2213 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2215 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2216 const struct cs_section_def *sect = NULL;
2217 const struct cs_extent_def *ext = NULL;
2221 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2222 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2224 gfx_v9_0_cp_gfx_enable(adev, true);
2226 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2228 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2232 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2233 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2235 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2236 amdgpu_ring_write(ring, 0x80000000);
2237 amdgpu_ring_write(ring, 0x80000000);
2239 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2240 for (ext = sect->section; ext->extent != NULL; ++ext) {
2241 if (sect->id == SECT_CONTEXT) {
2242 amdgpu_ring_write(ring,
2243 PACKET3(PACKET3_SET_CONTEXT_REG,
2245 amdgpu_ring_write(ring,
2246 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2247 for (i = 0; i < ext->reg_count; i++)
2248 amdgpu_ring_write(ring, ext->extent[i]);
2253 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2254 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2256 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2257 amdgpu_ring_write(ring, 0);
2259 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2260 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2261 amdgpu_ring_write(ring, 0x8000);
2262 amdgpu_ring_write(ring, 0x8000);
2264 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2265 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2266 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2267 amdgpu_ring_write(ring, tmp);
2268 amdgpu_ring_write(ring, 0);
2270 amdgpu_ring_commit(ring);
2275 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2277 struct amdgpu_ring *ring;
2280 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2282 /* Set the write pointer delay */
2283 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2285 /* set the RB to use vmid 0 */
2286 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2288 /* Set ring buffer size */
2289 ring = &adev->gfx.gfx_ring[0];
2290 rb_bufsz = order_base_2(ring->ring_size / 8);
2291 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2292 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2294 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2296 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2298 /* Initialize the ring buffer's write pointers */
2300 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2301 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2303 /* set the wb address whether it's enabled or not */
2304 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2305 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2306 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2308 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2309 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2310 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2313 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2315 rb_addr = ring->gpu_addr >> 8;
2316 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2317 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2319 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2320 if (ring->use_doorbell) {
2321 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2322 DOORBELL_OFFSET, ring->doorbell_index);
2323 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2326 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2328 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2330 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2331 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2332 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2334 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2335 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2338 /* start the ring */
2339 gfx_v9_0_cp_gfx_start(adev);
2345 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2350 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2352 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2353 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2354 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2355 adev->gfx.compute_ring[i].ready = false;
2356 adev->gfx.kiq.ring.ready = false;
2361 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2363 const struct gfx_firmware_header_v1_0 *mec_hdr;
2364 const __le32 *fw_data;
2368 if (!adev->gfx.mec_fw)
2371 gfx_v9_0_cp_compute_enable(adev, false);
2373 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2374 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2376 fw_data = (const __le32 *)
2377 (adev->gfx.mec_fw->data +
2378 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2380 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2381 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2382 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2384 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2385 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2386 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2387 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2390 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2391 mec_hdr->jt_offset);
2392 for (i = 0; i < mec_hdr->jt_size; i++)
2393 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2394 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2396 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2397 adev->gfx.mec_fw_version);
2398 /* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
2404 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2407 struct amdgpu_device *adev = ring->adev;
2409 /* tell RLC which is KIQ queue */
2410 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2412 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2413 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
tmp |= 0x80; /* assumed missing step: without it this second write would just repeat the first */
2415 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
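/* KCQ bring-up through the KIQ: SET_RESOURCES hands the compute queue
 * bitmap to the HW scheduler, one MAP_QUEUES packet is emitted per
 * compute ring, and a scratch register written with 0xDEADBEEF serves
 * as the completion fence for the whole batch.
 */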
2418 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2420 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2421 uint32_t scratch, tmp = 0;
2422 uint64_t queue_mask = 0;
2425 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2426 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2429 /* This situation may be hit in the future if a new HW
2430 * generation exposes more than 64 queues. If so, the
2431 * definition of queue_mask needs updating */
2432 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2433 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2437 queue_mask |= (1ull << i);
2440 r = amdgpu_gfx_scratch_get(adev, &scratch);
2442 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2445 WREG32(scratch, 0xCAFEDEAD);
2447 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2449 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2450 amdgpu_gfx_scratch_free(adev, scratch);
2455 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2456 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2457 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2458 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2459 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2460 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2461 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2462 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2463 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2464 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2465 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2466 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2467 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2469 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2471 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
2472 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2473 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2474 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2475 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2476 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2477 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2478 PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
2479 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2480 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2481 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2482 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2483 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2484 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2485 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2487 /* write to scratch for completion */
2488 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2489 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2490 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2491 amdgpu_ring_commit(kiq_ring);
2493 for (i = 0; i < adev->usec_timeout; i++) {
2494 tmp = RREG32(scratch);
2495 if (tmp == 0xDEADBEEF)
2499 if (i >= adev->usec_timeout) {
2500 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2504 amdgpu_gfx_scratch_free(adev, scratch);
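/* gfx_v9_0_mqd_init() fills the Memory Queue Descriptor: the CP reads
 * queue state (EOP buffer, ring base/size, doorbell, rptr/wptr
 * addresses) from this structure when the queue is mapped.
 */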
2509 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2511 struct amdgpu_device *adev = ring->adev;
2512 struct v9_mqd *mqd = ring->mqd_ptr;
2513 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2516 mqd->header = 0xC0310800;
2517 mqd->compute_pipelinestat_enable = 0x00000001;
2518 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2519 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2520 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2521 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2522 mqd->compute_misc_reserved = 0x00000003;
2524 mqd->dynamic_cu_mask_addr_lo =
2525 lower_32_bits(ring->mqd_gpu_addr
2526 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2527 mqd->dynamic_cu_mask_addr_hi =
2528 upper_32_bits(ring->mqd_gpu_addr
2529 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2531 eop_base_addr = ring->eop_gpu_addr >> 8;
2532 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2533 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2535 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
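/* e.g. GFX9_MEC_HPD_SIZE = 2048 bytes = 512 dwords:
 * order_base_2(512) - 1 = 8, and 2^(8+1) = 512 dwords. */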
2536 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2537 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2538 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2540 mqd->cp_hqd_eop_control = tmp;
2542 /* enable doorbell? */
2543 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2545 if (ring->use_doorbell) {
2546 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2547 DOORBELL_OFFSET, ring->doorbell_index);
2548 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2550 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2551 DOORBELL_SOURCE, 0);
2552 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2555 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2559 mqd->cp_hqd_pq_doorbell_control = tmp;
2561 /* disable the queue if it's active */
2563 mqd->cp_hqd_dequeue_request = 0;
2564 mqd->cp_hqd_pq_rptr = 0;
2565 mqd->cp_hqd_pq_wptr_lo = 0;
2566 mqd->cp_hqd_pq_wptr_hi = 0;
2568 /* set the pointer to the MQD */
2569 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2570 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2572 /* set MQD vmid to 0 */
2573 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2574 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2575 mqd->cp_mqd_control = tmp;
2577 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2578 hqd_gpu_addr = ring->gpu_addr >> 8;
2579 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2580 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2582 /* set up the HQD, this is similar to CP_RB0_CNTL */
2583 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2584 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2585 (order_base_2(ring->ring_size / 4) - 1));
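/* QUEUE_SIZE appears to use the same 2^(n+1) encoding as EOP_SIZE: a
 * 64KB ring is 16384 dwords, order_base_2(16384) - 1 = 13,
 * i.e. 2^(13+1) dwords. */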
2586 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2587 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2589 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2591 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2592 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2593 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2594 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2595 mqd->cp_hqd_pq_control = tmp;
2597 /* set the wb address whether it's enabled or not */
2598 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2599 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2600 mqd->cp_hqd_pq_rptr_report_addr_hi =
2601 upper_32_bits(wb_gpu_addr) & 0xffff;
2603 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2604 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2605 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2606 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2609 /* enable the doorbell if requested */
2610 if (ring->use_doorbell) {
2611 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2612 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2613 DOORBELL_OFFSET, ring->doorbell_index);
2615 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2617 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2618 DOORBELL_SOURCE, 0);
2619 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2623 mqd->cp_hqd_pq_doorbell_control = tmp;
2625 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2627 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2629 /* set the vmid for the queue */
2630 mqd->cp_hqd_vmid = 0;
2632 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2633 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2634 mqd->cp_hqd_persistent_state = tmp;
2636 /* set MIN_IB_AVAIL_SIZE */
2637 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2638 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2639 mqd->cp_hqd_ib_control = tmp;
2641 /* activate the queue */
2642 mqd->cp_hqd_active = 1;
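/* The KIQ cannot map itself with MAP_QUEUES, so for the KIQ this MQD is
 * also committed field by field straight into the HQD registers; that
 * is what gfx_v9_0_kiq_init_register() below does.
 */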
2647 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2649 struct amdgpu_device *adev = ring->adev;
2650 struct v9_mqd *mqd = ring->mqd_ptr;
2653 /* disable wptr polling */
2654 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2656 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2657 mqd->cp_hqd_eop_base_addr_lo);
2658 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2659 mqd->cp_hqd_eop_base_addr_hi);
2661 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2662 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2663 mqd->cp_hqd_eop_control);
2665 /* enable doorbell? */
2666 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2667 mqd->cp_hqd_pq_doorbell_control);
2669 /* disable the queue if it's active */
2670 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2671 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2672 for (j = 0; j < adev->usec_timeout; j++) {
2673 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2677 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2678 mqd->cp_hqd_dequeue_request);
2679 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2680 mqd->cp_hqd_pq_rptr);
2681 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2682 mqd->cp_hqd_pq_wptr_lo);
2683 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2684 mqd->cp_hqd_pq_wptr_hi);
2687 /* set the pointer to the MQD */
2688 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2689 mqd->cp_mqd_base_addr_lo);
2690 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2691 mqd->cp_mqd_base_addr_hi);
2693 /* set MQD vmid to 0 */
2694 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2695 mqd->cp_mqd_control);
2697 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2698 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2699 mqd->cp_hqd_pq_base_lo);
2700 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2701 mqd->cp_hqd_pq_base_hi);
2703 /* set up the HQD, this is similar to CP_RB0_CNTL */
2704 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2705 mqd->cp_hqd_pq_control);
2707 /* set the wb address whether it's enabled or not */
2708 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2709 mqd->cp_hqd_pq_rptr_report_addr_lo);
2710 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2711 mqd->cp_hqd_pq_rptr_report_addr_hi);
2713 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2714 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2715 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2716 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2717 mqd->cp_hqd_pq_wptr_poll_addr_hi);
2719 /* enable the doorbell if requested */
2720 if (ring->use_doorbell) {
2721 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2722 (AMDGPU_DOORBELL64_KIQ * 2) << 2);
2723 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2724 (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
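/* (index * 2) << 2 converts a 64-bit doorbell slot index into a byte
 * offset: each 64-bit doorbell spans two 32-bit slots of 4 bytes each.
 */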
2727 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2728 mqd->cp_hqd_pq_doorbell_control);
2730 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2731 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2732 mqd->cp_hqd_pq_wptr_lo);
2733 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2734 mqd->cp_hqd_pq_wptr_hi);
2736 /* set the vmid for the queue */
2737 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2739 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2740 mqd->cp_hqd_persistent_state);
2742 /* activate the queue */
2743 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2744 mqd->cp_hqd_active);
2746 if (ring->use_doorbell)
2747 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2752 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2754 struct amdgpu_device *adev = ring->adev;
2755 struct v9_mqd *mqd = ring->mqd_ptr;
2756 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
2758 gfx_v9_0_kiq_setting(ring);
2760 if (adev->in_gpu_reset) { /* for GPU_RESET case */
2761 /* reset MQD to a clean status */
2762 if (adev->gfx.mec.mqd_backup[mqd_idx])
2763 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2765 /* reset ring buffer */
2767 amdgpu_ring_clear_ring(ring);
2769 mutex_lock(&adev->srbm_mutex);
2770 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2771 gfx_v9_0_kiq_init_register(ring);
2772 soc15_grbm_select(adev, 0, 0, 0, 0);
2773 mutex_unlock(&adev->srbm_mutex);
2775 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2776 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2777 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2778 mutex_lock(&adev->srbm_mutex);
2779 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2780 gfx_v9_0_mqd_init(ring);
2781 gfx_v9_0_kiq_init_register(ring);
2782 soc15_grbm_select(adev, 0, 0, 0, 0);
2783 mutex_unlock(&adev->srbm_mutex);
2785 if (adev->gfx.mec.mqd_backup[mqd_idx])
2786 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2792 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
2794 struct amdgpu_device *adev = ring->adev;
2795 struct v9_mqd *mqd = ring->mqd_ptr;
2796 int mqd_idx = ring - &adev->gfx.compute_ring[0];
2798 if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
2799 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2800 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2801 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2802 mutex_lock(&adev->srbm_mutex);
2803 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2804 gfx_v9_0_mqd_init(ring);
2805 soc15_grbm_select(adev, 0, 0, 0, 0);
2806 mutex_unlock(&adev->srbm_mutex);
2808 if (adev->gfx.mec.mqd_backup[mqd_idx])
2809 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2810 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
2811 /* reset MQD to a clean status */
2812 if (adev->gfx.mec.mqd_backup[mqd_idx])
2813 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2815 /* reset ring buffer */
2817 amdgpu_ring_clear_ring(ring);
2819 amdgpu_ring_clear_ring(ring);
2825 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
2827 struct amdgpu_ring *ring = NULL;
2830 gfx_v9_0_cp_compute_enable(adev, true);
2832 ring = &adev->gfx.kiq.ring;
2834 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2835 if (unlikely(r != 0))
2838 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2840 r = gfx_v9_0_kiq_init_queue(ring);
2841 amdgpu_bo_kunmap(ring->mqd_obj);
2842 ring->mqd_ptr = NULL;
2844 amdgpu_bo_unreserve(ring->mqd_obj);
2848 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2849 ring = &adev->gfx.compute_ring[i];
2851 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2852 if (unlikely(r != 0))
2854 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2856 r = gfx_v9_0_kcq_init_queue(ring);
2857 amdgpu_bo_kunmap(ring->mqd_obj);
2858 ring->mqd_ptr = NULL;
2860 amdgpu_bo_unreserve(ring->mqd_obj);
2865 r = gfx_v9_0_kiq_kcq_enable(adev);
2870 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
2873 struct amdgpu_ring *ring;
2875 if (!(adev->flags & AMD_IS_APU))
2876 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2878 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2879 /* legacy firmware loading */
2880 r = gfx_v9_0_cp_gfx_load_microcode(adev);
2884 r = gfx_v9_0_cp_compute_load_microcode(adev);
2889 r = gfx_v9_0_cp_gfx_resume(adev);
2893 r = gfx_v9_0_kiq_resume(adev);
2897 ring = &adev->gfx.gfx_ring[0];
2898 r = amdgpu_ring_test_ring(ring);
2900 ring->ready = false;
2904 ring = &adev->gfx.kiq.ring;
2906 r = amdgpu_ring_test_ring(ring);
2908 ring->ready = false;
2910 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2911 ring = &adev->gfx.compute_ring[i];
2914 r = amdgpu_ring_test_ring(ring);
2916 ring->ready = false;
2919 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2924 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
2926 gfx_v9_0_cp_gfx_enable(adev, enable);
2927 gfx_v9_0_cp_compute_enable(adev, enable);
2930 static int gfx_v9_0_hw_init(void *handle)
2933 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2935 gfx_v9_0_init_golden_registers(adev);
2937 gfx_v9_0_gpu_init(adev);
2939 r = gfx_v9_0_rlc_resume(adev);
2943 r = gfx_v9_0_cp_resume(adev);
2947 r = gfx_v9_0_ngg_en(adev);
2954 static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
2956 struct amdgpu_device *adev = kiq_ring->adev;
2957 uint32_t scratch, tmp = 0;
2960 r = amdgpu_gfx_scratch_get(adev, &scratch);
2962 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2965 WREG32(scratch, 0xCAFEDEAD);
2967 r = amdgpu_ring_alloc(kiq_ring, 10);
2969 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2970 amdgpu_gfx_scratch_free(adev, scratch);
2975 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
2976 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
2977 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
2978 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
2979 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
2980 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
2981 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
2982 amdgpu_ring_write(kiq_ring, 0);
2983 amdgpu_ring_write(kiq_ring, 0);
2984 amdgpu_ring_write(kiq_ring, 0);
2985 /* write to scratch for completion */
2986 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2987 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2988 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2989 amdgpu_ring_commit(kiq_ring);
2991 for (i = 0; i < adev->usec_timeout; i++) {
2992 tmp = RREG32(scratch);
2993 if (tmp == 0xDEADBEEF)
2997 if (i >= adev->usec_timeout) {
2998 DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
3001 amdgpu_gfx_scratch_free(adev, scratch);
3006 static int gfx_v9_0_hw_fini(void *handle)
3008 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3011 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3012 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3014 /* disable KCQ to avoid CPC touch memory not valid anymore */
3015 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3016 gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
3018 if (amdgpu_sriov_vf(adev)) {
3019 pr_debug("For SRIOV client, nothing to do here.\n");
3022 gfx_v9_0_cp_enable(adev, false);
3023 gfx_v9_0_rlc_stop(adev);
3028 static int gfx_v9_0_suspend(void *handle)
3030 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3032 adev->gfx.in_suspend = true;
3033 return gfx_v9_0_hw_fini(adev);
3036 static int gfx_v9_0_resume(void *handle)
3038 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3041 r = gfx_v9_0_hw_init(adev);
3042 adev->gfx.in_suspend = false;
3046 static bool gfx_v9_0_is_idle(void *handle)
3048 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3050 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3051 GRBM_STATUS, GUI_ACTIVE))
3057 static int gfx_v9_0_wait_for_idle(void *handle)
3060 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3062 for (i = 0; i < adev->usec_timeout; i++) {
3063 if (gfx_v9_0_is_idle(handle))
3070 static int gfx_v9_0_soft_reset(void *handle)
3072 u32 grbm_soft_reset = 0;
3074 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3077 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3078 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3079 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3080 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3081 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3082 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3083 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3084 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3085 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3086 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3087 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3090 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3091 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3092 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3096 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3097 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3098 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3099 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3102 if (grbm_soft_reset) {
3104 gfx_v9_0_rlc_stop(adev);
3106 /* Disable GFX parsing/prefetching */
3107 gfx_v9_0_cp_gfx_enable(adev, false);
3109 /* Disable MEC parsing/prefetching */
3110 gfx_v9_0_cp_compute_enable(adev, false);
3112 if (grbm_soft_reset) {
3113 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3114 tmp |= grbm_soft_reset;
3115 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3116 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3117 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3121 tmp &= ~grbm_soft_reset;
3122 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3123 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3126 /* Wait a little for things to settle down */
3132 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3136 mutex_lock(&adev->gfx.gpu_clock_mutex);
3137 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3138 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3139 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3140 mutex_unlock(&adev->gfx.gpu_clock_mutex);
3144 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3146 uint32_t gds_base, uint32_t gds_size,
3147 uint32_t gws_base, uint32_t gws_size,
3148 uint32_t oa_base, uint32_t oa_size)
3150 gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3151 gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3153 gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3154 gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3156 oa_base = oa_base >> AMDGPU_OA_SHIFT;
3157 oa_size = oa_size >> AMDGPU_OA_SHIFT;
3160 gfx_v9_0_write_data_to_reg(ring, 0, false,
3161 amdgpu_gds_reg_offset[vmid].mem_base,
3165 gfx_v9_0_write_data_to_reg(ring, 0, false,
3166 amdgpu_gds_reg_offset[vmid].mem_size,
3170 gfx_v9_0_write_data_to_reg(ring, 0, false,
3171 amdgpu_gds_reg_offset[vmid].gws,
3172 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
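/* The OA write below builds a contiguous mask of oa_size bits starting
 * at bit oa_base, e.g. oa_base = 4, oa_size = 2:
 * (1 << (2 + 4)) - (1 << 4) = 0x30.
 */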
3175 gfx_v9_0_write_data_to_reg(ring, 0, false,
3176 amdgpu_gds_reg_offset[vmid].oa,
3177 (1 << (oa_size + oa_base)) - (1 << oa_base));
3180 static int gfx_v9_0_early_init(void *handle)
3182 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3184 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3185 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3186 gfx_v9_0_set_ring_funcs(adev);
3187 gfx_v9_0_set_irq_funcs(adev);
3188 gfx_v9_0_set_gds_init(adev);
3189 gfx_v9_0_set_rlc_funcs(adev);
3194 static int gfx_v9_0_late_init(void *handle)
3196 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3199 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3203 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3210 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3212 uint32_t rlc_setting, data;
3215 if (adev->gfx.rlc.in_safe_mode)
3218 /* if RLC is not enabled, do nothing */
3219 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3220 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3223 if (adev->cg_flags &
3224 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3225 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3226 data = RLC_SAFE_MODE__CMD_MASK;
3227 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3228 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3230 /* wait for RLC_SAFE_MODE */
3231 for (i = 0; i < adev->usec_timeout; i++) {
3232 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) /* read the register value, not its offset */
3236 adev->gfx.rlc.in_safe_mode = true;
3240 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3242 uint32_t rlc_setting, data;
3244 if (!adev->gfx.rlc.in_safe_mode)
3247 /* if RLC is not enabled, do nothing */
3248 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3249 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3252 if (adev->cg_flags &
3253 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3255 * Try to exit safe mode only if it is already in safe
* mode. */
3258 data = RLC_SAFE_MODE__CMD_MASK;
3259 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3260 adev->gfx.rlc.in_safe_mode = false;
3264 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3267 /* TODO: double check if we need to perform under safe mode */
3268 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3270 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3271 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3272 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3273 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3275 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3276 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3279 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3282 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3285 /* TODO: double check if we need to perform under safe mode */
3286 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3288 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3289 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3291 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3293 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3294 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3296 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3298 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
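/* All the clock-gating updates below share one pattern: read the
 * current register, adjust the override/enable bits, and write back
 * only if the value changed, to avoid redundant register traffic.
 */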
3301 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3306 /* It is disabled by HW by default */
3307 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3308 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3309 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3310 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3311 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3312 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3313 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3315 /* only for Vega10 & Raven1 */
3316 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3319 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3321 /* MGLS is a global flag to control all MGLS in GFX */
3322 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3323 /* 2 - RLC memory Light sleep */
3324 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3325 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3326 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3328 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3330 /* 3 - CP memory Light sleep */
3331 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3332 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3333 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3335 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3339 /* 1 - MGCG_OVERRIDE */
3340 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3341 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3342 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3343 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3344 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3345 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3347 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3349 /* 2 - disable MGLS in RLC */
3350 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3351 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3352 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3353 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3356 /* 3 - disable MGLS in CP */
3357 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3358 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3359 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3360 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3365 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3370 adev->gfx.rlc.funcs->enter_safe_mode(adev);
3372 /* Enable 3D CGCG/CGLS */
3373 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3374 /* write cmd to clear cgcg/cgls ov */
3375 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3376 /* unset CGCG override */
3377 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3378 /* update CGCG and CGLS override bits */
3380 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3381 /* enable 3D CGCG FSM (0x0020003f) */
3382 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3383 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3384 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3385 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3386 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3387 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3389 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3391 /* set IDLE_POLL_COUNT(0x00900100) */
3392 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3393 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3394 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3396 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3398 /* Disable CGCG/CGLS */
3399 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3400 /* disable cgcg, cgls should be disabled */
3401 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3402 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3403 /* disable cgcg and cgls in FSM */
3405 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3408 adev->gfx.rlc.funcs->exit_safe_mode(adev);
3411 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3416 adev->gfx.rlc.funcs->enter_safe_mode(adev);
3418 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3419 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3420 /* unset CGCG override */
3421 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3422 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3423 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3425 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3426 /* update CGCG and CGLS override bits */
3428 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3430 /* enable cgcg FSM (0x0020003F) */
3431 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3432 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3433 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3434 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3435 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3436 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3438 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3440 /* set IDLE_POLL_COUNT(0x00900100) */
3441 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3442 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3443 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3445 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3447 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3448 /* reset CGCG/CGLS bits */
3449 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3450 /* disable cgcg and cgls in FSM */
3452 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3455 adev->gfx.rlc.funcs->exit_safe_mode(adev);
3458 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3462 /* CGCG/CGLS should be enabled after MGCG/MGLS
3463 * === MGCG + MGLS ===
3465 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3466 /* === CGCG /CGLS for GFX 3D Only === */
3467 gfx_v9_0_update_3d_clock_gating(adev, enable);
3468 /* === CGCG + CGLS === */
3469 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3471 /* CGCG/CGLS should be disabled before MGCG/MGLS
3472 * === CGCG + CGLS ===
3474 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3475 /* === CGCG /CGLS for GFX 3D Only === */
3476 gfx_v9_0_update_3d_clock_gating(adev, enable);
3477 /* === MGCG + MGLS === */
3478 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3483 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3484 .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3485 .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
3488 static int gfx_v9_0_set_powergating_state(void *handle,
3489 enum amd_powergating_state state)
3491 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3492 bool enable = (state == AMD_PG_STATE_GATE);
3494 switch (adev->asic_type) {
3496 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3497 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3498 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3500 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3501 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3504 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3505 gfx_v9_0_enable_cp_power_gating(adev, true);
3507 gfx_v9_0_enable_cp_power_gating(adev, false);
3509 /* update gfx cgpg state */
3510 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3512 /* update mgcg state */
3513 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3522 static int gfx_v9_0_set_clockgating_state(void *handle,
3523 enum amd_clockgating_state state)
3525 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3527 if (amdgpu_sriov_vf(adev))
3530 switch (adev->asic_type) {
3533 gfx_v9_0_update_gfx_clock_gating(adev,
3534 state == AMD_CG_STATE_GATE);
3542 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3544 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3547 if (amdgpu_sriov_vf(adev))
3550 /* AMD_CG_SUPPORT_GFX_MGCG */
3551 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3552 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3553 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
3555 /* AMD_CG_SUPPORT_GFX_CGCG */
3556 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3557 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3558 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
3560 /* AMD_CG_SUPPORT_GFX_CGLS */
3561 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3562 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
3564 /* AMD_CG_SUPPORT_GFX_RLC_LS */
3565 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3566 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3567 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3569 /* AMD_CG_SUPPORT_GFX_CP_LS */
3570 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3571 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3572 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3574 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3575 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3576 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3577 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
3579 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3580 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3581 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
3584 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
3586 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
3589 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
3591 struct amdgpu_device *adev = ring->adev;
3594 /* XXX check if swapping is necessary on BE */
3595 if (ring->use_doorbell) {
3596 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
3598 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
3599 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
3605 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
3607 struct amdgpu_device *adev = ring->adev;
3609 if (ring->use_doorbell) {
3610 /* XXX check if swapping is necessary on BE */
3611 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3612 WDOORBELL64(ring->doorbell_index, ring->wptr);
3614 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3615 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3619 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3621 u32 ref_and_mask, reg_mem_engine;
3622 const struct nbio_hdp_flush_reg *nbio_hf_reg;
3624 if (ring->adev->flags & AMD_IS_APU)
3625 nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
3627 nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
3629 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3632 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
3635 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
3642 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
3643 reg_mem_engine = 1; /* pfp */
3646 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3647 nbio_hf_reg->hdp_flush_req_offset,
3648 nbio_hf_reg->hdp_flush_done_offset,
3649 ref_and_mask, ref_and_mask, 0x20);
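/* The HDP flush is a req/ack handshake with the NBIO block: write
 * ref_and_mask into the hdp_flush_req register, then poll the matching
 * bit in hdp_flush_done; gfx rings issue it from the PFP
 * (reg_mem_engine = 1).
 */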
3652 static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
3654 gfx_v9_0_write_data_to_reg(ring, 0, true,
3655 SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
3658 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3659 struct amdgpu_ib *ib,
3660 unsigned vm_id, bool ctx_switch)
3662 u32 header, control = 0;
3664 if (ib->flags & AMDGPU_IB_FLAG_CE)
3665 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3667 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3669 control |= ib->length_dw | (vm_id << 24);
3671 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3672 control |= INDIRECT_BUFFER_PRE_ENB(1);
3674 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3675 gfx_v9_0_ring_emit_de_meta(ring);
3678 amdgpu_ring_write(ring, header);
3679 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3680 amdgpu_ring_write(ring,
3684 lower_32_bits(ib->gpu_addr));
3685 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3686 amdgpu_ring_write(ring, control);
3689 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3690 struct amdgpu_ib *ib,
3691 unsigned vm_id, bool ctx_switch)
3693 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
3695 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3696 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3697 amdgpu_ring_write(ring,
3701 lower_32_bits(ib->gpu_addr));
3702 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3703 amdgpu_ring_write(ring, control);
static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EOP_TC_MD_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data low (discard data high)
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
			      lower_32_bits(addr), upper_32_bits(addr),
			      seq, 0xffffffff, 4);
}

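/*
 * VM flush sequence: write the new page-directory base into this VMID's
 * context registers on the ring's vmhub, kick the invalidation engine,
 * then poll the per-VMID ack bit.  Gfx rings additionally re-sync PFP to
 * ME so prefetched command data cannot use stale translations.
 */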
static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
	unsigned eng = ring->vm_inv_eng;

	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
	pd_addr |= AMDGPU_PTE_VALID;

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
				   lower_32_bits(pd_addr));

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
				   upper_32_bits(pd_addr));

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->vm_inv_eng0_req + eng, req);

	/* wait for the invalidate to complete */
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
			      eng, 0, 1 << vm_id, 1 << vm_id, 0x20);

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

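/*
 * Compute/KIQ rings keep both ring pointers in write-back memory: the
 * rptr is a 32-bit value updated by the CP, the wptr a 64-bit value the
 * driver publishes through a doorbell.
 */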
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}

static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

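/*
 * CE/DE metadata for SR-IOV mid-command-buffer preemption.  The payloads
 * are written into the clear-state area; the CSA address used below is a
 * fixed offset from the top of the reserved VA range (two 4K pages down,
 * with the GDS backup page following), which presumably must match the
 * static CSA mapping set up for virtualization.
 */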
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	struct v9_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
}

static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}

static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}

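/*
 * CONTEXT_CONTROL: dw2 tells the CP which state to (re)load.  Bit 31 is
 * the global load enable; the lower bits select register ranges (global
 * config/uconfig, SH regs, per-context state) per the PM4 spec, and bit
 * 28 reloads CE RAM when a preamble IB is present.
 */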
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	gfx_v9_0_ring_emit_tmz(ring, true);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time a preamble is
		 * presented, even though no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

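/*
 * Conditional-execution pair used for preemption: init_cond_exec emits a
 * COND_EXEC packet whose dword count is a placeholder (0x55aa55aa) and
 * returns the ring offset of that slot; patch_cond_exec fills in the
 * real count once the frame is complete.  Example: offset 10, wptr now
 * 20, so cur = 19 and the patched value is 19 - 10 = 9 dwords to skip;
 * if the write pointer has wrapped, the ring size in dwords is added
 * back in.
 */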
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}

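/*
 * Ring-based register access, used on the KIQ under SR-IOV: a read is a
 * COPY_DATA from the register into the shared write-back page at
 * adev->virt.reg_val_offs, a write is a WRITE_DATA directly to the
 * register.
 */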
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

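/*
 * EOP interrupt dispatch.  The IV ring_id encodes the source queue:
 * bits [3:2] ME, bits [1:0] pipe, bits [6:4] queue; e.g. ring_id 0x25
 * decodes to me 1, pipe 1, queue 2.  ME 0 is the gfx ring, ME 1/2 are
 * matched against the compute rings.
 */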
static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead
			 * of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

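/*
 * The KIQ signals fences through the CPC GENERIC2 interrupt, so enabling
 * it takes two writes: the global enable in CPC_INT_CNTL plus the
 * per-pipe enable in whichever CP_MEx_PIPEy_INT_CNTL register the KIQ
 * lives on.  REG_SET_FIELD below names CP_ME2_PIPE0_INT_CNTL, but the
 * field layout is identical across the per-pipe INT_CNTL registers.
 */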
static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	if (ring->me == 1)
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
	else
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32(target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32(target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only supports GENERIC2_INT now */
		break;
	}
	return 0;
}

static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
		   me_id, pipe_id, queue_id);

	amdgpu_fence_process(ring);
	return 0;
}

static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		24 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 +  /* double SWITCH_BUFFER,
		      * the first COND_EXEC jumps to the place just
		      * prior to this double SWITCH_BUFFER */
		5 +  /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 +  /* CNTX_CTRL */
		5 +  /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
	.set = gfx_v9_0_kiq_set_interrupt_state,
	.process = gfx_v9_0_kiq_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

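/*
 * Static GDS partitioning.  The total GDS size is read back from
 * GDS_VMID0_SIZE, apparently because VMID0 spans all of GDS at init;
 * with the expected 64KB of GDS the gfx and CS partitions are 4KB each,
 * otherwise a conservative 1KB split is assumed.
 */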
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init ASIC gds info */
	adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

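/*
 * Build the CU info table: for each SE/SH, apply any user-requested CU
 * disable mask, read back the active-CU bitmap and count its bits.
 * Always-on ("ao") CU masks are packed 8 bits per SH, 16 bits per SE.
 */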
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v9_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;

			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};