/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"

#include "soc15.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"

/**
 * Navi10 has two graphic rings to share each graphic pipe.
 * 1. Primary ring
 * 2. Async ring
 *
 * In the bring-up phase only the primary ring was used (gfx ring number 1);
 * both rings are enabled now, so the gfx ring number is set to 2.
 */
#define GFX10_NUM_GFX_RINGS	2
#define GFX10_MEC_HPD_SIZE	2048

#define F32_CE_PROGRAM_RAM_SIZE		65536
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xfeff8fff, 0xfeff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x000007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0x20000000, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07900000, 0x04900000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
};

static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
{
	/* Pending on emulation bring up */
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);

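/*
 * KIQ (Kernel Interface Queue) PM4 helpers: the KIQ is a privileged compute
 * queue through which the driver asks the CP firmware to map, unmap and
 * query other queues. Each helper below emits a single PM4 packet on the
 * KIQ ring; the packet sizes must match gfx_v10_0_kiq_pm4_funcs further down.
 */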
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx10_kiq_set_resources,
	.kiq_map_queues = gfx10_kiq_map_queues,
	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
	.kiq_query_status = gfx10_kiq_query_status,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
};

static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}

static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_0_nv10,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
		break;
	default:
		break;
	}
}

static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

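/*
 * Basic ring sanity test: seed a scratch register with 0xCAFEDEAD, emit a
 * SET_UCONFIG_REG packet that writes 0xDEADBEEF to it, then poll the
 * register until the CP has executed the packet or the timeout expires.
 */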
static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}
	if (i < adev->usec_timeout) {
		if (amdgpu_emu_mode == 1)
			DRM_INFO("ring test on %d succeeded in %d msecs\n",
				 ring->idx, i);
		else
			DRM_INFO("ring test on %d succeeded in %d usecs\n",
				 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

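/*
 * Same check as the ring test, but the scratch write is carried in an
 * indirect buffer and completion is observed through the returned fence,
 * so the whole IB submission path is exercised.
 */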
static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		break;
	default:
		break;
	}
}

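/*
 * Fetch and validate all gfx firmware images (PFP, ME, CE, RLC, MEC and,
 * optionally, MEC2) for the detected chip, cache version/feature numbers,
 * and, for PSP-based loading, register each image in adev->firmware.ucode[]
 * so it can be uploaded later. A missing mec2 image is not an error.
 */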
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v10_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) -
			      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx10: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}

	gfx_v10_0_check_gfxoff_flag(adev);

	return err;
}

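/*
 * The clear state buffer (CSB) is a small PM4 stream the RLC replays to
 * bring the context registers to a known state: preamble begin, a
 * CONTEXT_CONTROL packet, one SET_CONTEXT_REG packet per extent of
 * gfx10_cs_data, the PA_SC_TILE_STEERING_OVERRIDE write, preamble end and
 * a final CLEAR_STATE packet. The two helpers below size and fill it.
 */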
static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx10_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	return 0;
}

static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			  AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}

static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}

static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v10_0_me_init(struct amdgpu_device *adev)
{
	int r;

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

	r = gfx_v10_0_init_microcode(adev);
	if (r)
		DRM_ERROR("Failed to load gfx firmware!\n");

	return r;
}

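/*
 * MEC setup: allocate one GFX10_MEC_HPD_SIZE EOP slot per enabled compute
 * ring, and, when the driver (rather than the PSP) loads the CP firmware,
 * stage a GTT copy of the MEC image for direct upload.
 */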
static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data = NULL;
	unsigned fw_size;
	u32 *fw = NULL;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v10_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			   le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

		r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.mec_fw_obj,
					      &adev->gfx.mec.mec_fw_gpu_addr,
					      (void **)&fw);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
			gfx_v10_0_mec_fini(adev);
			return r;
		}

		memcpy(fw, fw_data, fw_size);

		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	}

	return 0;
}

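/*
 * Wave state is read back through the SQ indirect register pair: the wave
 * and register index go into mmSQ_IND_INDEX and the data comes out of
 * mmSQ_IND_DATA, with AUTO_INCR used for consecutive GPR reads. These
 * helpers back the read_wave_* hooks in gfx_v10_0_gfx_funcs below.
 */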
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx10 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * unused
	 */
	/* type 2 wave data */
	dst[(*no_fields)++] = 2;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
}

static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	nv_grbm_select(adev, me, pipe, q, vm);
}

static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v10_0_select_se_sh,
	.read_wave_data = &gfx_v10_0_read_wave_data,
	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
};

static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v10_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
}

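/*
 * Note on the doorbell math in the two ring-init helpers below:
 * adev->doorbell_index counts 64-bit doorbell slots while
 * ring->doorbell_index is a 32-bit dword offset, hence the << 1 when the
 * index is assigned.
 */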
static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
			     + (ring_id * GFX10_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

static int gfx_v10_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 2;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
			      &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v10_0_scratch_init(adev);

	r = gfx_v10_0_me_init(adev);
	if (r)
		return r;

	r = gfx_v10_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v10_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v10_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
								     j))
					continue;

				r = gfx_v10_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	adev->gfx.ce_ram_size = F32_CE_PROGRAM_RAM_SIZE;

	gfx_v10_0_gpu_early_init(adev);

	return 0;
}

static void gfx_v10_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
}

static void gfx_v10_0_ce_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
			      &adev->gfx.ce.ce_fw_gpu_addr,
			      (void **)&adev->gfx.ce.ce_fw_ptr);
}

static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);
}

static int gfx_v10_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v10_0_pfp_fini(adev);
	gfx_v10_0_ce_fini(adev);
	gfx_v10_0_me_fini(adev);
	gfx_v10_0_rlc_fini(adev);
	gfx_v10_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);

	gfx_v10_0_free_microcode(adev);

	return 0;
}

static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

static u32 gfx_v10_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v10_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

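/*
 * PA_SC_TILE_STEERING_OVERRIDE packs log2 of the number of scan converters,
 * of RBs per SC and of packers per SC. The value computed here is also
 * replayed through the clear state buffer (see gfx_v10_0_get_csb_buffer
 * above).
 */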
static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *adev)
{
	uint32_t num_sc;
	uint32_t enabled_rb_per_sh;
	uint32_t active_rb_bitmap;
	uint32_t num_rb_per_sc;
	uint32_t num_packer_per_sc;
	uint32_t pa_sc_tile_steering_override;

	/* init num_sc */
	num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
		adev->gfx.config.num_sc_per_sh;
	/* init num_rb_per_sc */
	active_rb_bitmap = gfx_v10_0_get_rb_active_bitmap(adev);
	enabled_rb_per_sh = hweight32(active_rb_bitmap);
	num_rb_per_sc = enabled_rb_per_sh / adev->gfx.config.num_sc_per_sh;
	/* init num_packer_per_sc */
	num_packer_per_sc = adev->gfx.config.num_packer_per_sc;

	pa_sc_tile_steering_override = 0;
	pa_sc_tile_steering_override |=
		(order_base_2(num_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_rb_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_packer_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK;

	return pa_sc_tile_steering_override;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)

static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

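/*
 * WGP harvesting: for every workgroup processor that is inactive, set the
 * matching TCP/SQC disable bits in GCRD_SA_TARGETS_DISABLE and
 * UTCL1_UTCL0_INVREQ_DISABLE so no requests are steered at the harvested
 * units. Each WGP owns two TCP bits, hence the 2 * max_wgp_per_sh terms in
 * the masks.
 */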
static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
{
	int i, j, k;
	int max_wgp_per_sh = adev->gfx.config.max_cu_per_sh >> 1;
	u32 tmp, wgp_active_bitmap = 0;
	u32 gcrd_targets_disable_tcp = 0;
	u32 utcl_invreq_disable = 0;
	/*
	 * GCRD_TARGETS_DISABLE field contains
	 * for Navi10: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
	 */
	u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		max_wgp_per_sh + /* SQC */
		4); /* GL1C */
	/*
	 * UTCL1_UTCL0_INVREQ_DISABLE field contains
	 * for Navi10: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
	 */
	u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		2 * max_wgp_per_sh + /* SQC */
		4 + /* RMI */
		1); /* SQG */

	if (adev->asic_type == CHIP_NAVI10) {
		mutex_lock(&adev->grbm_idx_mutex);
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
				wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
				/*
				 * Set corresponding TCP bits for the inactive WGPs in
				 * GCRD_SA_TARGETS_DISABLE
				 */
				gcrd_targets_disable_tcp = 0;
				/* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
				utcl_invreq_disable = 0;

				for (k = 0; k < max_wgp_per_sh; k++) {
					if (!(wgp_active_bitmap & (1 << k))) {
						gcrd_targets_disable_tcp |= 3 << (2 * k);
						utcl_invreq_disable |= (3 << (2 * k)) |
							(3 << (2 * (max_wgp_per_sh + k)));
					}
				}

				tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
				/* only override TCP & SQC bits */
				tmp &= 0xffffffff << (4 * max_wgp_per_sh);
				tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
				WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);

				tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
				/* only override TCP bits */
				tmp &= 0xffffffff << (2 * max_wgp_per_sh);
				tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
				WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
			}
		}

		gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}
}

static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v10_0_tiling_mode_table_init(adev);

	gfx_v10_0_setup_rb(adev);
	gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.pa_sc_tile_steering_override =
		gfx_v10_0_init_pa_sc_tile_steering_override(adev);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
		}
	}
	nv_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v10_0_init_compute_vmid(adev);
}

static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}

static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
	/* csib */
	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
}

static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
{
	gfx_v10_0_init_csb(adev);

	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);

	/* TODO: init power gating */
	return;
}

void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
}

static void gfx_v10_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);

	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 * RLC will wait for handshake acks with SMU
		 * GFXOFF will be enabled
		 * RLC_PG_CNTL[23] = 1
		 * RLC will not issue any message to SMU
		 * hence no handshake between SMU & RLC
		 * GFXOFF will be disabled
		 */
		rlc_pg_cntl |= 0x80000;
	} else
		rlc_pg_cntl &= ~0x80000;
	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
}

static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
{
	/* TODO: enable rlc & smu handshake until smu
	 * and gfxoff feature works as expected */
	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
		gfx_v10_0_rlc_smu_handshake_cntl(adev, false);

	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
	udelay(50);
}

static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* enable Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
}

static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA,
			     le32_to_cpup(fw_data++));

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
		if (r)
			return r;
		gfx_v10_0_init_pg(adev);

		/* enable RLC SRM */
		gfx_v10_0_rlc_enable_srm(adev);
	} else {
		adev->gfx.rlc.funcs->stop(adev);

		/* disable CG */
		WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);

		/* disable PG */
		WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			/* legacy rlc firmware loading */
			r = gfx_v10_0_rlc_load_microcode(adev);
			if (r)
				return r;
		} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
			/* rlc backdoor autoload firmware */
			r = gfx_v10_0_rlc_backdoor_autoload_enable(adev);
			if (r)
				return r;
		}

		gfx_v10_0_init_pg(adev);
		adev->gfx.rlc.funcs->start(adev);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
			r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
			if (r)
				return r;
		}
	}
	return 0;
}

static struct {
	FIRMWARE_ID	id;
	unsigned int	offset;
	unsigned int	size;
} rlc_autoload_info[FIRMWARE_ID_MAX];

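/*
 * RLC backdoor autoload: the PSP SOS binary carries a table of contents
 * describing where each firmware must sit inside one big autoload buffer.
 * gfx_v10_0_parse_rlc_toc() copies that TOC into a GTT bo and fills
 * rlc_autoload_info[], converting the dword-based offsets/sizes to bytes
 * (CP..MES entries additionally get 4KB-aligned offsets).
 */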
static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
{
	int ret;
	RLC_TABLE_OF_CONTENT *rlc_toc;

	ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_GTT,
					&adev->gfx.rlc.rlc_toc_bo,
					&adev->gfx.rlc.rlc_toc_gpu_addr,
					(void **)&adev->gfx.rlc.rlc_toc_buf);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to create rlc toc bo\n", ret);
		return ret;
	}

	/* Copy toc from psp sos fw to rlc toc buffer */
	memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);

	rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
	while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
	       (rlc_toc->id < FIRMWARE_ID_MAX)) {
		if ((rlc_toc->id >= FIRMWARE_ID_CP_CE) &&
		    (rlc_toc->id <= FIRMWARE_ID_CP_MES)) {
			/* Offset needs 4KB alignment */
			rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
		}

		rlc_autoload_info[rlc_toc->id].id = rlc_toc->id;
		rlc_autoload_info[rlc_toc->id].offset = rlc_toc->offset * 4;
		rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;

		rlc_toc++;
	}

	return 0;
}

static uint32_t gfx_v10_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	FIRMWARE_ID id;
	int ret;

	ret = gfx_v10_0_parse_rlc_toc(adev);
	if (ret) {
		dev_err(adev->dev, "failed to parse rlc toc\n");
		return 0;
	}

	for (id = FIRMWARE_ID_RLC_G_UCODE; id < FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[FIRMWARE_ID_MAX-1].offset +
			     rlc_autoload_info[FIRMWARE_ID_MAX-1].size;

	return total_size;
}

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v10_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_toc_bo,
			      &adev->gfx.rlc.rlc_toc_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_toc_buf);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

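/*
 * Copy one firmware image into its TOC-assigned slot of the autoload
 * buffer. A fw_size of 0 means "fill the whole slot", oversized images are
 * truncated to the slot, and any unused tail is zero-filled.
 */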
static void gfx_v10_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= FIRMWARE_ID_INVALID || id >= FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
}

static void gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
{
	void *data;
	uint32_t size;

	data = adev->gfx.rlc.rlc_toc_buf;
	size = rlc_autoload_info[FIRMWARE_ID_RLC_TOC].size;

	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_RLC_TOC,
						   data, size);
}

static void gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;

	/* pfp ucode */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_PFP,
						   fw_data, fw_size);

	/* ce ucode */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;
	fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_CE,
						   fw_data, fw_size);

	/* me ucode */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_ME,
						   fw_data, fw_size);

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
		le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size);

	/* mec1 ucode */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.mec_fw->data;
	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
		cp_hdr->jt_size * 4;
	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
						   FIRMWARE_ID_CP_MEC,
						   fw_data, fw_size);
	/* mec2 ucode is not necessary if mec2 ucode is same as mec1 */
}

/* Temporarily put sdma part here */
static void gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v1_0 *sdma_hdr;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		sdma_hdr = (const struct sdma_firmware_header_v1_0 *)
			adev->sdma.instance[i].fw->data;
		fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(sdma_hdr->header.ucode_size_bytes);

		if (i == 0) {
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA0_UCODE, fw_data, fw_size);
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA0_JT,
				(uint32_t *)fw_data +
				sdma_hdr->jt_offset,
				sdma_hdr->jt_size * 4);
		} else if (i == 1) {
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA1_UCODE, fw_data, fw_size);
			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
				FIRMWARE_ID_SDMA1_JT,
				(uint32_t *)fw_data +
				sdma_hdr->jt_offset,
				sdma_hdr->jt_size * 4);
		}
	}
}

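/*
 * Backdoor autoload: all ucode images (toc, sdma, gfx) are staged in one
 * GTT buffer and the RLC bootrom is pointed at the RLC_G image through
 * the HYP_BOOTLOAD registers; the RLC is then expected to load the
 * remaining engines' firmware by itself. This only works while the
 * bootrom is still in control (cold boot or vddgfx exit) with the RLC
 * halted, which the checks below verify.
 */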
static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size, tmp;
	uint64_t gpu_addr;

	gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(adev);
	gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
	gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);

	rlc_g_offset = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_LO, lower_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_SIZE, rlc_g_size);

	tmp = RREG32_SOC15(GC, 0, mmRLC_HYP_RESET_VECTOR);
	if (!(tmp & (RLC_HYP_RESET_VECTOR__COLD_BOOT_EXIT_MASK |
		   RLC_HYP_RESET_VECTOR__VDDGFX_EXIT_MASK))) {
		DRM_ERROR("Neither COLD_BOOT_EXIT nor VDDGFX_EXIT is set\n");
		return -EINVAL;
	}

	tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (tmp & RLC_CNTL__RLC_ENABLE_F32_MASK) {
		DRM_ERROR("RLC ROM should halt itself\n");
		return -EINVAL;
	}

	return 0;
}

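/*
 * The CP engines fetch their ucode through per-engine L1 instruction
 * caches. The helpers below follow the same pattern for each engine:
 * invalidate the cache, poll for completion, then point the cache base
 * at that engine's image inside the autoload buffer (the base registers
 * take a 4KB-aligned address, hence the 0xFFFFF000 mask).
 */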
static int gfx_v10_0_rlc_backdoor_autoload_config_me_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;
	uint64_t addr;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Program me ucode address into instruction cache address register */
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_ME].offset;
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
			lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
			upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;
	uint64_t addr;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Program ce ucode address into instruction cache address register */
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_CE].offset;
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
			lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
			upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;
	uint64_t addr;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Program pfp ucode address into instruction cache address register */
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_PFP].offset;
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
			lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
			upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(struct amdgpu_device *adev)
{
	uint32_t usec_timeout = 50000; /* wait for 50ms */
	uint32_t tmp;
	int i;
	uint64_t addr;

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Program mec1 ucode address into instruction cache address register */
	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
		rlc_autoload_info[FIRMWARE_ID_CP_MEC].offset;
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
			lower_32_bits(addr) & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
			upper_32_bits(addr));

	return 0;
}

static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
{
	uint32_t cp_status;
	uint32_t bootload_status;
	int i, r;

	for (i = 0; i < adev->usec_timeout; i++) {
		cp_status = RREG32_SOC15(GC, 0, mmCP_STAT);
		bootload_status = RREG32_SOC15(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS);
		if ((cp_status == 0) &&
		    (REG_GET_FIELD(bootload_status,
			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
			break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
		return -ETIMEDOUT;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_config_me_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(adev);
		if (r)
			return r;

		r = gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(adev);
		if (r)
			return r;
	}

	return 0;
}

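/*
 * Halt or un-halt ME, PFP and CE with a single CP_ME_CNTL write; when
 * halting, the gfx rings are marked not ready first so nothing new is
 * submitted while the front end is stopped.
 */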
static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
	if (!enable) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].sched.ready = false;
	}
	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
	udelay(50);
}

static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
		gfx_v10_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio_funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL, tmp);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
		adev->gfx.pfp.pfp_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;

	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
		le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, ce_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.ce.ce_fw_obj,
				      &adev->gfx.ce.ce_fw_gpu_addr,
				      (void **)&adev->gfx.ce.ce_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create ce fw bo\n", r);
		gfx_v10_0_ce_fini(adev);
		return r;
	}

	memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj);

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio_funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	/* write back the configured BASE_CNTL; without this the fields set
	 * above never reach the register (matches the pfp path) */
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL, tmp);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
		adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
		upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
		gfx_v10_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio_funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	/* write back the configured BASE_CNTL; without this the fields set
	 * above never reach the register (matches the pfp path) */
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL, tmp);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
		adev->gfx.me.me_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
		upper_32_bits(adev->gfx.me.me_fw_gpu_addr));

	return 0;
}

static int gfx_v10_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	gfx_v10_0_cp_gfx_enable(adev, false);

	r = gfx_v10_0_cp_gfx_load_pfp_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
		return r;
	}

	r = gfx_v10_0_cp_gfx_load_ce_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load ce fw\n", r);
		return r;
	}

	r = gfx_v10_0_cp_gfx_load_me_microcode(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
		return r;
	}

	return 0;
}

static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;
	int ctx_reg_offset;

	/* init the CP */
	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT,
		     adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);

	gfx_v10_0_cp_gfx_enable(adev, true);

	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG,
							  ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index -
						  PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	amdgpu_ring_write(ring, ctx_reg_offset);
	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_commit(ring);

	/* submit cs packet to copy state 0 to next available state */
	ring = &adev->gfx.gfx_ring[1];
	r = amdgpu_ring_alloc(ring, 2);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_commit(ring);

	return 0;
}

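/*
 * The two Navi10 gfx rings live on separate CP pipes and the RB
 * registers below are banked per pipe, so the target pipe has to be
 * selected via GRBM_GFX_CNTL.PIPEID (under srbm_mutex) before they are
 * programmed.
 */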
static void gfx_v10_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
					 CP_PIPE_ID pipe)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL);
	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, tmp);
}

static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
					  struct amdgpu_ring *ring)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

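/*
 * Bring up both gfx ring buffers: for each ring, program the buffer
 * size, the rptr/wptr writeback addresses and the ring base, enable the
 * doorbell, then emit the clear-state preamble via
 * gfx_v10_0_cp_gfx_start().
 */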
static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;
	u32 i;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

	/* Init gfx ring 0 for pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);
	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);

	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);

	/* Init gfx ring 1 for pipe 1 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
	mutex_unlock(&adev->srbm_mutex);
	ring = &adev->gfx.gfx_ring[1];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
	/* Set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
	WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);

	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);

	/* Switch to pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	/* start the ring */
	gfx_v10_0_cp_gfx_start(adev);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}

	return 0;
}

static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
			     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
			      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].sched.ready = false;
		adev->gfx.kiq.ring.sched.ready = false;
	}
	udelay(50);
}

static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;
	u32 usec_timeout = 50000; /* Wait for 50 ms */

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v10_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));

	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for invalidation complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
			INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	if (amdgpu_emu_mode == 1)
		adev->nbio_funcs->hdp_flush(adev, NULL);

	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr &
		     0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	/* MEC1 */
	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 0);

	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);

	/*
	 * TODO: Loading MEC2 firmware is only necessary if MEC2 should run
	 * different microcode than MEC1.
	 */

	return 0;
}

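/*
 * Tell the RLC which queue acts as the KIQ: the low bits of
 * RLC_CP_SCHEDULERS encode me/pipe/queue, and bit 7 set afterwards
 * presumably marks the entry as valid.
 */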
static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}

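/*
 * Fill the gfx MQD (memory queue descriptor), a CPU-written image of the
 * HQD register state for this ring. When the queue is mapped through the
 * KIQ, the CP initializes the hardware queue from this structure instead
 * of the driver writing the registers directly.
 */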
static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr;
	uint32_t tmp;
	uint32_t rb_bufsz;

	/* set up gfx hqd wptr */
	mqd->cp_gfx_hqd_wptr = 0;
	mqd->cp_gfx_hqd_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set up mqd control */
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
	mqd->cp_gfx_mqd_control = tmp;

	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
	mqd->cp_gfx_hqd_vmid = 0;

	/* set up default queue priority level
	 * 0x0 = low priority, 0x1 = high priority */
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
	mqd->cp_gfx_hqd_queue_priority = tmp;

	/* set up time quantum */
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
	mqd->cp_gfx_hqd_quantum = tmp;

	/* set up gfx hqd base. this is similar as CP_RB_BASE */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
	mqd->cp_gfx_hqd_rptr_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up rb_wptr_poll addr */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
	rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
	mqd->cp_gfx_hqd_cntl = tmp;

	/* set up cp_doorbell_control */
	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	mqd->cp_rb_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR);

	/* activate the queue */
	mqd->cp_gfx_hqd_active = 1;

	return 0;
}

#ifdef BRING_UP_DEBUG
static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_gfx_mqd *mqd = ring->mqd_ptr;

	/* set mmCP_GFX_HQD_WPTR/_HI to 0 */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);

	/* set GFX_MQD_BASE */
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);

	/* set GFX_MQD_CONTROL */
	WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);

	/* set GFX_HQD_VMID to 0 */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);

	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
			mqd->cp_gfx_hqd_queue_priority);
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);

	/* set GFX_HQD_BASE, similar as CP_RB_BASE */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);

	/* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);

	/* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);

	/* set RB_WPTR_POLL_ADDR */
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);

	/* set RB_DOORBELL_CONTROL */
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);

	/* activate the queue */
	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);

	return 0;
}
#endif

static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_gfx_mqd *mqd = ring->mqd_ptr;

	if (!adev->in_gpu_reset && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_gfx_mqd_init(ring);
#ifdef BRING_UP_DEBUG
		gfx_v10_0_gfx_queue_init_register(ring);
#endif
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
		if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
			memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
	} else if (adev->in_gpu_reset) {
		/* reset mqd with the backup copy */
		if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
			memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
		/* reset the ring */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
#ifdef BRING_UP_DEBUG
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_gfx_queue_init_register(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
#endif
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

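/*
 * Map the kernel gfx queues by submitting MAP_QUEUES packets to the KIQ
 * ring instead of programming HQD registers directly; the KIQ ring test
 * afterwards confirms the packets were actually consumed.
 */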
#ifndef BRING_UP_DEBUG
static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_gfx_rings);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);

	r = amdgpu_ring_test_ring(kiq_ring);
	if (r) {
		DRM_ERROR("kgq enable failed\n");
		kiq_ring->sched.ready = false;
	}
	return r;
}
#endif

static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;

		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v10_0_gfx_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}
#ifndef BRING_UP_DEBUG
	r = gfx_v10_0_kiq_enable_kgq(adev);
	if (r)
		goto done;
#endif
	r = gfx_v10_0_cp_gfx_start(adev);
	if (r)
		goto done;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}
done:
	return r;
}

static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_compute_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;

	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(GFX10_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* activate the queue */
	mqd->cp_hqd_active = 1;

	return 0;
}

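/*
 * Program a compute HQD directly from the prepared MQD image. Only the
 * KIQ itself is brought up this way; regular compute queues are mapped
 * through KIQ packets. The caller must hold srbm_mutex with the target
 * me/pipe/queue selected via nv_grbm_select().
 */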
static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_compute_mqd *mqd = ring->mqd_ptr;
	int j;

	/* disable wptr polling */
	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* write the EOP addr */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
	       mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
	       mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
	       mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
		       mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
		       mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
		       mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
		       mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
	       mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
	       mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
	       mqd->cp_mqd_control);

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
	       mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
	       mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
	       mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
		mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
	       mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
			(adev->doorbell_index.kiq * 2) << 2);
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
			(adev->doorbell_index.userqueue_end * 2) << 2);
	}

	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
	       mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
	       mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
	       mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
	       mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
	       mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_compute_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v10_0_kiq_setting(ring);

	if (adev->in_gpu_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);

		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_kiq_init_register(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_compute_mqd_init(ring);
		gfx_v10_0_kiq_init_register(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	}

	return 0;
}

static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_compute_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!adev->in_gpu_reset && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v10_0_compute_mqd_init(ring);
		nv_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq.ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0))
		return r;

	gfx_v10_0_kiq_init_queue(ring);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	ring->sched.ready = true;
	return 0;
}

static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v10_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v10_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev);
done:
	return r;
}

static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
{
	int r;
	int i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v10_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* legacy firmware loading */
		r = gfx_v10_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		r = gfx_v10_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	r = gfx_v10_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v10_0_kcq_resume(adev);
	if (r)
		return r;

	if (!amdgpu_async_gfx_ring) {
		r = gfx_v10_0_cp_gfx_resume(adev);
		if (r)
			return r;
	} else {
		r = gfx_v10_0_cp_async_gfx_ring_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
			 i, ring->me, ring->pipe, ring->queue);
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->sched.ready = false;
			return r;
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		ring->sched.ready = true;
		DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
			 i, ring->me, ring->pipe, ring->queue);
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->sched.ready = false;
	}

	return 0;
}

static void gfx_v10_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v10_0_cp_gfx_enable(adev, enable);
	gfx_v10_0_cp_compute_enable(adev, enable);
}

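/*
 * UMD-visible registers such as mmVGT_ESGS_RING_SIZE_UMD are remapped
 * onto their privileged counterparts through the GRBM CAM. Probe whether
 * the remapping is already set up by writing a test pattern through the
 * UMD alias and reading it back through the real register; the original
 * value is restored either way.
 */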
static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
{
	uint32_t data, pattern = 0xDEADBEEF;

	/* check if mmVGT_ESGS_RING_SIZE_UMD
	 * has been remapped to mmVGT_ESGS_RING_SIZE */
	data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);

	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);

	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);

	if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
		return true;
	} else {
		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
		return false;
	}
}

static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
{
	uint32_t data;

	/* initialize cam_index to 0
	 * index will auto-increment after each data write */
	WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);

	/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);

	/* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
	data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
	       (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
}

static int gfx_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gfx_v10_0_csb_vram_pin(adev);
	if (r)
		return r;

	if (!amdgpu_emu_mode)
		gfx_v10_0_init_golden_registers(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/*
		 * For gfx 10, rlc firmware loading relies on smu firmware
		 * being loaded first, so in direct loading type the smc
		 * ucode has to be loaded before the rlc is resumed.
		 */
		r = smu_load_microcode(&adev->smu);
		if (r)
			return r;

		r = smu_check_fw_status(&adev->smu);
		if (r) {
			pr_err("SMC firmware status is not correct\n");
			return r;
		}
	}

	/* if GRBM CAM not remapped, set up the remapping */
	if (!gfx_v10_0_check_grbm_cam_remapping(adev))
		gfx_v10_0_setup_grbm_cam_remapping(adev);

	gfx_v10_0_constants_init(adev);

	r = gfx_v10_0_rlc_resume(adev);
	if (r)
		return r;

	/*
	 * init golden registers and rlc resume may override some registers,
	 * reconfigure them here
	 */
	gfx_v10_0_tcp_harvest(adev);

	r = gfx_v10_0_cp_resume(adev);
	if (r)
		return r;

	return 0;
}

#ifndef BRING_UP_DEBUG
static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_gfx_rings))
		return -ENOMEM;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
					   PREEMPT_QUEUES, 0, 0);

	return amdgpu_ring_test_ring(kiq_ring);
}
#endif

static int gfx_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
#ifndef BRING_UP_DEBUG
	if (amdgpu_async_gfx_ring) {
		r = gfx_v10_0_kiq_disable_kgq(adev);
		if (r)
			DRM_ERROR("KGQ disable failed\n");
	}
#endif
	if (amdgpu_gfx_disable_kcq(adev))
		DRM_ERROR("KCQ disable failed\n");
	if (amdgpu_sriov_vf(adev)) {
		pr_debug("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}
	gfx_v10_0_cp_enable(adev, false);
	gfx_v10_0_enable_gui_idle_interrupt(adev, false);
	gfx_v10_0_csb_vram_unpin(adev);

	return 0;
}

static int gfx_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->in_suspend = true;
	return gfx_v10_0_hw_fini(adev);
}

static int gfx_v10_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = gfx_v10_0_hw_init(adev);
	adev->in_suspend = false;
	return r;
}

static bool gfx_v10_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
				GRBM_STATUS, GUI_ACTIVE))
		return false;
	else
		return true;
}

static int gfx_v10_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
			GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gfx_v10_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
		   GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
		   GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
		   | GRBM_STATUS__BCI_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP,
						1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX,
						1);
	}

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP,
						1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC,
						1);

	if (grbm_soft_reset) {
		/* stop the rlc */
		gfx_v10_0_rlc_stop(adev);

		/* Disable GFX parsing/prefetching */
		gfx_v10_0_cp_gfx_enable(adev, false);

		/* Disable MEC parsing/prefetching */
		gfx_v10_0_cp_compute_enable(adev, false);

		if (grbm_soft_reset) {
			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
			tmp |= grbm_soft_reset;
			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~grbm_soft_reset;
			WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		}

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}

static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					   uint32_t vmid,
					   uint32_t gds_base, uint32_t gds_size,
					   uint32_t gws_base, uint32_t gws_size,
					   uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v10_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
				    gds_base);

	/* GDS Size */
	gfx_v10_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
				    gds_size);

	/* GWS */
	gfx_v10_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v10_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
				    (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v10_0_set_kiq_pm4_funcs(adev);
	gfx_v10_0_set_ring_funcs(adev);
	gfx_v10_0_set_irq_funcs(adev);
	gfx_v10_0_set_gds_init(adev);
	gfx_v10_0_set_rlc_funcs(adev);

	return 0;
}

static int gfx_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}

static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_cntl;

	/* if RLC is not enabled, do nothing */
	rlc_cntl = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
}

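/*
 * Enter RLC safe mode: post the request in RLC_SAFE_MODE and spin until
 * the RLC acknowledges by clearing the CMD field. The unset path only
 * posts the request and does not wait.
 */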
static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
}

static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		/* only for Vega10 & Raven1 */
		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
		}
	}
}

static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
                                             bool enable)
{
    uint32_t data, def;

    /* Enable 3D CGCG/CGLS */
    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
        /* write cmd to clear cgcg/cgls ov */
        def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
        /* unset CGCG override */
        data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
        /* update CGCG and CGLS override bits */
        if (def != data)
            WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
        /* enable 3Dcgcg FSM(0x0000363f) */
        def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
        data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
                RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
            data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
                    RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
        if (def != data)
            WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);

        /* set IDLE_POLL_COUNT(0x00900100) */
        def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
        data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
               (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
        if (def != data)
            WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
    } else {
        /* Disable CGCG/CGLS */
        def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
        /* disable cgcg, cgls should be disabled */
        data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
                  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
        /* disable cgcg and cgls in FSM */
        if (def != data)
            WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
    }
}
static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
                                                       bool enable)
{
    uint32_t def, data;

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
        def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
        /* unset CGCG override */
        data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
            data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
        else
            data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
        /* update CGCG and CGLS override bits */
        if (def != data)
            WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

        /* enable cgcg FSM(0x0000363F) */
        def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
        data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
                RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
            data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
                    RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
        if (def != data)
            WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);

        /* set IDLE_POLL_COUNT(0x00900100) */
        def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
        data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
               (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
        if (def != data)
            WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
    } else {
        def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
        /* reset CGCG/CGLS bits */
        data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
        /* disable cgcg and cgls in FSM */
        if (def != data)
            WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
    }
}
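/*
 * Ordering matters here: when gating is enabled, the medium-grain FSMs
 * (MGCG/MGLS) are programmed before the coarse-grain ones (CGCG/CGLS);
 * when gating is disabled, the coarse-grain FSMs are torn down first.
 * The GUI idle interrupt is kept enabled whenever any gating feature is
 * active, presumably because the RLC uses it to drive the gating FSMs.
 */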
static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
                                             bool enable)
{
    amdgpu_gfx_rlc_enter_safe_mode(adev);

    if (enable) {
        /* CGCG/CGLS should be enabled after MGCG/MGLS
         * ===  MGCG + MGLS ===
         */
        gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
        /* ===  CGCG/CGLS for GFX 3D Only === */
        gfx_v10_0_update_3d_clock_gating(adev, enable);
        /* ===  CGCG + CGLS === */
        gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
    } else {
        /* CGCG/CGLS should be disabled before MGCG/MGLS
         * ===  CGCG + CGLS ===
         */
        gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
        /* ===  CGCG/CGLS for GFX 3D Only === */
        gfx_v10_0_update_3d_clock_gating(adev, enable);
        /* ===  MGCG + MGLS === */
        gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
    }

    if (adev->cg_flags &
        (AMD_CG_SUPPORT_GFX_MGCG |
         AMD_CG_SUPPORT_GFX_CGCG |
         AMD_CG_SUPPORT_GFX_CGLS |
         AMD_CG_SUPPORT_GFX_3D_CGCG |
         AMD_CG_SUPPORT_GFX_3D_CGLS))
        gfx_v10_0_enable_gui_idle_interrupt(adev, enable);

    amdgpu_gfx_rlc_exit_safe_mode(adev);

    return 0;
}
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
    .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
    .set_safe_mode = gfx_v10_0_set_safe_mode,
    .unset_safe_mode = gfx_v10_0_unset_safe_mode,
    .init = gfx_v10_0_rlc_init,
    .get_csb_size = gfx_v10_0_get_csb_size,
    .get_csb_buffer = gfx_v10_0_get_csb_buffer,
    .resume = gfx_v10_0_rlc_resume,
    .stop = gfx_v10_0_rlc_stop,
    .reset = gfx_v10_0_rlc_reset,
    .start = gfx_v10_0_rlc_start
};
static int gfx_v10_0_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool enable = (state == AMD_PG_STATE_GATE) ? true : false;

    switch (adev->asic_type) {
    case CHIP_NAVI10:
        if (!enable) {
            amdgpu_gfx_off_ctrl(adev, false);
            cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
        } else {
            amdgpu_gfx_off_ctrl(adev, true);
        }
        break;
    default:
        break;
    }

    return 0;
}
static int gfx_v10_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    switch (adev->asic_type) {
    case CHIP_NAVI10:
        gfx_v10_0_update_gfx_clock_gating(adev,
                                          state == AMD_CG_STATE_GATE ? true : false);
        break;
    default:
        break;
    }

    return 0;
}
static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int data;

    /* AMD_CG_SUPPORT_GFX_MGCG */
    data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
    if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
        *flags |= AMD_CG_SUPPORT_GFX_MGCG;

    /* AMD_CG_SUPPORT_GFX_CGCG */
    data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
    if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
        *flags |= AMD_CG_SUPPORT_GFX_CGCG;

    /* AMD_CG_SUPPORT_GFX_CGLS */
    if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_GFX_CGLS;

    /* AMD_CG_SUPPORT_GFX_RLC_LS */
    data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
    if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

    /* AMD_CG_SUPPORT_GFX_CP_LS */
    data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
    if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;

    /* AMD_CG_SUPPORT_GFX_3D_CGCG */
    data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
    if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
        *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

    /* AMD_CG_SUPPORT_GFX_3D_CGLS */
    if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}
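/*
 * Ring pointer accessors. The read pointer always lives in a writeback
 * slot that the CP updates; gfx10 uses a 32-bit rptr. For the write
 * pointer, doorbell-enabled rings keep a 64-bit shadow in the writeback
 * area and kick the CP by writing the doorbell, while rings without a
 * doorbell fall back to the CP_RB0_WPTR/_HI MMIO pair.
 */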
static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
    return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 is 32bit rptr */
}
static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;
    u64 wptr;

    /* XXX check if swapping is necessary on BE */
    if (ring->use_doorbell) {
        wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
    } else {
        wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
        wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
    }

    return wptr;
}
static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

    if (ring->use_doorbell) {
        /* XXX check if swapping is necessary on BE */
        atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
        WDOORBELL64(ring->doorbell_index, ring->wptr);
    } else {
        WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
    }
}
static u64 gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
    return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware is 32bit rptr */
}
static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
    u64 wptr;

    /* XXX check if swapping is necessary on BE */
    if (ring->use_doorbell)
        wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
    else
        BUG();
    return wptr;
}
static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

    /* XXX check if swapping is necessary on BE */
    if (ring->use_doorbell) {
        atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
        WDOORBELL64(ring->doorbell_index, ring->wptr);
    } else {
        BUG(); /* only DOORBELL method supported on gfx10 now */
    }
}
static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;
    u32 ref_and_mask, reg_mem_engine;
    const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;

    if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
        switch (ring->me) {
        case 1:
            ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
            break;
        case 2:
            ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
            break;
        default:
            return;
        }
        reg_mem_engine = 0;
    } else {
        ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
        reg_mem_engine = 1; /* pfp */
    }

    gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                           adev->nbio_funcs->get_hdp_flush_req_offset(adev),
                           adev->nbio_funcs->get_hdp_flush_done_offset(adev),
                           ref_and_mask, ref_and_mask, 0x20);
}
static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                       struct amdgpu_job *job,
                                       struct amdgpu_ib *ib,
                                       uint32_t flags)
{
    unsigned vmid = AMDGPU_JOB_GET_VMID(job);
    u32 header, control = 0;

    /* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS.
     * This resets the wave ID counters. (needed by transform feedback)
     * TODO: This might only be needed on a VMID switch when we change
     *       the GDS OA mapping, not sure.
     */
    amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
    amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID);
    amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id);

    if (ib->flags & AMDGPU_IB_FLAG_CE)
        header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
    else
        header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

    control |= ib->length_dw | (vmid << 24);

    if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
        control |= INDIRECT_BUFFER_PRE_ENB(1);

        if (flags & AMDGPU_IB_PREEMPTED)
            control |= INDIRECT_BUFFER_PRE_RESUME(1);

        if (!(ib->flags & AMDGPU_IB_FLAG_CE))
            gfx_v10_0_ring_emit_de_meta(ring,
                        flags & AMDGPU_IB_PREEMPTED ? true : false);
    }

    amdgpu_ring_write(ring, header);
    BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
    amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
                      (2 << 0) |
#endif
                      lower_32_bits(ib->gpu_addr));
    amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
    amdgpu_ring_write(ring, control);
}
static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                           struct amdgpu_job *job,
                                           struct amdgpu_ib *ib,
                                           uint32_t flags)
{
    unsigned vmid = AMDGPU_JOB_GET_VMID(job);
    u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

    /* Currently there is a high likelihood of a wave-ID mismatch between
     * ME and GDS, leading to a hw deadlock, because ME generates
     * different wave IDs than the GDS expects. This happens randomly
     * when at least 5 compute pipes use GDS ordered append. The wave IDs
     * generated by ME are also wrong after suspend/resume. Those are
     * probably bugs somewhere else in the kernel driver.
     *
     * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
     * GDS to 0 for this ring (me/pipe).
     */
    if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
        amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
    }

    amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
    BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
    amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
                      (2 << 0) |
#endif
                      lower_32_bits(ib->gpu_addr));
    amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
    amdgpu_ring_write(ring, control);
}
static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                                      u64 seq, unsigned flags)
{
    struct amdgpu_device *adev = ring->adev;
    bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
    bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

    /* Interrupts don't work reliably on the GFX10.1 model yet; use the fallback instead */
    if (adev->pdev->device == 0x50)
        int_sel = false;

    /* RELEASE_MEM - flush caches, send int */
    amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
    amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
                             PACKET3_RELEASE_MEM_GCR_GL2_WB |
                             PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
                             PACKET3_RELEASE_MEM_GCR_GLM_WB |
                             PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
                             PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
                             PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
    amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
                             PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));

    /*
     * the address should be Qword aligned if 64bit write, Dword
     * aligned if only send 32bit data low (discard data high)
     */
    if (write64bit)
        BUG_ON(addr & 0x7);
    else
        BUG_ON(addr & 0x3);
    amdgpu_ring_write(ring, lower_32_bits(addr));
    amdgpu_ring_write(ring, upper_32_bits(addr));
    amdgpu_ring_write(ring, lower_32_bits(seq));
    amdgpu_ring_write(ring, upper_32_bits(seq));
    amdgpu_ring_write(ring, 0);
}
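/*
 * Wait on this ring's last emitted fence before letting later packets
 * run: a WAIT_REG_MEM on the fence writeback address until it reaches
 * sync_seq, issued from the PFP on gfx rings (so command fetch itself
 * stalls) and from the ME on compute rings.
 */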
static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
    int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
    uint32_t seq = ring->fence_drv.sync_seq;
    uint64_t addr = ring->fence_drv.gpu_addr;

    gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
                           upper_32_bits(addr), seq, 0xffffffff, 4);
}
static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vmid, uint64_t pd_addr)
{
    amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

    /* compute doesn't have PFP */
    if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
        /* sync PFP to ME, otherwise we might get invalid PFP reads */
        amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
        amdgpu_ring_write(ring, 0x0);
    }
}
static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
                                          u64 seq, unsigned int flags)
{
    struct amdgpu_device *adev = ring->adev;

    /* we only allocate 32bit for each seq wb address */
    BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

    /* write fence seq to the "addr" */
    amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
    amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
                             WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
    amdgpu_ring_write(ring, lower_32_bits(addr));
    amdgpu_ring_write(ring, upper_32_bits(addr));
    amdgpu_ring_write(ring, lower_32_bits(seq));

    if (flags & AMDGPU_FENCE_FLAG_INT) {
        /* set register to trigger INT */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
                                 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
        amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
    }
}
static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
{
    amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
    amdgpu_ring_write(ring, 0);
}
static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
    uint32_t dw2 = 0;

    if (amdgpu_mcbp)
        gfx_v10_0_ring_emit_ce_meta(ring,
                    flags & AMDGPU_IB_PREEMPTED ? true : false);

    gfx_v10_0_ring_emit_tmz(ring, true);

    dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
    if (flags & AMDGPU_HAVE_CTX_SWITCH) {
        /* set load_global_config & load_global_uconfig */
        dw2 |= 0x8001;
        /* set load_cs_sh_regs */
        dw2 |= 0x01000000;
        /* set load_per_context_state & load_gfx_sh_regs for GFX */
        dw2 |= 0x10002;

        /* set load_ce_ram if preamble presented */
        if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
            dw2 |= 0x10000000;
    } else {
        /* still load_ce_ram if this is the first time preamble presented
         * although there is no context switch happens.
         */
        if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
            dw2 |= 0x10000000;
    }

    amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
    amdgpu_ring_write(ring, dw2);
    amdgpu_ring_write(ring, 0);
}
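/*
 * Conditional-execution patching: init_cond_exec emits a COND_EXEC
 * packet whose DW count is a placeholder (0x55aa55aa) and returns its
 * ring offset; once the frame is fully emitted, patch_cond_exec rewrites
 * that DW with the real distance from the packet to the current wptr,
 * accounting for a possible ring-buffer wrap. E.g. if the packet sits
 * near the end of the ring and wptr has already wrapped, the patched
 * count is (buf_mask + 1) - offset + cur.
 */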
static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
    unsigned ret;

    amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
    amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
    amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
    amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
    ret = ring->wptr & ring->buf_mask;
    amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */

    return ret;
}
static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
    unsigned cur;

    BUG_ON(offset > ring->buf_mask);
    BUG_ON(ring->ring[offset] != 0x55aa55aa);

    cur = (ring->wptr - 1) & ring->buf_mask;
    if (likely(cur > offset))
        ring->ring[offset] = cur - offset;
    else
        ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}
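/*
 * Mid-command-buffer preemption (MCBP) of the gfx ring via the KIQ:
 * first flip the ring's COND_EXEC flag so any resumed packets are
 * skipped, then ask the KIQ to preempt the queue without unmapping it
 * (PREEMPT_QUEUES_NO_UNMAP) and to write a trailing fence once the CP
 * has actually stopped. The trailing fence is polled here because the
 * KIQ submission itself completes before the preemption does.
 */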
static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
    int i, r = 0;
    struct amdgpu_device *adev = ring->adev;
    struct amdgpu_kiq *kiq = &adev->gfx.kiq;
    struct amdgpu_ring *kiq_ring = &kiq->ring;

    if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
        return -EINVAL;

    if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
        return -ENOMEM;

    /* assert preemption condition */
    amdgpu_ring_set_preempt_cond_exec(ring, false);

    /* assert IB preemption, emit the trailing fence */
    kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
                               ring->trail_fence_gpu_addr,
                               ++ring->trail_seq);
    amdgpu_ring_commit(kiq_ring);

    /* poll the trailing fence */
    for (i = 0; i < adev->usec_timeout; i++) {
        if (ring->trail_seq ==
            le32_to_cpu(*(ring->trail_fence_cpu_addr)))
            break;
        udelay(1);
    }

    if (i >= adev->usec_timeout) {
        r = -EINVAL;
        DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
    }

    /* deassert preemption condition */
    amdgpu_ring_set_preempt_cond_exec(ring, true);
    return r;
}
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
{
    struct amdgpu_device *adev = ring->adev;
    struct v10_ce_ib_state ce_payload = {0};
    uint64_t csa_addr;
    int cnt;

    cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
    csa_addr = amdgpu_csa_vaddr(ring->adev);

    amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
    amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
                             WRITE_DATA_DST_SEL(8) |
                             WR_CONFIRM |
                             WRITE_DATA_CACHE_POLICY(0)));
    amdgpu_ring_write(ring, lower_32_bits(csa_addr +
                      offsetof(struct v10_gfx_meta_data, ce_payload)));
    amdgpu_ring_write(ring, upper_32_bits(csa_addr +
                      offsetof(struct v10_gfx_meta_data, ce_payload)));

    if (resume)
        amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
                                   offsetof(struct v10_gfx_meta_data,
                                            ce_payload),
                                   sizeof(ce_payload) >> 2);
    else
        amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
                                   sizeof(ce_payload) >> 2);
}
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
{
    struct amdgpu_device *adev = ring->adev;
    struct v10_de_ib_state de_payload = {0};
    uint64_t csa_addr, gds_addr;
    int cnt;

    csa_addr = amdgpu_csa_vaddr(ring->adev);
    gds_addr = ALIGN(csa_addr + AMDGPU_CSA_SIZE - adev->gds.gds_size,
                     PAGE_SIZE);
    de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
    de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

    cnt = (sizeof(de_payload) >> 2) + 4 - 2;
    amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
    amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                             WRITE_DATA_DST_SEL(8) |
                             WR_CONFIRM |
                             WRITE_DATA_CACHE_POLICY(0)));
    amdgpu_ring_write(ring, lower_32_bits(csa_addr +
                      offsetof(struct v10_gfx_meta_data, de_payload)));
    amdgpu_ring_write(ring, upper_32_bits(csa_addr +
                      offsetof(struct v10_gfx_meta_data, de_payload)));

    if (resume)
        amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
                                   offsetof(struct v10_gfx_meta_data,
                                            de_payload),
                                   sizeof(de_payload) >> 2);
    else
        amdgpu_ring_write_multiple(ring, (void *)&de_payload,
                                   sizeof(de_payload) >> 2);
}
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
    amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
    amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame begin, 1: frame end */
}
static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
    struct amdgpu_device *adev = ring->adev;

    amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
    amdgpu_ring_write(ring, 0 |        /* src: register */
                            (5 << 8) | /* dst: memory */
                            (1 << 20)); /* write confirm */
    amdgpu_ring_write(ring, reg);
    amdgpu_ring_write(ring, 0);
    amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
                            adev->virt.reg_val_offs * 4));
    amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
                            adev->virt.reg_val_offs * 4));
}
static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
                                     uint32_t val)
{
    uint32_t cmd = 0;

    switch (ring->funcs->type) {
    case AMDGPU_RING_TYPE_GFX:
        cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
        break;
    case AMDGPU_RING_TYPE_KIQ:
        cmd = (1 << 16); /* no inc addr */
        break;
    default:
        cmd = WR_CONFIRM;
        break;
    }
    amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
    amdgpu_ring_write(ring, cmd);
    amdgpu_ring_write(ring, reg);
    amdgpu_ring_write(ring, 0);
    amdgpu_ring_write(ring, val);
}
static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                         uint32_t val, uint32_t mask)
{
    gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                                      uint32_t me, uint32_t pipe,
                                      enum amdgpu_interrupt_state state)
{
    uint32_t cp_int_cntl, cp_int_cntl_reg;

    switch (me) {
    case 0:
        switch (pipe) {
        case 0:
            cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
            break;
        case 1:
            cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
            break;
        default:
            DRM_DEBUG("invalid pipe %d\n", pipe);
            return;
        }
        break;
    default:
        DRM_DEBUG("invalid me %d\n", me);
        return;
    }

    switch (state) {
    case AMDGPU_IRQ_STATE_DISABLE:
        cp_int_cntl = RREG32(cp_int_cntl_reg);
        cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
                                    TIME_STAMP_INT_ENABLE, 0);
        WREG32(cp_int_cntl_reg, cp_int_cntl);
        break;
    case AMDGPU_IRQ_STATE_ENABLE:
        cp_int_cntl = RREG32(cp_int_cntl_reg);
        cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
                                    TIME_STAMP_INT_ENABLE, 1);
        WREG32(cp_int_cntl_reg, cp_int_cntl);
        break;
    default:
        break;
    }
}
static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
                                                      int me, int pipe,
                                                      enum amdgpu_interrupt_state state)
{
    u32 mec_int_cntl, mec_int_cntl_reg;

    /*
     * amdgpu controls only the first MEC. That's why this function only
     * handles the setting of interrupts for this specific MEC. All other
     * pipes' interrupts are set by amdkfd.
     */
    if (me == 1) {
        switch (pipe) {
        case 0:
            mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
            break;
        case 1:
            mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
            break;
        case 2:
            mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
            break;
        case 3:
            mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
            break;
        default:
            DRM_DEBUG("invalid pipe %d\n", pipe);
            return;
        }
    } else {
        DRM_DEBUG("invalid me %d\n", me);
        return;
    }

    switch (state) {
    case AMDGPU_IRQ_STATE_DISABLE:
        mec_int_cntl = RREG32(mec_int_cntl_reg);
        mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                     TIME_STAMP_INT_ENABLE, 0);
        WREG32(mec_int_cntl_reg, mec_int_cntl);
        break;
    case AMDGPU_IRQ_STATE_ENABLE:
        mec_int_cntl = RREG32(mec_int_cntl_reg);
        mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                     TIME_STAMP_INT_ENABLE, 1);
        WREG32(mec_int_cntl_reg, mec_int_cntl);
        break;
    default:
        break;
    }
}
static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
{
    switch (type) {
    case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
        gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
        break;
    case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
        gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
        break;
    case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
        gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
        break;
    case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
        gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
        break;
    case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
        gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
        break;
    case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
        gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
        break;
    case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
        gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
        break;
    case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
        gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
        break;
    case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
        gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
        break;
    case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
        gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
        break;
    default:
        break;
    }
    return 0;
}
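/*
 * IV ring_id decoding used by the handlers below: bits [3:2] hold the
 * ME (0 = gfx, 1/2 = MEC), bits [1:0] the pipe and bits [6:4] the
 * queue. E.g. ring_id 0x26 decodes to me 1, pipe 2, queue 2, i.e. a
 * MEC1 compute queue.
 */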
static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
                             struct amdgpu_irq_src *source,
                             struct amdgpu_iv_entry *entry)
{
    int i;
    u8 me_id, pipe_id, queue_id;
    struct amdgpu_ring *ring;

    DRM_DEBUG("IH: CP EOP\n");
    me_id = (entry->ring_id & 0x0c) >> 2;
    pipe_id = (entry->ring_id & 0x03) >> 0;
    queue_id = (entry->ring_id & 0x70) >> 4;

    switch (me_id) {
    case 0:
        if (pipe_id == 0)
            amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
        else
            amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
        break;
    case 1:
    case 2:
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
            ring = &adev->gfx.compute_ring[i];
            /* Per-queue interrupt is supported for MEC starting from VI.
             * The interrupt can only be enabled/disabled per pipe instead of per queue.
             */
            if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
                amdgpu_fence_process(ring);
        }
        break;
    }

    return 0;
}
static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              unsigned type,
                                              enum amdgpu_interrupt_state state)
{
    switch (state) {
    case AMDGPU_IRQ_STATE_DISABLE:
    case AMDGPU_IRQ_STATE_ENABLE:
        WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                       PRIV_REG_INT_ENABLE,
                       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
        break;
    default:
        break;
    }

    return 0;
}
static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
                                               struct amdgpu_irq_src *source,
                                               unsigned type,
                                               enum amdgpu_interrupt_state state)
{
    switch (state) {
    case AMDGPU_IRQ_STATE_DISABLE:
    case AMDGPU_IRQ_STATE_ENABLE:
        WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                       PRIV_INSTR_INT_ENABLE,
                       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
        break;
    default:
        break;
    }

    return 0;
}
static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
                                        struct amdgpu_iv_entry *entry)
{
    u8 me_id, pipe_id, queue_id;
    struct amdgpu_ring *ring;
    int i;

    me_id = (entry->ring_id & 0x0c) >> 2;
    pipe_id = (entry->ring_id & 0x03) >> 0;
    queue_id = (entry->ring_id & 0x70) >> 4;

    switch (me_id) {
    case 0:
        for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
            ring = &adev->gfx.gfx_ring[i];
            /* we only enabled 1 gfx queue per pipe for now */
            if (ring->me == me_id && ring->pipe == pipe_id)
                drm_sched_fault(&ring->sched);
        }
        break;
    case 1:
    case 2:
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
            ring = &adev->gfx.compute_ring[i];
            if (ring->me == me_id && ring->pipe == pipe_id &&
                ring->queue == queue_id)
                drm_sched_fault(&ring->sched);
        }
        break;
    default:
        BUG();
    }
}
static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
{
    DRM_ERROR("Illegal register access in command stream\n");
    gfx_v10_0_handle_priv_fault(adev, entry);
    return 0;
}

static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
    DRM_ERROR("Illegal instruction in command stream\n");
    gfx_v10_0_handle_priv_fault(adev, entry);
    return 0;
}
static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             unsigned int type,
                                             enum amdgpu_interrupt_state state)
{
    uint32_t tmp, target;
    struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

    if (ring->me == 1)
        target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
    else
        target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
    target += ring->pipe;

    switch (type) {
    case AMDGPU_CP_KIQ_IRQ_DRIVER0:
        if (state == AMDGPU_IRQ_STATE_DISABLE) {
            tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
            tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
                                GENERIC2_INT_ENABLE, 0);
            WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

            tmp = RREG32(target);
            tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
                                GENERIC2_INT_ENABLE, 0);
            WREG32(target, tmp);
        } else {
            tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
            tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
                                GENERIC2_INT_ENABLE, 1);
            WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

            tmp = RREG32(target);
            tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
                                GENERIC2_INT_ENABLE, 1);
            WREG32(target, tmp);
        }
        break;
    default:
        BUG(); /* kiq only support GENERIC2_INT now */
        break;
    }

    return 0;
}
static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
                             struct amdgpu_irq_src *source,
                             struct amdgpu_iv_entry *entry)
{
    u8 me_id, pipe_id, queue_id;
    struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

    me_id = (entry->ring_id & 0x0c) >> 2;
    pipe_id = (entry->ring_id & 0x03) >> 0;
    queue_id = (entry->ring_id & 0x70) >> 4;
    DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
              me_id, pipe_id, queue_id);

    amdgpu_fence_process(ring);
    return 0;
}
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
    .name = "gfx_v10_0",
    .early_init = gfx_v10_0_early_init,
    .late_init = gfx_v10_0_late_init,
    .sw_init = gfx_v10_0_sw_init,
    .sw_fini = gfx_v10_0_sw_fini,
    .hw_init = gfx_v10_0_hw_init,
    .hw_fini = gfx_v10_0_hw_fini,
    .suspend = gfx_v10_0_suspend,
    .resume = gfx_v10_0_resume,
    .is_idle = gfx_v10_0_is_idle,
    .wait_for_idle = gfx_v10_0_wait_for_idle,
    .soft_reset = gfx_v10_0_soft_reset,
    .set_clockgating_state = gfx_v10_0_set_clockgating_state,
    .set_powergating_state = gfx_v10_0_set_powergating_state,
    .get_clockgating_state = gfx_v10_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
    .type = AMDGPU_RING_TYPE_GFX,
    .align_mask = 0xff,
    .nop = PACKET3(PACKET3_NOP, 0x3FFF),
    .support_64bit_ptrs = true,
    .vmhub = AMDGPU_GFXHUB,
    .get_rptr = gfx_v10_0_ring_get_rptr_gfx,
    .get_wptr = gfx_v10_0_ring_get_wptr_gfx,
    .set_wptr = gfx_v10_0_ring_set_wptr_gfx,
    .emit_frame_size = /* totally 242 maximum if 16 IBs */
        5 + /* COND_EXEC */
        7 + /* PIPELINE_SYNC */
        SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
        2 + /* VM_FLUSH */
        8 + /* FENCE for VM_FLUSH */
        20 + /* GDS switch */
        4 + /* double SWITCH_BUFFER,
             * the first COND_EXEC jump to the place
             * just prior to this double SWITCH_BUFFER
             */
        5 + /* COND_EXEC */
        7 + /* HDP_flush */
        4 + /* VGT_flush */
        14 + /* CE_META */
        31 + /* DE_META */
        3 + /* CNTX_CTRL */
        5 + /* HDP_INVL */
        8 + 8 + /* FENCE x2 */
        2, /* SWITCH_BUFFER */
    .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */
    .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
    .emit_fence = gfx_v10_0_ring_emit_fence,
    .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
    .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
    .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
    .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
    .test_ring = gfx_v10_0_ring_test_ring,
    .test_ib = gfx_v10_0_ring_test_ib,
    .insert_nop = amdgpu_ring_insert_nop,
    .pad_ib = amdgpu_ring_generic_pad_ib,
    .emit_switch_buffer = gfx_v10_0_ring_emit_sb,
    .emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
    .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
    .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
    .preempt_ib = gfx_v10_0_ring_preempt_ib,
    .emit_tmz = gfx_v10_0_ring_emit_tmz,
    .emit_wreg = gfx_v10_0_ring_emit_wreg,
    .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
    .type = AMDGPU_RING_TYPE_COMPUTE,
    .align_mask = 0xff,
    .nop = PACKET3(PACKET3_NOP, 0x3FFF),
    .support_64bit_ptrs = true,
    .vmhub = AMDGPU_GFXHUB,
    .get_rptr = gfx_v10_0_ring_get_rptr_compute,
    .get_wptr = gfx_v10_0_ring_get_wptr_compute,
    .set_wptr = gfx_v10_0_ring_set_wptr_compute,
    .emit_frame_size =
        20 + /* gfx_v10_0_ring_emit_gds_switch */
        7 + /* gfx_v10_0_ring_emit_hdp_flush */
        5 + /* hdp invalidate */
        7 + /* gfx_v10_0_ring_emit_pipeline_sync */
        SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
        2 + /* gfx_v10_0_ring_emit_vm_flush */
        8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
    .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
    .emit_ib = gfx_v10_0_ring_emit_ib_compute,
    .emit_fence = gfx_v10_0_ring_emit_fence,
    .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
    .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
    .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
    .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
    .test_ring = gfx_v10_0_ring_test_ring,
    .test_ib = gfx_v10_0_ring_test_ib,
    .insert_nop = amdgpu_ring_insert_nop,
    .pad_ib = amdgpu_ring_generic_pad_ib,
    .emit_wreg = gfx_v10_0_ring_emit_wreg,
    .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
    .type = AMDGPU_RING_TYPE_KIQ,
    .align_mask = 0xff,
    .nop = PACKET3(PACKET3_NOP, 0x3FFF),
    .support_64bit_ptrs = true,
    .vmhub = AMDGPU_GFXHUB,
    .get_rptr = gfx_v10_0_ring_get_rptr_compute,
    .get_wptr = gfx_v10_0_ring_get_wptr_compute,
    .set_wptr = gfx_v10_0_ring_set_wptr_compute,
    .emit_frame_size =
        20 + /* gfx_v10_0_ring_emit_gds_switch */
        7 + /* gfx_v10_0_ring_emit_hdp_flush */
        5 + /* hdp invalidate */
        7 + /* gfx_v10_0_ring_emit_pipeline_sync */
        SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
        2 + /* gfx_v10_0_ring_emit_vm_flush */
        8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
    .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
    .emit_ib = gfx_v10_0_ring_emit_ib_compute,
    .emit_fence = gfx_v10_0_ring_emit_fence_kiq,
    .test_ring = gfx_v10_0_ring_test_ring,
    .test_ib = gfx_v10_0_ring_test_ib,
    .insert_nop = amdgpu_ring_insert_nop,
    .pad_ib = amdgpu_ring_generic_pad_ib,
    .emit_rreg = gfx_v10_0_ring_emit_rreg,
    .emit_wreg = gfx_v10_0_ring_emit_wreg,
    .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
};
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
{
    int i;

    adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;

    for (i = 0; i < adev->gfx.num_gfx_rings; i++)
        adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;

    for (i = 0; i < adev->gfx.num_compute_rings; i++)
        adev->gfx.compute_ring[i].funcs = &gfx_v10_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v10_0_eop_irq_funcs = {
    .set = gfx_v10_0_set_eop_interrupt_state,
    .process = gfx_v10_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
    .set = gfx_v10_0_set_priv_reg_fault_state,
    .process = gfx_v10_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
    .set = gfx_v10_0_set_priv_inst_fault_state,
    .process = gfx_v10_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_kiq_irq_funcs = {
    .set = gfx_v10_0_kiq_set_interrupt_state,
    .process = gfx_v10_0_kiq_irq,
};
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
    adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
    adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;

    adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
    adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;

    adev->gfx.priv_reg_irq.num_types = 1;
    adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;

    adev->gfx.priv_inst_irq.num_types = 1;
    adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
}
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
{
    switch (adev->asic_type) {
    case CHIP_NAVI10:
        adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
        break;
    default:
        break;
    }
}
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
    /* init asic gds info */
    switch (adev->asic_type) {
    case CHIP_NAVI10:
    default:
        adev->gds.gds_size = 0x10000;
        adev->gds.gds_compute_max_wave_id = 0x4ff;
        adev->gds.vgt_gs_max_wave_id = 0x3ff;
        break;
    }

    adev->gds.gws_size = 64;
    adev->gds.oa_size = 16;
}
static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
                                                          u32 bitmap)
{
    u32 data;

    if (!bitmap)
        return;

    data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
    data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

    WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
    u32 data, wgp_bitmask;

    data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
    data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

    data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
    data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

    wgp_bitmask =
        amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

    return (~data) & wgp_bitmask;
}
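/*
 * On gfx10 CUs are paired into WGPs (work group processors), so the
 * per-SH CU mask is derived by widening the WGP mask: each active WGP
 * bit i expands into CU bits 2i and 2i+1. E.g. a WGP bitmap of 0b101
 * yields a CU bitmap of 0b110011.
 */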
static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
    u32 wgp_idx, wgp_active_bitmap;
    u32 cu_bitmap_per_wgp, cu_active_bitmap;

    wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
    cu_active_bitmap = 0;

    for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
        /* if there is one WGP enabled, it means 2 CUs will be enabled */
        cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
        if (wgp_active_bitmap & (1 << wgp_idx))
            cu_active_bitmap |= cu_bitmap_per_wgp;
    }

    return cu_active_bitmap;
}
static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
                                 struct amdgpu_cu_info *cu_info)
{
    int i, j, k, counter, active_cu_number = 0;
    u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
    unsigned disable_masks[4 * 2];

    if (!adev || !cu_info)
        return -EINVAL;

    amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

    mutex_lock(&adev->grbm_idx_mutex);
    for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
        for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
            mask = 1;
            ao_bitmap = 0;
            counter = 0;
            gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
            if (i < 4 && j < 2)
                gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
                    adev, disable_masks[i * 2 + j]);
            bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
            cu_info->bitmap[i][j] = bitmap;

            for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
                if (bitmap & mask) {
                    if (counter < adev->gfx.config.max_cu_per_sh)
                        ao_bitmap |= mask;
                    counter++;
                }
                mask <<= 1;
            }
            active_cu_number += counter;
            if (i < 2 && j < 2)
                ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
            cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
        }
    }
    gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
    mutex_unlock(&adev->grbm_idx_mutex);

    cu_info->number = active_cu_number;
    cu_info->ao_cu_mask = ao_cu_mask;
    cu_info->simd_per_cu = NUM_SIMD_PER_CU;

    return 0;
}
const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
    .type = AMD_IP_BLOCK_TYPE_GFX,
    .major = 10,
    .minor = 0,
    .rev = 0,
    .funcs = &gfx_v10_0_ip_funcs,
};