/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"

#include "soc15.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"
/*
 * Navi10 has two gfx rings sharing each graphics pipe:
 * 1. Primary ring
 * 2. Async ring
 *
 * During bring-up only the primary ring was used (ring number 1); both
 * rings are enabled now, so the gfx ring number is set to 2.
 */
#define GFX10_NUM_GFX_RINGS			2
#define GFX10_MEC_HPD_SIZE			2048

#define F32_CE_PROGRAM_RAM_SIZE			65536
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
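
/*
 * These two CGTT_GS_NGG_CLK_CTRL offsets are defined locally; presumably
 * they were not yet present in the generated gc_10_1_0 register headers
 * when this file was written.
 */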
#define mmCGTT_GS_NGG_CLK_CTRL			0x5087
#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX		1
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");

MODULE_FIRMWARE("amdgpu/navi14_ce_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_pfp_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_me_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec2_wks.bin");
MODULE_FIRMWARE("amdgpu/navi14_ce.bin");
MODULE_FIRMWARE("amdgpu/navi14_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi14_me.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec.bin");
MODULE_FIRMWARE("amdgpu/navi14_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi14_rlc.bin");

MODULE_FIRMWARE("amdgpu/navi12_ce.bin");
MODULE_FIRMWARE("amdgpu/navi12_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi12_me.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi12_rlc.bin");
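
/*
 * "Golden" register settings: each entry names a GC register, an AND mask
 * of the bit fields to override, and the value to program into those
 * fields; soc15_program_register_sequence() applies them during init.
 */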
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xfeff8fff, 0xfeff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x000007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0x20000000, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07900000, 0x04900000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};
static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
{
	/* Pending on emulation bring up */
};
static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
};
static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xffffffff, 0x842a4c02),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04440000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
{
	/* Pending on emulation bring up */
};
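
/*
 * SH_MEM_CONFIG value programmed into every VMID: 64-bit addressing,
 * unaligned access allowed, XNACK retry enabled for all memory operations,
 * and an initial instruction prefetch depth of 3.
 */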
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
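
/*
 * KIQ (Kernel Interface Queue) support.  The KIQ is a privileged compute
 * queue through which the driver asks the CP firmware to map, unmap and
 * query the other gfx/compute queues; the helpers below build the
 * corresponding PM4 packets on the KIQ ring.
 */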
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}
static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
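
/*
 * The *_size fields below are the lengths, in dwords, of the PM4 packets
 * emitted by the corresponding helpers above (packet header included), so
 * callers can reserve the right amount of ring space up front.
 */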
static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx10_kiq_set_resources,
	.kiq_map_queues = gfx10_kiq_map_queues,
	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
	.kiq_query_status = gfx10_kiq_query_status,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
};

static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_0_nv10,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
		break;
	case CHIP_NAVI14:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_nv14,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
		break;
	case CHIP_NAVI12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_2,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1_2_nv12,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
		break;
	default:
		break;
	}
}
static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
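
/*
 * Basic ring sanity test: push a write of 0xDEADBEEF to a scratch register
 * through the ring, then poll the register until the value lands, proving
 * the CP is fetching and executing packets.
 */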
static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}
	if (i < adev->usec_timeout) {
		if (amdgpu_emu_mode == 1)
			DRM_INFO("ring test on %d succeeded in %d msecs\n",
				 ring->idx, i);
		else
			DRM_INFO("ring test on %d succeeded in %d usecs\n",
				 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}
static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}
static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}
static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.cp_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
		if ((adev->gfx.me_fw_version >= 0x00000046) &&
		    (adev->gfx.me_feature_version >= 27) &&
		    (adev->gfx.pfp_fw_version >= 0x00000068) &&
		    (adev->gfx.pfp_feature_version >= 27) &&
		    (adev->gfx.mec_fw_version >= 0x0000005b) &&
		    (adev->gfx.mec_feature_version >= 27))
			adev->gfx.cp_fw_write_wait = true;
		break;
	default:
		break;
	}

	if (adev->gfx.cp_fw_write_wait == false)
		DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize "
			      "GRBM requires 1-cycle delay in cp firmware\n");
}
static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		break;
	default:
		break;
	}
}
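
/*
 * Fetch the PFP/ME/CE/RLC/MEC images from /lib/firmware, validate them,
 * and (for PSP-loaded configurations) register each image in
 * adev->firmware.ucode[] so the PSP can upload it to the GPU.
 */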
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	char wks[10];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i;
	uint16_t version_major;
	uint16_t version_minor;

	memset(wks, 0, sizeof(wks));
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		if (!(adev->pdev->device == 0x7340 &&
		      adev->pdev->revision != 0x00))
			snprintf(wks, sizeof(wks), "_wks");
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
		err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		if (version_major == 2 && version_minor == 1)
			adev->gfx.rlc.is_rlc_v2_1 = true;

		adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
		adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
		adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
		adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
		adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
		adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
		adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
		adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
		adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
		adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
		adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
		adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
		if (!adev->gfx.rlc.register_list_format) {
			err = -ENOMEM;
			goto out;
		}

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
			adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

		adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
			adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

		if (adev->gfx.rlc.is_rlc_v2_1)
			gfx_v10_0_init_rlc_ext_microcode(adev);
	}
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		/* MEC2 firmware is optional */
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) -
			      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
		}
	}
	gfx_v10_0_check_fw_write_wait(adev);
out:
	if (err) {
		dev_err(adev->dev,
			"gfx10: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}

	gfx_v10_0_check_gfxoff_flag(adev);

	return err;
}
static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
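
/*
 * Build the clear-state buffer (CSB): a PM4 stream the RLC/CP replays to
 * put all context registers into a known default state.
 */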
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}
static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx10_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	return 0;
}
static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
static int gfx_v10_0_me_init(struct amdgpu_device *adev)
{
	int r;

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

	r = gfx_v10_0_init_microcode(adev);
	if (r)
		DRM_ERROR("Failed to load gfx firmware!\n");

	return r;
}
static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data = NULL;
	unsigned int fw_size;
	u32 *fw = NULL;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		gfx_v10_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

		r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.mec_fw_obj,
					      &adev->gfx.mec.mec_fw_gpu_addr,
					      (void **)&fw);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
			gfx_v10_0_mec_fini(adev);
			return r;
		}

		memcpy(fw, fw_data, fw_size);

		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	}

	return 0;
}
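
/*
 * Wave debug helpers: waves are inspected through the SQ indirect register
 * pair, i.e. write the wave/register selection into SQ_IND_INDEX and then
 * read the data back from SQ_IND_DATA (auto-incrementing for bulk reads).
 */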
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx10 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 2 wave data */
	dst[(*no_fields)++] = 2;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
}
static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	nv_grbm_select(adev, me, pipe, q, vm);
}
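
/*
 * Hooks used by common amdgpu code (debugfs, CU info queries, etc.) to read
 * the GPU clock counter, steer GRBM indexing and dump wave state.
 */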
static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v10_0_select_se_sh,
	.read_wave_data = &gfx_v10_0_read_wave_data,
	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
};
static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v10_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
}
static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;
	return 0;
}
static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned int irq_type;
	struct amdgpu_ring *ring;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX10_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}
static int gfx_v10_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 2;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}
	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
			      &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v10_0_scratch_init(adev);

	r = gfx_v10_0_me_init(adev);
	if (r)
		return r;

	r = gfx_v10_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v10_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}
	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v10_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
								     j))
					continue;

				r = gfx_v10_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}
	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	adev->gfx.ce_ram_size = F32_CE_PROGRAM_RAM_SIZE;

	gfx_v10_0_gpu_early_init(adev);

	return 0;
}
static void gfx_v10_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
}

static void gfx_v10_0_ce_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
			      &adev->gfx.ce.ce_fw_gpu_addr,
			      (void **)&adev->gfx.ce.ce_fw_ptr);
}

static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);
}
static int gfx_v10_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v10_0_pfp_fini(adev);
	gfx_v10_0_ce_fini(adev);
	gfx_v10_0_me_fini(adev);
	gfx_v10_0_rlc_fini(adev);
	gfx_v10_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);

	gfx_v10_0_free_microcode(adev);

	return 0;
}
static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v10_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}
static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v10_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}
static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *adev)
{
	uint32_t num_sc;
	uint32_t enabled_rb_per_sh;
	uint32_t active_rb_bitmap;
	uint32_t num_rb_per_sc;
	uint32_t num_packer_per_sc;
	uint32_t pa_sc_tile_steering_override;

	/* init num_sc */
	num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
		adev->gfx.config.num_sc_per_sh;
	/* init num_rb_per_sc */
	active_rb_bitmap = gfx_v10_0_get_rb_active_bitmap(adev);
	enabled_rb_per_sh = hweight32(active_rb_bitmap);
	num_rb_per_sc = enabled_rb_per_sh / adev->gfx.config.num_sc_per_sh;
	/* init num_packer_per_sc */
	num_packer_per_sc = adev->gfx.config.num_packer_per_sc;

	pa_sc_tile_steering_override = 0;
	pa_sc_tile_steering_override |=
		(order_base_2(num_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_rb_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_packer_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK;

	return pa_sc_tile_steering_override;
}
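
/*
 * VMIDs 8..15 are reserved for compute; gfx_v10_0_init_compute_vmid() below
 * points their LDS/scratch/GPUVM apertures at the fixed ranges described in
 * the comment inside that function.
 */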
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
	}
}
static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
	}
}
static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
{
	int i, j, k;
	int max_wgp_per_sh = adev->gfx.config.max_cu_per_sh >> 1;
	u32 tmp, wgp_active_bitmap = 0;
	u32 gcrd_targets_disable_tcp = 0;
	u32 utcl_invreq_disable = 0;
	/*
	 * GCRD_TARGETS_DISABLE field contains
	 * for Navi10/Navi12: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
	 * for Navi14: GL1C=[21:18], SQC=[17:12], TCP=[11:0]
	 */
	u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		max_wgp_per_sh + /* SQC */
		4); /* GL1C */
	/*
	 * UTCL1_UTCL0_INVREQ_DISABLE field contains
	 * for Navi10/Navi12: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
	 * for Navi14: SQG=[28], RMI=[27:24], SQC=[23:12], TCP=[11:0]
	 */
	u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		2 * max_wgp_per_sh + /* SQC */
		4 + /* RMI */
		1); /* SQG */

	if (adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12) {
		mutex_lock(&adev->grbm_idx_mutex);
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
				wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
				/*
				 * Set corresponding TCP bits for the inactive WGPs in
				 * GCRD_SA_TARGETS_DISABLE
				 */
				gcrd_targets_disable_tcp = 0;
				/* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
				utcl_invreq_disable = 0;

				for (k = 0; k < max_wgp_per_sh; k++) {
					if (!(wgp_active_bitmap & (1 << k))) {
						gcrd_targets_disable_tcp |= 3 << (2 * k);
						utcl_invreq_disable |= (3 << (2 * k)) |
							(3 << (2 * (max_wgp_per_sh + k)));
					}
				}

				tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
				/* only override TCP & SQC bits */
				tmp &= 0xffffffff << (4 * max_wgp_per_sh);
				tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
				WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);

				tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
				/* only override TCP bits */
				tmp &= 0xffffffff << (2 * max_wgp_per_sh);
				tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
				WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
			}
		}

		gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}
}
1690 static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
1692 /* TCCs are global (not instanced). */
1693 uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
1694 RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
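/*
 * OR the two disable registers together, then pack the TCC_DISABLE
 * field into the low 16 bits and HI_TCC_DISABLE into the upper 16
 * bits of the harvest mask.
 */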
1696 adev->gfx.config.tcc_disabled_mask =
1697 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
1698 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
1701 static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
1706 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1708 gfx_v10_0_tiling_mode_table_init(adev);
1710 gfx_v10_0_setup_rb(adev);
1711 gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
1712 gfx_v10_0_get_tcc_info(adev);
1713 adev->gfx.config.pa_sc_tile_steering_override =
1714 gfx_v10_0_init_pa_sc_tile_steering_override(adev);
1716 /* XXX SH_MEM regs */
1717 /* where to put LDS, scratch, GPUVM in FSA64 space */
1718 mutex_lock(&adev->srbm_mutex);
1719 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
1720 nv_grbm_select(adev, 0, 0, 0, i);
1721 /* CP and shaders */
1722 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1724 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1725 (adev->gmc.private_aperture_start >> 48));
1726 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1727 (adev->gmc.shared_aperture_start >> 48));
1728 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1731 nv_grbm_select(adev, 0, 0, 0, 0);
1733 mutex_unlock(&adev->srbm_mutex);
1735 gfx_v10_0_init_compute_vmid(adev);
1736 gfx_v10_0_init_gds_vmid(adev);
1740 static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1743 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1745 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1747 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1749 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1751 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1754 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1757 static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
1759 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1762 WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
1763 adev->gfx.rlc.clear_state_gpu_addr >> 32);
1764 WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
1765 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1766 WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1771 void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
1773 u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
1775 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1776 WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
1779 static void gfx_v10_0_rlc_reset(struct amdgpu_device *adev)
1781 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1783 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1787 static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1790 uint32_t rlc_pg_cntl;
1792 rlc_pg_cntl = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);
1795 /* RLC_PG_CNTL[23] = 0 (default)
1796 * RLC will wait for handshake acks with SMU
1797 * GFXOFF will be enabled
1798 * RLC_PG_CNTL[23] = 1
1799 * RLC will not issue any message to SMU
1800 * hence no handshake between SMU & RLC
1801 * GFXOFF will be disabled
1803 rlc_pg_cntl |= 0x800000;
1805 rlc_pg_cntl &= ~0x800000;
1806 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
1809 static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
1811 /* TODO: re-enable rlc & smu handshake once the smu
1812 * and gfxoff features work as expected */
1813 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1814 gfx_v10_0_rlc_smu_handshake_cntl(adev, false);
1816 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1820 static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
1824 /* enable Save Restore Machine */
1825 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1826 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1827 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1828 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1831 static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
1833 const struct rlc_firmware_header_v2_0 *hdr;
1834 const __le32 *fw_data;
1835 unsigned i, fw_size;
1837 if (!adev->gfx.rlc_fw)
1840 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1841 amdgpu_ucode_print_rlc_hdr(&hdr->header);
1843 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1844 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1845 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1847 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
1848 RLCG_UCODE_LOADING_START_ADDRESS);
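/*
 * The DATA register auto-increments the load address, so the whole
 * RLC image streams through this single ADDR/DATA pair starting at
 * RLCG_UCODE_LOADING_START_ADDRESS.
 */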
1850 for (i = 0; i < fw_size; i++)
1851 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA,
1852 le32_to_cpup(fw_data++));
1854 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1859 static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
1863 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1865 r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1869 gfx_v10_0_init_csb(adev);
1871 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1872 gfx_v10_0_rlc_enable_srm(adev);
1874 adev->gfx.rlc.funcs->stop(adev);
1877 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
1880 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
1882 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1883 /* legacy rlc firmware loading */
1884 r = gfx_v10_0_rlc_load_microcode(adev);
1887 } else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1888 /* rlc backdoor autoload firmware */
1889 r = gfx_v10_0_rlc_backdoor_autoload_enable(adev);
1894 gfx_v10_0_init_csb(adev);
1896 adev->gfx.rlc.funcs->start(adev);
1898 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1899 r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1909 unsigned int offset;
1911 } rlc_autoload_info[FIRMWARE_ID_MAX];
1913 static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
1916 RLC_TABLE_OF_CONTENT *rlc_toc;
1918 ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
1919 AMDGPU_GEM_DOMAIN_GTT,
1920 &adev->gfx.rlc.rlc_toc_bo,
1921 &adev->gfx.rlc.rlc_toc_gpu_addr,
1922 (void **)&adev->gfx.rlc.rlc_toc_buf);
1924 dev_err(adev->dev, "(%d) failed to create rlc toc bo\n", ret);
1928 /* Copy toc from psp sos fw to rlc toc buffer */
1929 memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);
1931 rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
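/*
 * Walk the TOC entries; offsets and sizes are kept in dword units,
 * hence the * 4 conversions to bytes when filling rlc_autoload_info.
 */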
1932 while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
1933 (rlc_toc->id < FIRMWARE_ID_MAX)) {
1934 if ((rlc_toc->id >= FIRMWARE_ID_CP_CE) &&
1935 (rlc_toc->id <= FIRMWARE_ID_CP_MES)) {
1936 /* Offset needs 4KB alignment */
1937 rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
1940 rlc_autoload_info[rlc_toc->id].id = rlc_toc->id;
1941 rlc_autoload_info[rlc_toc->id].offset = rlc_toc->offset * 4;
1942 rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
1950 static uint32_t gfx_v10_0_calc_toc_total_size(struct amdgpu_device *adev)
1952 uint32_t total_size = 0;
1956 ret = gfx_v10_0_parse_rlc_toc(adev);
1958 dev_err(adev->dev, "failed to parse rlc toc\n");
1962 for (id = FIRMWARE_ID_RLC_G_UCODE; id < FIRMWARE_ID_MAX; id++)
1963 total_size += rlc_autoload_info[id].size;
1965 /* The toc offsets may have been aligned up, so the plain sum of sizes can undershoot; make sure the last entry's end is covered */
1966 if (total_size < rlc_autoload_info[FIRMWARE_ID_MAX-1].offset)
1967 total_size = rlc_autoload_info[FIRMWARE_ID_MAX-1].offset +
1968 rlc_autoload_info[FIRMWARE_ID_MAX-1].size;
1973 static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev)
1976 uint32_t total_size;
1978 total_size = gfx_v10_0_calc_toc_total_size(adev);
1980 r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE,
1981 AMDGPU_GEM_DOMAIN_GTT,
1982 &adev->gfx.rlc.rlc_autoload_bo,
1983 &adev->gfx.rlc.rlc_autoload_gpu_addr,
1984 (void **)&adev->gfx.rlc.rlc_autoload_ptr);
1986 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
1993 static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev)
1995 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_toc_bo,
1996 &adev->gfx.rlc.rlc_toc_gpu_addr,
1997 (void **)&adev->gfx.rlc.rlc_toc_buf);
1998 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1999 &adev->gfx.rlc.rlc_autoload_gpu_addr,
2000 (void **)&adev->gfx.rlc.rlc_autoload_ptr);
2003 static void gfx_v10_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
2005 const void *fw_data,
2008 uint32_t toc_offset;
2009 uint32_t toc_fw_size;
2010 char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
2012 if (id <= FIRMWARE_ID_INVALID || id >= FIRMWARE_ID_MAX)
2015 toc_offset = rlc_autoload_info[id].offset;
2016 toc_fw_size = rlc_autoload_info[id].size;
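/*
 * Clamp the copy to the size the TOC reserved for this id and
 * zero-fill any remainder so no stale data lands in the autoload
 * buffer.
 */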
2019 fw_size = toc_fw_size;
2021 if (fw_size > toc_fw_size)
2022 fw_size = toc_fw_size;
2024 memcpy(ptr + toc_offset, fw_data, fw_size);
2026 if (fw_size < toc_fw_size)
2027 memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
2030 static void gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
2035 data = adev->gfx.rlc.rlc_toc_buf;
2036 size = rlc_autoload_info[FIRMWARE_ID_RLC_TOC].size;
2038 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2039 FIRMWARE_ID_RLC_TOC,
2043 static void gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
2045 const __le32 *fw_data;
2047 const struct gfx_firmware_header_v1_0 *cp_hdr;
2048 const struct rlc_firmware_header_v2_0 *rlc_hdr;
2051 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2052 adev->gfx.pfp_fw->data;
2053 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2054 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2055 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2056 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2061 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2062 adev->gfx.ce_fw->data;
2063 fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
2064 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2065 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2066 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2071 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2072 adev->gfx.me_fw->data;
2073 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2074 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2075 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2076 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2081 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
2082 adev->gfx.rlc_fw->data;
2083 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2084 le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
2085 fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
2086 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2087 FIRMWARE_ID_RLC_G_UCODE,
2091 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2092 adev->gfx.mec_fw->data;
2093 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
2094 le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2095 fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
2096 cp_hdr->jt_size * 4;
2097 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2100 /* loading mec2 ucode is not necessary if it is the same as mec1 */
2103 /* Temporarily put sdma part here */
2104 static void gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
2106 const __le32 *fw_data;
2108 const struct sdma_firmware_header_v1_0 *sdma_hdr;
2111 for (i = 0; i < adev->sdma.num_instances; i++) {
2112 sdma_hdr = (const struct sdma_firmware_header_v1_0 *)
2113 adev->sdma.instance[i].fw->data;
2114 fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data +
2115 le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
2116 fw_size = le32_to_cpu(sdma_hdr->header.ucode_size_bytes);
2119 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2120 FIRMWARE_ID_SDMA0_UCODE, fw_data, fw_size);
2121 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2122 FIRMWARE_ID_SDMA0_JT,
2123 (uint32_t *)fw_data +
2124 sdma_hdr->jt_offset,
2125 sdma_hdr->jt_size * 4);
2126 } else if (i == 1) {
2127 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2128 FIRMWARE_ID_SDMA1_UCODE, fw_data, fw_size);
2129 gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2130 FIRMWARE_ID_SDMA1_JT,
2131 (uint32_t *)fw_data +
2132 sdma_hdr->jt_offset,
2133 sdma_hdr->jt_size * 4);
2138 static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
2140 uint32_t rlc_g_offset, rlc_g_size, tmp;
2143 gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(adev);
2144 gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
2145 gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);
2147 rlc_g_offset = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].offset;
2148 rlc_g_size = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].size;
2149 gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
2151 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_HI, upper_32_bits(gpu_addr));
2152 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_LO, lower_32_bits(gpu_addr));
2153 WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_SIZE, rlc_g_size);
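/*
 * Sanity-check the boot state: the reset vector must report a
 * cold-boot or VDDGFX exit and the RLC ROM must have halted the F32
 * core, otherwise the backdoor load cannot proceed.
 */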
2155 tmp = RREG32_SOC15(GC, 0, mmRLC_HYP_RESET_VECTOR);
2156 if (!(tmp & (RLC_HYP_RESET_VECTOR__COLD_BOOT_EXIT_MASK |
2157 RLC_HYP_RESET_VECTOR__VDDGFX_EXIT_MASK))) {
2158 DRM_ERROR("Neither COLD_BOOT_EXIT nor VDDGFX_EXIT is set\n");
2162 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
2163 if (tmp & RLC_CNTL__RLC_ENABLE_F32_MASK) {
2164 DRM_ERROR("RLC ROM should halt itself\n");
2171 static int gfx_v10_0_rlc_backdoor_autoload_config_me_cache(struct amdgpu_device *adev)
2173 uint32_t usec_timeout = 50000; /* wait for 50ms */
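/*
 * Common invalidate-then-poll sequence: kick INVALIDATE_CACHE, then
 * spin until INVALIDATE_CACHE_COMPLETE reads back 1, giving up after
 * usec_timeout polls. The CE, PFP and MEC cache setup below repeats
 * the same pattern.
 */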
2178 /* Trigger an invalidation of the L1 instruction caches */
2179 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2180 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2181 WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
2183 /* Wait for invalidation complete */
2184 for (i = 0; i < usec_timeout; i++) {
2185 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2186 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2187 INVALIDATE_CACHE_COMPLETE))
2192 if (i >= usec_timeout) {
2193 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2197 /* Program me ucode address into instruction cache address register */
2198 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2199 rlc_autoload_info[FIRMWARE_ID_CP_ME].offset;
2200 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2201 lower_32_bits(addr) & 0xFFFFF000);
2202 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2203 upper_32_bits(addr));
2208 static int gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(struct amdgpu_device *adev)
2210 uint32_t usec_timeout = 50000; /* wait for 50ms */
2215 /* Trigger an invalidation of the L1 instruction caches */
2216 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2217 tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2218 WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2220 /* Wait for invalidation complete */
2221 for (i = 0; i < usec_timeout; i++) {
2222 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2223 if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2224 INVALIDATE_CACHE_COMPLETE))
2229 if (i >= usec_timeout) {
2230 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2234 /* Program ce ucode address into instruction cache address register */
2235 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2236 rlc_autoload_info[FIRMWARE_ID_CP_CE].offset;
2237 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2238 lower_32_bits(addr) & 0xFFFFF000);
2239 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2240 upper_32_bits(addr));
2245 static int gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(struct amdgpu_device *adev)
2247 uint32_t usec_timeout = 50000; /* wait for 50ms */
2252 /* Trigger an invalidation of the L1 instruction caches */
2253 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2254 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2255 WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2257 /* Wait for invalidation complete */
2258 for (i = 0; i < usec_timeout; i++) {
2259 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2260 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2261 INVALIDATE_CACHE_COMPLETE))
2266 if (i >= usec_timeout) {
2267 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2271 /* Program pfp ucode address into instruction cache address register */
2272 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2273 rlc_autoload_info[FIRMWARE_ID_CP_PFP].offset;
2274 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2275 lower_32_bits(addr) & 0xFFFFF000);
2276 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2277 upper_32_bits(addr));
2282 static int gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(struct amdgpu_device *adev)
2284 uint32_t usec_timeout = 50000; /* wait for 50ms */
2289 /* Trigger an invalidation of the L1 instruction caches */
2290 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2291 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2292 WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2294 /* Wait for invalidation complete */
2295 for (i = 0; i < usec_timeout; i++) {
2296 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2297 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2298 INVALIDATE_CACHE_COMPLETE))
2303 if (i >= usec_timeout) {
2304 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2308 /* Program mec1 ucode address into instruction cache address register */
2309 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2310 rlc_autoload_info[FIRMWARE_ID_CP_MEC].offset;
2311 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2312 lower_32_bits(addr) & 0xFFFFF000);
2313 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2314 upper_32_bits(addr));
2319 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2322 uint32_t bootload_status;
2325 for (i = 0; i < adev->usec_timeout; i++) {
2326 cp_status = RREG32_SOC15(GC, 0, mmCP_STAT);
2327 bootload_status = RREG32_SOC15(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS);
2328 if ((cp_status == 0) &&
2329 (REG_GET_FIELD(bootload_status,
2330 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2336 if (i >= adev->usec_timeout) {
2337 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2341 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2342 r = gfx_v10_0_rlc_backdoor_autoload_config_me_cache(adev);
2346 r = gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(adev);
2350 r = gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(adev);
2354 r = gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(adev);
2362 static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2365 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2367 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2368 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2369 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2371 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2372 adev->gfx.gfx_ring[i].sched.ready = false;
2374 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2376 for (i = 0; i < adev->usec_timeout; i++) {
2377 if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
2382 if (i >= adev->usec_timeout)
2383 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2388 static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2391 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2392 const __le32 *fw_data;
2393 unsigned i, fw_size;
2395 uint32_t usec_timeout = 50000; /* wait for 50ms */
2397 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2398 adev->gfx.pfp_fw->data;
2400 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2402 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2403 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2404 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2406 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2407 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2408 &adev->gfx.pfp.pfp_fw_obj,
2409 &adev->gfx.pfp.pfp_fw_gpu_addr,
2410 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2412 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2413 gfx_v10_0_pfp_fini(adev);
2417 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2419 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2420 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2422 /* Trigger an invalidation of the L1 instruction caches */
2423 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2424 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2425 WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2427 /* Wait for invalidation complete */
2428 for (i = 0; i < usec_timeout; i++) {
2429 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2430 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2431 INVALIDATE_CACHE_COMPLETE))
2436 if (i >= usec_timeout) {
2437 dev_err(adev->dev, "failed to invalidate instruction cache\n");
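/*
 * On emulation, flush HDP so the CPU-written ucode has reached memory
 * before the instruction-cache base is programmed.
 */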
2441 if (amdgpu_emu_mode == 1)
2442 adev->nbio.funcs->hdp_flush(adev, NULL);
2444 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
2445 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2446 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2447 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2448 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2449 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL, tmp);
2450 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2451 adev->gfx.pfp.pfp_fw_gpu_addr & 0xFFFFF000);
2452 WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2453 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2458 static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
2461 const struct gfx_firmware_header_v1_0 *ce_hdr;
2462 const __le32 *fw_data;
2463 unsigned i, fw_size;
2465 uint32_t usec_timeout = 50000; /* wait for 50ms */
2467 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2468 adev->gfx.ce_fw->data;
2470 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2472 fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
2473 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2474 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes);
2476 r = amdgpu_bo_create_reserved(adev, ce_hdr->header.ucode_size_bytes,
2477 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2478 &adev->gfx.ce.ce_fw_obj,
2479 &adev->gfx.ce.ce_fw_gpu_addr,
2480 (void **)&adev->gfx.ce.ce_fw_ptr);
2482 dev_err(adev->dev, "(%d) failed to create ce fw bo\n", r);
2483 gfx_v10_0_ce_fini(adev);
2487 memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size);
2489 amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj);
2490 amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj);
2492 /* Trigger an invalidation of the L1 instruction caches */
2493 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2494 tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2495 WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2497 /* Wait for invalidation complete */
2498 for (i = 0; i < usec_timeout; i++) {
2499 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2500 if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2501 INVALIDATE_CACHE_COMPLETE))
2506 if (i >= usec_timeout) {
2507 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2511 if (amdgpu_emu_mode == 1)
2512 adev->nbio.funcs->hdp_flush(adev, NULL);
2514 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
2515 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
2516 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, CACHE_POLICY, 0);
2517 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, EXE_DISABLE, 0);
2518 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2519 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2520 adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000);
2521 WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2522 upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));
2527 static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
2530 const struct gfx_firmware_header_v1_0 *me_hdr;
2531 const __le32 *fw_data;
2532 unsigned i, fw_size;
2534 uint32_t usec_timeout = 50000; /* wait for 50ms */
2536 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2537 adev->gfx.me_fw->data;
2539 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2541 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2542 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2543 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
2545 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
2546 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2547 &adev->gfx.me.me_fw_obj,
2548 &adev->gfx.me.me_fw_gpu_addr,
2549 (void **)&adev->gfx.me.me_fw_ptr);
2551 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
2552 gfx_v10_0_me_fini(adev);
2556 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
2558 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2559 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2561 /* Trigger an invalidation of the L1 instruction caches */
2562 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2563 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2564 WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
2566 /* Wait for invalidation complete */
2567 for (i = 0; i < usec_timeout; i++) {
2568 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2569 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2570 INVALIDATE_CACHE_COMPLETE))
2575 if (i >= usec_timeout) {
2576 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2580 if (amdgpu_emu_mode == 1)
2581 adev->nbio.funcs->hdp_flush(adev, NULL);
2583 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
2584 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2585 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2586 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2587 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2588 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2589 adev->gfx.me.me_fw_gpu_addr & 0xFFFFF000);
2590 WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2591 upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
2596 static int gfx_v10_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2600 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2603 gfx_v10_0_cp_gfx_enable(adev, false);
2605 r = gfx_v10_0_cp_gfx_load_pfp_microcode(adev);
2607 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
2611 r = gfx_v10_0_cp_gfx_load_ce_microcode(adev);
2613 dev_err(adev->dev, "(%d) failed to load ce fw\n", r);
2617 r = gfx_v10_0_cp_gfx_load_me_microcode(adev);
2619 dev_err(adev->dev, "(%d) failed to load me fw\n", r);
2626 static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
2628 struct amdgpu_ring *ring;
2629 const struct cs_section_def *sect = NULL;
2630 const struct cs_extent_def *ext = NULL;
2635 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT,
2636 adev->gfx.config.max_hw_contexts - 1);
2637 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2639 gfx_v10_0_cp_gfx_enable(adev, true);
2641 ring = &adev->gfx.gfx_ring[0];
2642 r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4);
2644 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2648 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2649 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2651 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2652 amdgpu_ring_write(ring, 0x80000000);
2653 amdgpu_ring_write(ring, 0x80000000);
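/*
 * Replay the golden context state: every SECT_CONTEXT extent in
 * gfx10_cs_data is emitted as one SET_CONTEXT_REG run.
 */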
2655 for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
2656 for (ext = sect->section; ext->extent != NULL; ++ext) {
2657 if (sect->id == SECT_CONTEXT) {
2658 amdgpu_ring_write(ring,
2659 PACKET3(PACKET3_SET_CONTEXT_REG,
2661 amdgpu_ring_write(ring, ext->reg_index -
2662 PACKET3_SET_CONTEXT_REG_START);
2663 for (i = 0; i < ext->reg_count; i++)
2664 amdgpu_ring_write(ring, ext->extent[i]);
2670 SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
2671 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
2672 amdgpu_ring_write(ring, ctx_reg_offset);
2673 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
2675 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2676 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2678 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2679 amdgpu_ring_write(ring, 0);
2681 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2682 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2683 amdgpu_ring_write(ring, 0x8000);
2684 amdgpu_ring_write(ring, 0x8000);
2686 amdgpu_ring_commit(ring);
2688 /* submit cs packet to copy state 0 to next available state */
2689 ring = &adev->gfx.gfx_ring[1];
2690 r = amdgpu_ring_alloc(ring, 2);
2692 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2696 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2697 amdgpu_ring_write(ring, 0);
2699 amdgpu_ring_commit(ring);
2704 static void gfx_v10_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
2709 tmp = RREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL);
2710 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
2712 WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, tmp);
2715 static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
2716 struct amdgpu_ring *ring)
2720 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2721 if (ring->use_doorbell) {
2722 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2723 DOORBELL_OFFSET, ring->doorbell_index);
2724 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2727 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2730 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2731 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2732 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2733 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2735 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2736 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2739 static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
2741 struct amdgpu_ring *ring;
2744 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2747 /* Set the write pointer delay */
2748 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2750 /* set the RB to use vmid 0 */
2751 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2753 /* Init gfx ring 0 for pipe 0 */
2754 mutex_lock(&adev->srbm_mutex);
2755 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2757 /* Set ring buffer size */
2758 ring = &adev->gfx.gfx_ring[0];
2759 rb_bufsz = order_base_2(ring->ring_size / 8);
2760 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2761 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2763 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2765 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2767 /* Initialize the ring buffer's write pointers */
2769 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2770 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2772 /* set the wb address whether it's enabled or not */
2773 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2774 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2775 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2776 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2778 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2779 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2780 lower_32_bits(wptr_gpu_addr));
2781 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2782 upper_32_bits(wptr_gpu_addr));
2785 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2787 rb_addr = ring->gpu_addr >> 8;
2788 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2789 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2791 WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);
2793 gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2794 mutex_unlock(&adev->srbm_mutex);
2796 /* Init gfx ring 1 for pipe 1 */
2797 mutex_lock(&adev->srbm_mutex);
2798 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
2799 ring = &adev->gfx.gfx_ring[1];
2800 rb_bufsz = order_base_2(ring->ring_size / 8);
2801 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
2802 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
2803 WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2804 /* Initialize the ring buffer's write pointers */
2806 WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
2807 WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
2808 /* Set the wb address whether it's enabled or not */
2809 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2810 WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
2811 WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2812 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2813 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2814 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2815 lower_32_bits(wptr_gpu_addr));
2816 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2817 upper_32_bits(wptr_gpu_addr));
2820 WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2822 rb_addr = ring->gpu_addr >> 8;
2823 WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
2824 WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
2825 WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
2827 gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2828 mutex_unlock(&adev->srbm_mutex);
2830 /* Switch to pipe 0 */
2831 mutex_lock(&adev->srbm_mutex);
2832 gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2833 mutex_unlock(&adev->srbm_mutex);
2835 /* start the ring */
2836 gfx_v10_0_cp_gfx_start(adev);
2838 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2839 ring = &adev->gfx.gfx_ring[i];
2840 ring->sched.ready = true;
2846 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2851 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2853 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2854 (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
2855 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2856 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2857 adev->gfx.compute_ring[i].sched.ready = false;
2858 adev->gfx.kiq.ring.sched.ready = false;
2863 static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2865 const struct gfx_firmware_header_v1_0 *mec_hdr;
2866 const __le32 *fw_data;
2869 u32 usec_timeout = 50000; /* Wait for 50 ms */
2871 if (!adev->gfx.mec_fw)
2874 gfx_v10_0_cp_compute_enable(adev, false);
2876 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2877 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2879 fw_data = (const __le32 *)
2880 (adev->gfx.mec_fw->data +
2881 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2883 /* Trigger an invalidation of the L1 instruction caches */
2884 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2885 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2886 WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2888 /* Wait for invalidation complete */
2889 for (i = 0; i < usec_timeout; i++) {
2890 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2891 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2892 INVALIDATE_CACHE_COMPLETE))
2897 if (i >= usec_timeout) {
2898 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2902 if (amdgpu_emu_mode == 1)
2903 adev->nbio.funcs->hdp_flush(adev, NULL);
2905 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
2906 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2907 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2908 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2909 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2911 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr &
2913 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2914 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2917 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 0);
2919 for (i = 0; i < mec_hdr->jt_size; i++)
2920 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2921 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2923 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
2926 * TODO: Loading MEC2 firmware is only necessary if MEC2 should run
2927 * different microcode than MEC1.
2933 static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
2936 struct amdgpu_device *adev = ring->adev;
2938 /* tell RLC which queue is the KIQ */
2939 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2941 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2942 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2943 tmp |= 0x80;
2944 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2947 static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
2949 struct amdgpu_device *adev = ring->adev;
2950 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
2951 uint64_t hqd_gpu_addr, wb_gpu_addr;
2955 /* set up gfx hqd wptr */
2956 mqd->cp_gfx_hqd_wptr = 0;
2957 mqd->cp_gfx_hqd_wptr_hi = 0;
2959 /* set the pointer to the MQD */
2960 mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
2961 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2963 /* set up mqd control */
2964 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL);
2965 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
2966 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
2967 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
2968 mqd->cp_gfx_mqd_control = tmp;
2970 /* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
2971 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID);
2972 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
2973 mqd->cp_gfx_hqd_vmid = 0;
2975 /* set up default queue priority level
2976 * 0x0 = low priority, 0x1 = high priority */
2977 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
2978 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
2979 mqd->cp_gfx_hqd_queue_priority = tmp;
2981 /* set up time quantum */
2982 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);
2983 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
2984 mqd->cp_gfx_hqd_quantum = tmp;
2986 /* set up gfx hqd base. this is similar as CP_RB_BASE */
2987 hqd_gpu_addr = ring->gpu_addr >> 8;
2988 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
2989 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
2991 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */
2992 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2993 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
2994 mqd->cp_gfx_hqd_rptr_addr_hi =
2995 upper_32_bits(wb_gpu_addr) & 0xffff;
2997 /* set up rb_wptr_poll addr */
2998 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2999 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3000 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3002 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
3003 rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
3004 tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL);
3005 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3006 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
3008 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
3010 mqd->cp_gfx_hqd_cntl = tmp;
3012 /* set up cp_doorbell_control */
3013 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3014 if (ring->use_doorbell) {
3015 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3016 DOORBELL_OFFSET, ring->doorbell_index);
3017 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3020 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3022 mqd->cp_rb_doorbell_control = tmp;
3024 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3026 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR);
3028 /* activate the queue */
3029 mqd->cp_gfx_hqd_active = 1;
3034 #ifdef BRING_UP_DEBUG
3035 static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
3037 struct amdgpu_device *adev = ring->adev;
3038 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3040 /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
3041 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
3042 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
3044 /* set GFX_MQD_BASE */
3045 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
3046 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3048 /* set GFX_MQD_CONTROL */
3049 WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
3051 /* set GFX_HQD_VMID to 0 */
3052 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
3054 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
3055 mqd->cp_gfx_hqd_queue_priority);
3056 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
3058 /* set GFX_HQD_BASE, similar as CP_RB_BASE */
3059 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
3060 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
3062 /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
3063 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
3064 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
3066 /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
3067 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
3069 /* set RB_WPTR_POLL_ADDR */
3070 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
3071 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
3073 /* set RB_DOORBELL_CONTROL */
3074 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
3076 /* activate the queue */
3077 WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
3083 static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
3085 struct amdgpu_device *adev = ring->adev;
3086 struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3087 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3089 if (!adev->in_gpu_reset && !adev->in_suspend) {
3090 memset((void *)mqd, 0, sizeof(*mqd));
3091 mutex_lock(&adev->srbm_mutex);
3092 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3093 gfx_v10_0_gfx_mqd_init(ring);
3094 #ifdef BRING_UP_DEBUG
3095 gfx_v10_0_gfx_queue_init_register(ring);
3097 nv_grbm_select(adev, 0, 0, 0, 0);
3098 mutex_unlock(&adev->srbm_mutex);
3099 if (adev->gfx.me.mqd_backup[mqd_idx])
3100 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3101 } else if (adev->in_gpu_reset) {
3102 /* reset mqd with the backup copy */
3103 if (adev->gfx.me.mqd_backup[mqd_idx])
3104 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3105 /* reset the ring */
3107 adev->wb.wb[ring->wptr_offs] = 0;
3108 amdgpu_ring_clear_ring(ring);
3109 #ifdef BRING_UP_DEBUG
3110 mutex_lock(&adev->srbm_mutex);
3111 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3112 gfx_v10_0_gfx_queue_init_register(ring);
3113 nv_grbm_select(adev, 0, 0, 0, 0);
3114 mutex_unlock(&adev->srbm_mutex);
3117 amdgpu_ring_clear_ring(ring);
3123 #ifndef BRING_UP_DEBUG
3124 static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
3126 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3127 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3130 if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
3133 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
3134 adev->gfx.num_gfx_rings);
3136 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3140 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3141 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
3143 r = amdgpu_ring_test_ring(kiq_ring);
3145 DRM_ERROR("kfq enable failed\n");
3146 kiq_ring->sched.ready = false;
3152 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3155 struct amdgpu_ring *ring;
3157 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3158 ring = &adev->gfx.gfx_ring[i];
3160 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3161 if (unlikely(r != 0))
3164 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3166 r = gfx_v10_0_gfx_init_queue(ring);
3167 amdgpu_bo_kunmap(ring->mqd_obj);
3168 ring->mqd_ptr = NULL;
3170 amdgpu_bo_unreserve(ring->mqd_obj);
3174 #ifndef BRING_UP_DEBUG
3175 r = gfx_v10_0_kiq_enable_kgq(adev);
3179 r = gfx_v10_0_cp_gfx_start(adev);
3183 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3184 ring = &adev->gfx.gfx_ring[i];
3185 ring->sched.ready = true;
3191 static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
3193 struct amdgpu_device *adev = ring->adev;
3194 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3195 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3198 mqd->header = 0xC0310800;
3199 mqd->compute_pipelinestat_enable = 0x00000001;
3200 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3201 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3202 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3203 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3204 mqd->compute_misc_reserved = 0x00000003;
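/*
 * The all-ones static thread-mgmt masks above make every CU on every
 * SE available to this queue; pipeline stats stay enabled.
 */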
3206 eop_base_addr = ring->eop_gpu_addr >> 8;
3207 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3208 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3210 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3211 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3212 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3213 (order_base_2(GFX10_MEC_HPD_SIZE / 4) - 1));
3215 mqd->cp_hqd_eop_control = tmp;
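/*
 * For example, GFX10_MEC_HPD_SIZE = 2048 bytes = 512 dwords:
 * order_base_2(512) - 1 = 8, and 2^(8+1) = 512 dwords, so EOP_SIZE
 * exactly covers the allocated HPD buffer.
 */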
3217 /* enable doorbell? */
3218 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3220 if (ring->use_doorbell) {
3221 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3222 DOORBELL_OFFSET, ring->doorbell_index);
3223 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3225 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3226 DOORBELL_SOURCE, 0);
3227 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3230 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3234 mqd->cp_hqd_pq_doorbell_control = tmp;
3236 /* disable the queue if it's active */
3238 mqd->cp_hqd_dequeue_request = 0;
3239 mqd->cp_hqd_pq_rptr = 0;
3240 mqd->cp_hqd_pq_wptr_lo = 0;
3241 mqd->cp_hqd_pq_wptr_hi = 0;
3243 /* set the pointer to the MQD */
3244 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3245 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3247 /* set MQD vmid to 0 */
3248 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3249 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3250 mqd->cp_mqd_control = tmp;
3252 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3253 hqd_gpu_addr = ring->gpu_addr >> 8;
3254 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3255 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3257 /* set up the HQD, this is similar to CP_RB0_CNTL */
3258 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3259 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3260 (order_base_2(ring->ring_size / 4) - 1));
3261 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3262 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3264 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3266 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3267 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3268 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3269 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3270 mqd->cp_hqd_pq_control = tmp;
3272 /* set the wb address whether it's enabled or not */
3273 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3274 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3275 mqd->cp_hqd_pq_rptr_report_addr_hi =
3276 upper_32_bits(wb_gpu_addr) & 0xffff;
3278 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3279 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3280 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3281 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3284 /* enable the doorbell if requested */
3285 if (ring->use_doorbell) {
3286 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3287 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3288 DOORBELL_OFFSET, ring->doorbell_index);
3290 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3292 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3293 DOORBELL_SOURCE, 0);
3294 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3298 mqd->cp_hqd_pq_doorbell_control = tmp;
3300 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3302 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3304 /* set the vmid for the queue */
3305 mqd->cp_hqd_vmid = 0;
3307 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3308 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3309 mqd->cp_hqd_persistent_state = tmp;
3311 /* set MIN_IB_AVAIL_SIZE */
3312 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3313 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3314 mqd->cp_hqd_ib_control = tmp;
3316 /* activate the queue */
3317 mqd->cp_hqd_active = 1;
3322 static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
3324 struct amdgpu_device *adev = ring->adev;
3325 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3328 /* disable wptr polling */
3329 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3331 /* write the EOP addr */
3332 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3333 mqd->cp_hqd_eop_base_addr_lo);
3334 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3335 mqd->cp_hqd_eop_base_addr_hi);
3337 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3338 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
3339 mqd->cp_hqd_eop_control);
3341 /* enable doorbell? */
3342 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3343 mqd->cp_hqd_pq_doorbell_control);
3345 /* disable the queue if it's active */
3346 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3347 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3348 for (j = 0; j < adev->usec_timeout; j++) {
3349 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3353 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3354 mqd->cp_hqd_dequeue_request);
3355 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3356 mqd->cp_hqd_pq_rptr);
3357 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3358 mqd->cp_hqd_pq_wptr_lo);
3359 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3360 mqd->cp_hqd_pq_wptr_hi);
3363 /* set the pointer to the MQD */
3364 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3365 mqd->cp_mqd_base_addr_lo);
3366 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3367 mqd->cp_mqd_base_addr_hi);
3369 /* set MQD vmid to 0 */
3370 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3371 mqd->cp_mqd_control);
3373 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3374 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3375 mqd->cp_hqd_pq_base_lo);
3376 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3377 mqd->cp_hqd_pq_base_hi);
3379 /* set up the HQD, this is similar to CP_RB0_CNTL */
3380 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3381 mqd->cp_hqd_pq_control);
3383 /* set the wb address whether it's enabled or not */
3384 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3385 mqd->cp_hqd_pq_rptr_report_addr_lo);
3386 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3387 mqd->cp_hqd_pq_rptr_report_addr_hi);
3389 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3390 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3391 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3392 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3393 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3395 /* enable the doorbell if requested */
3396 if (ring->use_doorbell) {
3397 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3398 (adev->doorbell_index.kiq * 2) << 2);
3399 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3400 (adev->doorbell_index.userqueue_end * 2) << 2);
3403 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3404 mqd->cp_hqd_pq_doorbell_control);
3406 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3407 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3408 mqd->cp_hqd_pq_wptr_lo);
3409 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3410 mqd->cp_hqd_pq_wptr_hi);
3412 /* set the vmid for the queue */
3413 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3415 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3416 mqd->cp_hqd_persistent_state);
3418 /* activate the queue */
3419 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3420 mqd->cp_hqd_active);
3422 if (ring->use_doorbell)
3423 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3428 static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
3430 struct amdgpu_device *adev = ring->adev;
3431 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3432 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3434 gfx_v10_0_kiq_setting(ring);
3436 if (adev->in_gpu_reset) { /* for GPU_RESET case */
3437 /* reset MQD to a clean status */
3438 if (adev->gfx.mec.mqd_backup[mqd_idx])
3439 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3441 /* reset ring buffer */
3443 amdgpu_ring_clear_ring(ring);
3445 mutex_lock(&adev->srbm_mutex);
3446 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3447 gfx_v10_0_kiq_init_register(ring);
3448 nv_grbm_select(adev, 0, 0, 0, 0);
3449 mutex_unlock(&adev->srbm_mutex);
3451 memset((void *)mqd, 0, sizeof(*mqd));
3452 mutex_lock(&adev->srbm_mutex);
3453 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3454 gfx_v10_0_compute_mqd_init(ring);
3455 gfx_v10_0_kiq_init_register(ring);
3456 nv_grbm_select(adev, 0, 0, 0, 0);
3457 mutex_unlock(&adev->srbm_mutex);
3459 if (adev->gfx.mec.mqd_backup[mqd_idx])
3460 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3466 static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
3468 struct amdgpu_device *adev = ring->adev;
3469 struct v10_compute_mqd *mqd = ring->mqd_ptr;
3470 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3472 if (!adev->in_gpu_reset && !adev->in_suspend) {
3473 memset((void *)mqd, 0, sizeof(*mqd));
3474 mutex_lock(&adev->srbm_mutex);
3475 nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3476 gfx_v10_0_compute_mqd_init(ring);
3477 nv_grbm_select(adev, 0, 0, 0, 0);
3478 mutex_unlock(&adev->srbm_mutex);
3480 if (adev->gfx.mec.mqd_backup[mqd_idx])
3481 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3482 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3483 /* reset MQD to a clean status */
3484 if (adev->gfx.mec.mqd_backup[mqd_idx])
3485 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3487 /* reset ring buffer */
3489 amdgpu_ring_clear_ring(ring);
3491 amdgpu_ring_clear_ring(ring);
3497 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
3499 struct amdgpu_ring *ring;
3502 ring = &adev->gfx.kiq.ring;
3504 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3505 if (unlikely(r != 0))
3508 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3509 if (unlikely(r != 0))
3512 gfx_v10_0_kiq_init_queue(ring);
3513 amdgpu_bo_kunmap(ring->mqd_obj);
3514 ring->mqd_ptr = NULL;
3515 amdgpu_bo_unreserve(ring->mqd_obj);
3516 ring->sched.ready = true;
3520 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
3522 struct amdgpu_ring *ring = NULL;
3525 gfx_v10_0_cp_compute_enable(adev, true);
3527 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3528 ring = &adev->gfx.compute_ring[i];
3530 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3531 if (unlikely(r != 0))
3533 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3535 r = gfx_v10_0_kcq_init_queue(ring);
3536 amdgpu_bo_kunmap(ring->mqd_obj);
3537 ring->mqd_ptr = NULL;
3539 amdgpu_bo_unreserve(ring->mqd_obj);
3544 r = amdgpu_gfx_enable_kcq(adev);
3549 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
3552 struct amdgpu_ring *ring;
3554 if (!(adev->flags & AMD_IS_APU))
3555 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3557 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3558 /* legacy firmware loading */
3559 r = gfx_v10_0_cp_gfx_load_microcode(adev);
3563 r = gfx_v10_0_cp_compute_load_microcode(adev);
3568 r = gfx_v10_0_kiq_resume(adev);
3572 r = gfx_v10_0_kcq_resume(adev);
3576 if (!amdgpu_async_gfx_ring) {
3577 r = gfx_v10_0_cp_gfx_resume(adev);
3581 r = gfx_v10_0_cp_async_gfx_ring_resume(adev);
3586 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3587 ring = &adev->gfx.gfx_ring[i];
3588 DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
3589 i, ring->me, ring->pipe, ring->queue);
3590 r = amdgpu_ring_test_ring(ring);
3592 ring->sched.ready = false;
3597 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3598 ring = &adev->gfx.compute_ring[i];
3599 ring->sched.ready = true;
3600 DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
3601 i, ring->me, ring->pipe, ring->queue);
3602 r = amdgpu_ring_test_ring(ring);
3604 ring->sched.ready = false;
3610 static void gfx_v10_0_cp_enable(struct amdgpu_device *adev, bool enable)
3612 gfx_v10_0_cp_gfx_enable(adev, enable);
3613 gfx_v10_0_cp_compute_enable(adev, enable);
3616 static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
3618 uint32_t data, pattern = 0xDEADBEEF;
3620 /* check if mmVGT_ESGS_RING_SIZE_UMD
3621 * has been remapped to mmVGT_ESGS_RING_SIZE */
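/*
 * Probe by parking the real register at 0, writing a test pattern
 * through the UMD alias and reading the real register back; the
 * original value is restored on either outcome.
 */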
3622 data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
3624 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
3626 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
3628 if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
3629 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
3632 WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
3637 static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
3641 /* initialize cam_index to 0
3642 * index will auto-increment after each data write */
3643 WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
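/*
 * Each CAM entry pairs the UMD-visible offset (CAM_ADDR) with the
 * privileged register it forwards to (CAM_REMAPADDR); writing
 * GRBM_CAM_DATA commits the entry and advances the auto-incrementing
 * index.
 */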
3645 /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
3646 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
3647 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3648 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
3649 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3650 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3651 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3653 /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
3654 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
3655 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3656 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
3657 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3658 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3659 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3661 /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
3662 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
3663 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3664 (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
3665 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3666 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3667 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3669 /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
3670 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
3671 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3672 (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
3673 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3674 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3675 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3677 /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
3678 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
3679 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3680 (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
3681 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3682 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3683 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3685 /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
3686 data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
3687 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3688 (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
3689 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3690 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3691 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3693 /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
3694 data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
3695 GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3696 (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
3697 GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3698 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3699 WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3702 static int gfx_v10_0_hw_init(void *handle)
3705 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3707 if (!amdgpu_emu_mode)
3708 gfx_v10_0_init_golden_registers(adev);
3710 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3712 		 * For GFX 10, RLC firmware loading relies on the SMU firmware
3713 		 * being loaded first, so with direct loading the SMC ucode
3716 r = smu_load_microcode(&adev->smu);
3720 r = smu_check_fw_status(&adev->smu);
3722 pr_err("SMC firmware status is not correct\n");
3727 /* if GRBM CAM not remapped, set up the remapping */
3728 if (!gfx_v10_0_check_grbm_cam_remapping(adev))
3729 gfx_v10_0_setup_grbm_cam_remapping(adev);
3731 gfx_v10_0_constants_init(adev);
3733 r = gfx_v10_0_rlc_resume(adev);
3738 	 * golden register init and RLC resume may override some registers,
3739 	 * so reconfigure them here
3741 gfx_v10_0_tcp_harvest(adev);
3743 r = gfx_v10_0_cp_resume(adev);
3750 #ifndef BRING_UP_DEBUG
3751 static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
3753 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3754 struct amdgpu_ring *kiq_ring = &kiq->ring;
3757 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3760 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
3761 adev->gfx.num_gfx_rings))
3764 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3765 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
3766 PREEMPT_QUEUES, 0, 0);
3768 return amdgpu_ring_test_ring(kiq_ring);
3772 static int gfx_v10_0_hw_fini(void *handle)
3774 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3777 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3778 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3779 #ifndef BRING_UP_DEBUG
3780 if (amdgpu_async_gfx_ring) {
3781 r = gfx_v10_0_kiq_disable_kgq(adev);
3783 DRM_ERROR("KGQ disable failed\n");
3786 if (amdgpu_gfx_disable_kcq(adev))
3787 DRM_ERROR("KCQ disable failed\n");
3788 if (amdgpu_sriov_vf(adev)) {
3789 gfx_v10_0_cp_gfx_enable(adev, false);
3792 gfx_v10_0_cp_enable(adev, false);
3793 gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3798 static int gfx_v10_0_suspend(void *handle)
3800 return gfx_v10_0_hw_fini(handle);
3803 static int gfx_v10_0_resume(void *handle)
3805 return gfx_v10_0_hw_init(handle);
3808 static bool gfx_v10_0_is_idle(void *handle)
3810 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3812 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3813 GRBM_STATUS, GUI_ACTIVE))
3819 static int gfx_v10_0_wait_for_idle(void *handle)
3823 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3825 for (i = 0; i < adev->usec_timeout; i++) {
3827 		/* read GRBM_STATUS */
3827 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
3828 GRBM_STATUS__GUI_ACTIVE_MASK;
3830 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
3837 static int gfx_v10_0_soft_reset(void *handle)
3839 u32 grbm_soft_reset = 0;
3841 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3844 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3845 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3846 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3847 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
3848 GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
3849 GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
3850 | GRBM_STATUS__BCI_BUSY_MASK)) {
3851 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3852 GRBM_SOFT_RESET, SOFT_RESET_CP,
3854 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3855 GRBM_SOFT_RESET, SOFT_RESET_GFX,
3859 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3860 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3861 GRBM_SOFT_RESET, SOFT_RESET_CP,
3866 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3867 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3868 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3869 GRBM_SOFT_RESET, SOFT_RESET_RLC,
3872 if (grbm_soft_reset) {
3874 gfx_v10_0_rlc_stop(adev);
3876 /* Disable GFX parsing/prefetching */
3877 gfx_v10_0_cp_gfx_enable(adev, false);
3879 /* Disable MEC parsing/prefetching */
3880 gfx_v10_0_cp_compute_enable(adev, false);
3882 if (grbm_soft_reset) {
3883 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3884 tmp |= grbm_soft_reset;
3885 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3886 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3887 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3891 tmp &= ~grbm_soft_reset;
3892 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3893 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3896 /* Wait a little for things to settle down */
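/*
 * Sample the free-running GPU clock: writing
 * RLC_CAPTURE_GPU_CLOCK_COUNT latches the 64-bit counter into the
 * LSB/MSB register pair so both halves read back consistently, with
 * gpu_clock_mutex serializing concurrent captures.
 */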
3902 static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3906 mutex_lock(&adev->gfx.gpu_clock_mutex);
3907 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3908 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3909 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3910 mutex_unlock(&adev->gfx.gpu_clock_mutex);
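/*
 * Emit the per-VMID GDS setup for a switch.  The base/size registers
 * are per-VMID arrays indexed from the VMID0 register (stride 2 for
 * GDS base/size, 1 for GWS/OA), and the OA mask is a run of oa_size
 * bits starting at oa_base: (1 << (oa_size + oa_base)) - (1 << oa_base).
 */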
3914 static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3916 uint32_t gds_base, uint32_t gds_size,
3917 uint32_t gws_base, uint32_t gws_size,
3918 uint32_t oa_base, uint32_t oa_size)
3920 struct amdgpu_device *adev = ring->adev;
3923 gfx_v10_0_write_data_to_reg(ring, 0, false,
3924 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3928 gfx_v10_0_write_data_to_reg(ring, 0, false,
3929 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3933 gfx_v10_0_write_data_to_reg(ring, 0, false,
3934 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3935 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3938 gfx_v10_0_write_data_to_reg(ring, 0, false,
3939 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3940 (1 << (oa_size + oa_base)) - (1 << oa_base));
3943 static int gfx_v10_0_early_init(void *handle)
3945 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3947 adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
3948 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3950 gfx_v10_0_set_kiq_pm4_funcs(adev);
3951 gfx_v10_0_set_ring_funcs(adev);
3952 gfx_v10_0_set_irq_funcs(adev);
3953 gfx_v10_0_set_gds_init(adev);
3954 gfx_v10_0_set_rlc_funcs(adev);
3959 static int gfx_v10_0_late_init(void *handle)
3961 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3964 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3968 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3975 static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
3979 	/* report whether the RLC F32 core is enabled */
3980 rlc_cntl = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3981 	return !!REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32);
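/*
 * Enter RLC safe mode: post a SAFE_MODE command with the MESSAGE field
 * set and poll until the RLC acknowledges by clearing the CMD bit;
 * unset_safe_mode below posts the bare command to leave safe mode.
 */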
3984 static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
3989 data = RLC_SAFE_MODE__CMD_MASK;
3990 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3991 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3993 /* wait for RLC_SAFE_MODE */
3994 for (i = 0; i < adev->usec_timeout; i++) {
3995 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4001 static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
4005 data = RLC_SAFE_MODE__CMD_MASK;
4006 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4009 static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4014 /* It is disabled by HW by default */
4015 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4016 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4017 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4018 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4019 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4020 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4022 		/* keep the RLC SCLK clock-gating override set */
4023 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4026 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4028 /* MGLS is a global flag to control all MGLS in GFX */
4029 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4030 /* 2 - RLC memory Light sleep */
4031 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4032 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4033 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4035 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4037 /* 3 - CP memory Light sleep */
4038 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4039 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4040 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4042 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4046 /* 1 - MGCG_OVERRIDE */
4047 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4048 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4049 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4050 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4051 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4053 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4055 /* 2 - disable MGLS in RLC */
4056 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4057 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4058 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4059 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4062 /* 3 - disable MGLS in CP */
4063 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4064 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4065 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4066 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4071 static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
4076 /* Enable 3D CGCG/CGLS */
4077 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4078 /* write cmd to clear cgcg/cgls ov */
4079 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4080 /* unset CGCG override */
4081 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4082 /* update CGCG and CGLS override bits */
4084 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4085 /* enable 3Dcgcg FSM(0x0000363f) */
4086 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4087 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4088 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4089 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4090 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4091 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4093 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4095 /* set IDLE_POLL_COUNT(0x00900100) */
4096 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4097 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4098 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4100 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4102 /* Disable CGCG/CGLS */
4103 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4104 /* disable cgcg, cgls should be disabled */
4105 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4106 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4107 /* disable cgcg and cgls in FSM */
4109 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4113 static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4118 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4119 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4120 /* unset CGCG override */
4121 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4122 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4123 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4125 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4126 /* update CGCG and CGLS override bits */
4128 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4130 /* enable cgcg FSM(0x0000363F) */
4131 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4132 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4133 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4134 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4135 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4136 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4138 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4140 /* set IDLE_POLL_COUNT(0x00900100) */
4141 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4142 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4143 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4145 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4147 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4148 /* reset CGCG/CGLS bits */
4149 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4150 /* disable cgcg and cgls in FSM */
4152 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4156 static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4159 amdgpu_gfx_rlc_enter_safe_mode(adev);
4162 /* CGCG/CGLS should be enabled after MGCG/MGLS
4163 * === MGCG + MGLS ===
4165 gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4166 /* === CGCG /CGLS for GFX 3D Only === */
4167 gfx_v10_0_update_3d_clock_gating(adev, enable);
4168 /* === CGCG + CGLS === */
4169 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4171 /* CGCG/CGLS should be disabled before MGCG/MGLS
4172 * === CGCG + CGLS ===
4174 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4175 /* === CGCG /CGLS for GFX 3D Only === */
4176 gfx_v10_0_update_3d_clock_gating(adev, enable);
4177 /* === MGCG + MGLS === */
4178 gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4181 if (adev->cg_flags &
4182 (AMD_CG_SUPPORT_GFX_MGCG |
4183 	     AMD_CG_SUPPORT_GFX_CGLS |
4184 	     AMD_CG_SUPPORT_GFX_CGCG |
4186 AMD_CG_SUPPORT_GFX_3D_CGCG |
4187 AMD_CG_SUPPORT_GFX_3D_CGLS))
4188 gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
4190 amdgpu_gfx_rlc_exit_safe_mode(adev);
4195 static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
4196 .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
4197 .set_safe_mode = gfx_v10_0_set_safe_mode,
4198 .unset_safe_mode = gfx_v10_0_unset_safe_mode,
4199 .init = gfx_v10_0_rlc_init,
4200 .get_csb_size = gfx_v10_0_get_csb_size,
4201 .get_csb_buffer = gfx_v10_0_get_csb_buffer,
4202 .resume = gfx_v10_0_rlc_resume,
4203 .stop = gfx_v10_0_rlc_stop,
4204 .reset = gfx_v10_0_rlc_reset,
4205 .start = gfx_v10_0_rlc_start
4208 static int gfx_v10_0_set_powergating_state(void *handle,
4209 enum amd_powergating_state state)
4211 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4212 	bool enable = (state == AMD_PG_STATE_GATE);
4213 switch (adev->asic_type) {
4217 amdgpu_gfx_off_ctrl(adev, false);
4218 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
4220 amdgpu_gfx_off_ctrl(adev, true);
4228 static int gfx_v10_0_set_clockgating_state(void *handle,
4229 enum amd_clockgating_state state)
4231 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4233 switch (adev->asic_type) {
4237 gfx_v10_0_update_gfx_clock_gating(adev,
4238 						 state == AMD_CG_STATE_GATE);
4246 static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
4248 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4251 /* AMD_CG_SUPPORT_GFX_MGCG */
4252 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4253 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4254 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
4256 /* AMD_CG_SUPPORT_GFX_CGCG */
4257 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4258 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4259 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
4261 /* AMD_CG_SUPPORT_GFX_CGLS */
4262 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4263 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
4265 /* AMD_CG_SUPPORT_GFX_RLC_LS */
4266 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4267 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
4268 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
4270 /* AMD_CG_SUPPORT_GFX_CP_LS */
4271 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4272 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
4273 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
4275 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
4276 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4277 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4278 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4280 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
4281 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4282 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4285 static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4287 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 is 32bit rptr */
4290 static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4292 struct amdgpu_device *adev = ring->adev;
4295 /* XXX check if swapping is necessary on BE */
4296 if (ring->use_doorbell) {
4297 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
4299 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4300 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4306 static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4308 struct amdgpu_device *adev = ring->adev;
4310 if (ring->use_doorbell) {
4311 /* XXX check if swapping is necessary on BE */
4312 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4313 WDOORBELL64(ring->doorbell_index, ring->wptr);
4315 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4316 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4320 static u64 gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4322 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware is 32bit rptr */
4325 static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4329 /* XXX check if swapping is necessary on BE */
4330 if (ring->use_doorbell)
4331 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4337 static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4339 struct amdgpu_device *adev = ring->adev;
4341 /* XXX check if swapping is necessary on BE */
4342 if (ring->use_doorbell) {
4343 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4344 WDOORBELL64(ring->doorbell_index, ring->wptr);
4346 BUG(); /* only DOORBELL method supported on gfx10 now */
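/*
 * Emit an HDP flush: select this engine's bit in the NBIO flush
 * request register (cp2/cp6 shifted by pipe for compute, cp0 for gfx),
 * then WAIT_REG_MEM on the request/done register pair until that bit
 * reads back as done.
 */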
4350 static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4352 struct amdgpu_device *adev = ring->adev;
4353 u32 ref_and_mask, reg_mem_engine;
4354 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
4356 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4359 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4362 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4369 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4370 reg_mem_engine = 1; /* pfp */
4373 gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4374 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
4375 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
4376 ref_and_mask, ref_and_mask, 0x20);
4379 static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4380 struct amdgpu_job *job,
4381 struct amdgpu_ib *ib,
4384 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4385 u32 header, control = 0;
4387 if (ib->flags & AMDGPU_IB_FLAG_CE)
4388 header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
4390 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4392 control |= ib->length_dw | (vmid << 24);
4394 if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4395 control |= INDIRECT_BUFFER_PRE_ENB(1);
4397 if (flags & AMDGPU_IB_PREEMPTED)
4398 control |= INDIRECT_BUFFER_PRE_RESUME(1);
4400 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
4401 gfx_v10_0_ring_emit_de_meta(ring,
4402 flags & AMDGPU_IB_PREEMPTED ? true : false);
4405 amdgpu_ring_write(ring, header);
4406 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4407 amdgpu_ring_write(ring,
4411 lower_32_bits(ib->gpu_addr));
4412 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4413 amdgpu_ring_write(ring, control);
4416 static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4417 struct amdgpu_job *job,
4418 struct amdgpu_ib *ib,
4421 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4422 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4424 	/* Currently, there is a high chance of getting a wave ID mismatch
4425 * between ME and GDS, leading to a hw deadlock, because ME generates
4426 * different wave IDs than the GDS expects. This situation happens
4427 * randomly when at least 5 compute pipes use GDS ordered append.
4428 * The wave IDs generated by ME are also wrong after suspend/resume.
4429 * Those are probably bugs somewhere else in the kernel driver.
4431 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
4432 * GDS to 0 for this ring (me/pipe).
4434 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
4435 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4436 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
4437 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
4440 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4441 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4442 amdgpu_ring_write(ring,
4446 lower_32_bits(ib->gpu_addr));
4447 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4448 amdgpu_ring_write(ring, control);
4451 static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4452 u64 seq, unsigned flags)
4454 struct amdgpu_device *adev = ring->adev;
4455 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4456 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4458 	/* Interrupts don't work correctly on the GFX10.1 model yet; use the fallback instead */
4459 if (adev->pdev->device == 0x50)
4462 /* RELEASE_MEM - flush caches, send int */
4463 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4464 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
4465 PACKET3_RELEASE_MEM_GCR_GL2_WB |
4466 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
4467 PACKET3_RELEASE_MEM_GCR_GLM_WB |
4468 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
4469 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4470 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
4471 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
4472 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
4475 	 * the address should be Qword aligned for a 64bit write, Dword
4476 	 * aligned if only the low 32bit data word is sent (data high discarded)
4482 amdgpu_ring_write(ring, lower_32_bits(addr));
4483 amdgpu_ring_write(ring, upper_32_bits(addr));
4484 amdgpu_ring_write(ring, lower_32_bits(seq));
4485 amdgpu_ring_write(ring, upper_32_bits(seq));
4486 amdgpu_ring_write(ring, 0);
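/*
 * Wait for this ring's last fence to signal before later packets run:
 * a WAIT_REG_MEM on the fence address against sync_seq with a full
 * dword mask, issued from the PFP on gfx rings (engine select 0
 * otherwise).
 */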
4489 static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4491 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4492 uint32_t seq = ring->fence_drv.sync_seq;
4493 uint64_t addr = ring->fence_drv.gpu_addr;
4495 gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
4496 upper_32_bits(addr), seq, 0xffffffff, 4);
4499 static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4500 unsigned vmid, uint64_t pd_addr)
4502 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4504 /* compute doesn't have PFP */
4505 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4506 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4507 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4508 amdgpu_ring_write(ring, 0x0);
4512 static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4513 u64 seq, unsigned int flags)
4515 struct amdgpu_device *adev = ring->adev;
4517 	/* we only allocate 32 bits for each seq wb address */
4518 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4520 /* write fence seq to the "addr" */
4521 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4522 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4523 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4524 amdgpu_ring_write(ring, lower_32_bits(addr));
4525 amdgpu_ring_write(ring, upper_32_bits(addr));
4526 amdgpu_ring_write(ring, lower_32_bits(seq));
4528 if (flags & AMDGPU_FENCE_FLAG_INT) {
4529 /* set register to trigger INT */
4530 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4531 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4532 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4533 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4534 amdgpu_ring_write(ring, 0);
4535 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4539 static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
4541 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4542 amdgpu_ring_write(ring, 0);
4545 static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4550 gfx_v10_0_ring_emit_ce_meta(ring,
4551 flags & AMDGPU_IB_PREEMPTED ? true : false);
4553 gfx_v10_0_ring_emit_tmz(ring, true);
4555 	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
4556 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4557 /* set load_global_config & load_global_uconfig */
4559 /* set load_cs_sh_regs */
4561 /* set load_per_context_state & load_gfx_sh_regs for GFX */
4564 /* set load_ce_ram if preamble presented */
4565 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4568 		/* still set load_ce_ram the first time a preamble is presented,
4569 		 * even though no context switch happens.
4571 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4575 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4576 amdgpu_ring_write(ring, dw2);
4577 amdgpu_ring_write(ring, 0);
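/*
 * COND_EXEC pair: init_cond_exec emits a COND_EXEC packet that skips
 * the following dwords when *cond_exe_gpu_addr == 0, with a 0x55aa55aa
 * placeholder for the skip count, and returns the placeholder's ring
 * offset; patch_cond_exec later overwrites it with the distance from
 * the packet to the current wptr, wrapping through buf_mask as needed.
 */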
4580 static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4584 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4585 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4586 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4587 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
4588 ret = ring->wptr & ring->buf_mask;
4589 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
4594 static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4597 BUG_ON(offset > ring->buf_mask);
4598 BUG_ON(ring->ring[offset] != 0x55aa55aa);
4600 cur = (ring->wptr - 1) & ring->buf_mask;
4601 if (likely(cur > offset))
4602 ring->ring[offset] = cur - offset;
4604 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
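/*
 * Preempt the gfx ring mid-IB through the KIQ: flip the ring's
 * cond_exec word so the preemption condition asserts, have the KIQ
 * issue PREEMPT_QUEUES_NO_UNMAP with the trailing-fence address, then
 * poll the trailing fence until the CP signals that preemption
 * completed (or the usec timeout expires).
 */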
4607 static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
4610 struct amdgpu_device *adev = ring->adev;
4611 struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4612 struct amdgpu_ring *kiq_ring = &kiq->ring;
4614 if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
4617 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
4620 /* assert preemption condition */
4621 amdgpu_ring_set_preempt_cond_exec(ring, false);
4623 /* assert IB preemption, emit the trailing fence */
4624 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
4625 ring->trail_fence_gpu_addr,
4627 amdgpu_ring_commit(kiq_ring);
4629 /* poll the trailing fence */
4630 for (i = 0; i < adev->usec_timeout; i++) {
4631 if (ring->trail_seq ==
4632 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
4637 if (i >= adev->usec_timeout) {
4639 DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
4642 /* deassert preemption condition */
4643 amdgpu_ring_set_preempt_cond_exec(ring, true);
4647 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
4649 struct amdgpu_device *adev = ring->adev;
4650 struct v10_ce_ib_state ce_payload = {0};
4654 cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
4655 csa_addr = amdgpu_csa_vaddr(ring->adev);
4657 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4658 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4659 WRITE_DATA_DST_SEL(8) |
4661 WRITE_DATA_CACHE_POLICY(0));
4662 amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4663 offsetof(struct v10_gfx_meta_data, ce_payload)));
4664 amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4665 offsetof(struct v10_gfx_meta_data, ce_payload)));
4668 amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4669 offsetof(struct v10_gfx_meta_data,
4671 sizeof(ce_payload) >> 2);
4673 amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
4674 sizeof(ce_payload) >> 2);
4677 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
4679 struct amdgpu_device *adev = ring->adev;
4680 struct v10_de_ib_state de_payload = {0};
4681 uint64_t csa_addr, gds_addr;
4684 csa_addr = amdgpu_csa_vaddr(ring->adev);
4685 gds_addr = ALIGN(csa_addr + AMDGPU_CSA_SIZE - adev->gds.gds_size,
4687 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4688 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4690 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4691 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4692 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4693 WRITE_DATA_DST_SEL(8) |
4695 WRITE_DATA_CACHE_POLICY(0));
4696 amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4697 offsetof(struct v10_gfx_meta_data, de_payload)));
4698 amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4699 offsetof(struct v10_gfx_meta_data, de_payload)));
4702 amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4703 offsetof(struct v10_gfx_meta_data,
4705 sizeof(de_payload) >> 2);
4707 amdgpu_ring_write_multiple(ring, (void *)&de_payload,
4708 sizeof(de_payload) >> 2);
4711 static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4713 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4714 amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
4717 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4719 struct amdgpu_device *adev = ring->adev;
4721 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4722 amdgpu_ring_write(ring, 0 | /* src: register*/
4723 (5 << 8) | /* dst: memory */
4724 (1 << 20)); /* write confirm */
4725 amdgpu_ring_write(ring, reg);
4726 amdgpu_ring_write(ring, 0);
4727 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4728 adev->virt.reg_val_offs * 4));
4729 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4730 adev->virt.reg_val_offs * 4));
4733 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4738 switch (ring->funcs->type) {
4739 case AMDGPU_RING_TYPE_GFX:
4740 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4742 case AMDGPU_RING_TYPE_KIQ:
4743 cmd = (1 << 16); /* no inc addr */
4749 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4750 amdgpu_ring_write(ring, cmd);
4751 amdgpu_ring_write(ring, reg);
4752 amdgpu_ring_write(ring, 0);
4753 amdgpu_ring_write(ring, val);
4756 static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4757 uint32_t val, uint32_t mask)
4759 gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4762 static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4763 uint32_t reg0, uint32_t reg1,
4764 uint32_t ref, uint32_t mask)
4766 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4767 struct amdgpu_device *adev = ring->adev;
4768 bool fw_version_ok = false;
4770 fw_version_ok = adev->gfx.cp_fw_write_wait;
4773 gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4776 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4781 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4782 uint32_t me, uint32_t pipe,
4783 enum amdgpu_interrupt_state state)
4785 uint32_t cp_int_cntl, cp_int_cntl_reg;
4790 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
4793 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
4796 DRM_DEBUG("invalid pipe %d\n", pipe);
4800 DRM_DEBUG("invalid me %d\n", me);
4805 case AMDGPU_IRQ_STATE_DISABLE:
4806 cp_int_cntl = RREG32(cp_int_cntl_reg);
4807 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4808 TIME_STAMP_INT_ENABLE, 0);
4809 WREG32(cp_int_cntl_reg, cp_int_cntl);
4811 case AMDGPU_IRQ_STATE_ENABLE:
4812 cp_int_cntl = RREG32(cp_int_cntl_reg);
4813 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4814 TIME_STAMP_INT_ENABLE, 1);
4815 WREG32(cp_int_cntl_reg, cp_int_cntl);
4822 static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4824 enum amdgpu_interrupt_state state)
4826 u32 mec_int_cntl, mec_int_cntl_reg;
4829 * amdgpu controls only the first MEC. That's why this function only
4830 * handles the setting of interrupts for this specific MEC. All other
4831 * pipes' interrupts are set by amdkfd.
4837 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4840 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4843 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4846 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4849 DRM_DEBUG("invalid pipe %d\n", pipe);
4853 DRM_DEBUG("invalid me %d\n", me);
4858 case AMDGPU_IRQ_STATE_DISABLE:
4859 mec_int_cntl = RREG32(mec_int_cntl_reg);
4860 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4861 TIME_STAMP_INT_ENABLE, 0);
4862 WREG32(mec_int_cntl_reg, mec_int_cntl);
4864 case AMDGPU_IRQ_STATE_ENABLE:
4865 mec_int_cntl = RREG32(mec_int_cntl_reg);
4866 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4867 TIME_STAMP_INT_ENABLE, 1);
4868 WREG32(mec_int_cntl_reg, mec_int_cntl);
4875 static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4876 struct amdgpu_irq_src *src,
4878 enum amdgpu_interrupt_state state)
4881 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4882 gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
4884 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
4885 gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
4887 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4888 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4890 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4891 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4893 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4894 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4896 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4897 gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4899 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4900 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4902 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4903 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4905 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4906 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4908 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4909 gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
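/*
 * CP EOP interrupt handler.  The IV ring_id encodes the source queue
 * (me in bits [3:2], pipe in bits [1:0], queue in bits [6:4]); the
 * decoded triple selects which gfx or compute ring gets its fences
 * processed.
 */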
4917 static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
4918 struct amdgpu_irq_src *source,
4919 struct amdgpu_iv_entry *entry)
4922 u8 me_id, pipe_id, queue_id;
4923 struct amdgpu_ring *ring;
4925 DRM_DEBUG("IH: CP EOP\n");
4926 me_id = (entry->ring_id & 0x0c) >> 2;
4927 pipe_id = (entry->ring_id & 0x03) >> 0;
4928 queue_id = (entry->ring_id & 0x70) >> 4;
4933 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4935 amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
4939 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4940 ring = &adev->gfx.compute_ring[i];
4941 /* Per-queue interrupt is supported for MEC starting from VI.
4942 * The interrupt can only be enabled/disabled per pipe instead of per queue.
4944 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4945 amdgpu_fence_process(ring);
4952 static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4953 struct amdgpu_irq_src *source,
4955 enum amdgpu_interrupt_state state)
4958 case AMDGPU_IRQ_STATE_DISABLE:
4959 case AMDGPU_IRQ_STATE_ENABLE:
4960 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4961 PRIV_REG_INT_ENABLE,
4962 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4971 static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4972 struct amdgpu_irq_src *source,
4974 enum amdgpu_interrupt_state state)
4977 case AMDGPU_IRQ_STATE_DISABLE:
4978 case AMDGPU_IRQ_STATE_ENABLE:
4979 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4980 PRIV_INSTR_INT_ENABLE,
4981 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4989 static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
4990 struct amdgpu_iv_entry *entry)
4992 u8 me_id, pipe_id, queue_id;
4993 struct amdgpu_ring *ring;
4996 me_id = (entry->ring_id & 0x0c) >> 2;
4997 pipe_id = (entry->ring_id & 0x03) >> 0;
4998 queue_id = (entry->ring_id & 0x70) >> 4;
5002 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5003 ring = &adev->gfx.gfx_ring[i];
5004 			/* we only enable 1 gfx queue per pipe for now */
5005 if (ring->me == me_id && ring->pipe == pipe_id)
5006 drm_sched_fault(&ring->sched);
5011 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5012 ring = &adev->gfx.compute_ring[i];
5013 if (ring->me == me_id && ring->pipe == pipe_id &&
5014 ring->queue == queue_id)
5015 drm_sched_fault(&ring->sched);
5023 static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
5024 struct amdgpu_irq_src *source,
5025 struct amdgpu_iv_entry *entry)
5027 DRM_ERROR("Illegal register access in command stream\n");
5028 gfx_v10_0_handle_priv_fault(adev, entry);
5032 static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
5033 struct amdgpu_irq_src *source,
5034 struct amdgpu_iv_entry *entry)
5036 DRM_ERROR("Illegal instruction in command stream\n");
5037 gfx_v10_0_handle_priv_fault(adev, entry);
5041 static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
5042 struct amdgpu_irq_src *src,
5044 enum amdgpu_interrupt_state state)
5046 uint32_t tmp, target;
5047 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5050 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5052 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
5053 target += ring->pipe;
5056 case AMDGPU_CP_KIQ_IRQ_DRIVER0:
5057 if (state == AMDGPU_IRQ_STATE_DISABLE) {
5058 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5059 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5060 GENERIC2_INT_ENABLE, 0);
5061 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5063 tmp = RREG32(target);
5064 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5065 GENERIC2_INT_ENABLE, 0);
5066 WREG32(target, tmp);
5068 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5069 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5070 GENERIC2_INT_ENABLE, 1);
5071 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5073 tmp = RREG32(target);
5074 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5075 GENERIC2_INT_ENABLE, 1);
5076 WREG32(target, tmp);
5080 		BUG(); /* kiq only supports GENERIC2_INT now */
5086 static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
5087 struct amdgpu_irq_src *source,
5088 struct amdgpu_iv_entry *entry)
5090 u8 me_id, pipe_id, queue_id;
5091 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5093 me_id = (entry->ring_id & 0x0c) >> 2;
5094 pipe_id = (entry->ring_id & 0x03) >> 0;
5095 queue_id = (entry->ring_id & 0x70) >> 4;
5096 DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
5097 me_id, pipe_id, queue_id);
5099 amdgpu_fence_process(ring);
5103 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
5104 .name = "gfx_v10_0",
5105 .early_init = gfx_v10_0_early_init,
5106 .late_init = gfx_v10_0_late_init,
5107 .sw_init = gfx_v10_0_sw_init,
5108 .sw_fini = gfx_v10_0_sw_fini,
5109 .hw_init = gfx_v10_0_hw_init,
5110 .hw_fini = gfx_v10_0_hw_fini,
5111 .suspend = gfx_v10_0_suspend,
5112 .resume = gfx_v10_0_resume,
5113 .is_idle = gfx_v10_0_is_idle,
5114 .wait_for_idle = gfx_v10_0_wait_for_idle,
5115 .soft_reset = gfx_v10_0_soft_reset,
5116 .set_clockgating_state = gfx_v10_0_set_clockgating_state,
5117 .set_powergating_state = gfx_v10_0_set_powergating_state,
5118 .get_clockgating_state = gfx_v10_0_get_clockgating_state,
5121 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5122 .type = AMDGPU_RING_TYPE_GFX,
5124 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5125 .support_64bit_ptrs = true,
5126 .vmhub = AMDGPU_GFXHUB_0,
5127 .get_rptr = gfx_v10_0_ring_get_rptr_gfx,
5128 .get_wptr = gfx_v10_0_ring_get_wptr_gfx,
5129 .set_wptr = gfx_v10_0_ring_set_wptr_gfx,
5130 .emit_frame_size = /* totally 242 maximum if 16 IBs */
5132 7 + /* PIPELINE_SYNC */
5133 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5134 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5136 8 + /* FENCE for VM_FLUSH */
5137 20 + /* GDS switch */
5138 4 + /* double SWITCH_BUFFER,
5139 * the first COND_EXEC jump to the place
5140 * just prior to this double SWITCH_BUFFER
5149 8 + 8 + /* FENCE x2 */
5150 2, /* SWITCH_BUFFER */
5151 .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
5152 .emit_ib = gfx_v10_0_ring_emit_ib_gfx,
5153 .emit_fence = gfx_v10_0_ring_emit_fence,
5154 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5155 .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5156 .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5157 .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5158 .test_ring = gfx_v10_0_ring_test_ring,
5159 .test_ib = gfx_v10_0_ring_test_ib,
5160 .insert_nop = amdgpu_ring_insert_nop,
5161 .pad_ib = amdgpu_ring_generic_pad_ib,
5162 .emit_switch_buffer = gfx_v10_0_ring_emit_sb,
5163 .emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
5164 .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
5165 .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
5166 .preempt_ib = gfx_v10_0_ring_preempt_ib,
5167 .emit_tmz = gfx_v10_0_ring_emit_tmz,
5168 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5169 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5170 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5173 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
5174 .type = AMDGPU_RING_TYPE_COMPUTE,
5176 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5177 .support_64bit_ptrs = true,
5178 .vmhub = AMDGPU_GFXHUB_0,
5179 .get_rptr = gfx_v10_0_ring_get_rptr_compute,
5180 .get_wptr = gfx_v10_0_ring_get_wptr_compute,
5181 .set_wptr = gfx_v10_0_ring_set_wptr_compute,
5183 20 + /* gfx_v10_0_ring_emit_gds_switch */
5184 7 + /* gfx_v10_0_ring_emit_hdp_flush */
5185 5 + /* hdp invalidate */
5186 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5187 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5188 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5189 2 + /* gfx_v10_0_ring_emit_vm_flush */
5190 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
5191 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
5192 .emit_ib = gfx_v10_0_ring_emit_ib_compute,
5193 .emit_fence = gfx_v10_0_ring_emit_fence,
5194 .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5195 .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5196 .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5197 .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5198 .test_ring = gfx_v10_0_ring_test_ring,
5199 .test_ib = gfx_v10_0_ring_test_ib,
5200 .insert_nop = amdgpu_ring_insert_nop,
5201 .pad_ib = amdgpu_ring_generic_pad_ib,
5202 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5203 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5204 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5207 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
5208 .type = AMDGPU_RING_TYPE_KIQ,
5210 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
5211 .support_64bit_ptrs = true,
5212 .vmhub = AMDGPU_GFXHUB_0,
5213 .get_rptr = gfx_v10_0_ring_get_rptr_compute,
5214 .get_wptr = gfx_v10_0_ring_get_wptr_compute,
5215 .set_wptr = gfx_v10_0_ring_set_wptr_compute,
5217 20 + /* gfx_v10_0_ring_emit_gds_switch */
5218 7 + /* gfx_v10_0_ring_emit_hdp_flush */
5219 5 + /*hdp invalidate */
5220 7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5221 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5222 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5223 2 + /* gfx_v10_0_ring_emit_vm_flush */
5224 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
5225 .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
5226 .emit_ib = gfx_v10_0_ring_emit_ib_compute,
5227 .emit_fence = gfx_v10_0_ring_emit_fence_kiq,
5228 .test_ring = gfx_v10_0_ring_test_ring,
5229 .test_ib = gfx_v10_0_ring_test_ib,
5230 .insert_nop = amdgpu_ring_insert_nop,
5231 .pad_ib = amdgpu_ring_generic_pad_ib,
5232 .emit_rreg = gfx_v10_0_ring_emit_rreg,
5233 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5234 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5235 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5238 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
5242 adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
5244 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5245 adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
5247 for (i = 0; i < adev->gfx.num_compute_rings; i++)
5248 adev->gfx.compute_ring[i].funcs = &gfx_v10_0_ring_funcs_compute;
5251 static const struct amdgpu_irq_src_funcs gfx_v10_0_eop_irq_funcs = {
5252 .set = gfx_v10_0_set_eop_interrupt_state,
5253 .process = gfx_v10_0_eop_irq,
5256 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
5257 .set = gfx_v10_0_set_priv_reg_fault_state,
5258 .process = gfx_v10_0_priv_reg_irq,
5261 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
5262 .set = gfx_v10_0_set_priv_inst_fault_state,
5263 .process = gfx_v10_0_priv_inst_irq,
5266 static const struct amdgpu_irq_src_funcs gfx_v10_0_kiq_irq_funcs = {
5267 .set = gfx_v10_0_kiq_set_interrupt_state,
5268 .process = gfx_v10_0_kiq_irq,
5271 static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
5273 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5274 adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
5276 adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
5277 adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
5279 adev->gfx.priv_reg_irq.num_types = 1;
5280 adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
5282 adev->gfx.priv_inst_irq.num_types = 1;
5283 adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
5286 static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
5288 switch (adev->asic_type) {
5292 adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
5299 static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
5301 unsigned total_cu = adev->gfx.config.max_cu_per_sh *
5302 adev->gfx.config.max_sh_per_se *
5303 adev->gfx.config.max_shader_engines;
5305 adev->gds.gds_size = 0x10000;
5306 adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
5307 adev->gds.gws_size = 64;
5308 adev->gds.oa_size = 16;
5311 static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
5319 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5320 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5322 WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
5325 static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
5327 u32 data, wgp_bitmask;
5328 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
5329 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
5331 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5332 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5335 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
5337 return (~data) & wgp_bitmask;
5340 static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
5342 u32 wgp_idx, wgp_active_bitmap;
5343 u32 cu_bitmap_per_wgp, cu_active_bitmap;
5345 wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
5346 cu_active_bitmap = 0;
5348 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
5349 /* if there is one WGP enabled, it means 2 CUs will be enabled */
5350 cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
5351 if (wgp_active_bitmap & (1 << wgp_idx))
5352 cu_active_bitmap |= cu_bitmap_per_wgp;
5355 return cu_active_bitmap;
5358 static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
5359 struct amdgpu_cu_info *cu_info)
5361 int i, j, k, counter, active_cu_number = 0;
5362 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5363 unsigned disable_masks[4 * 2];
5365 if (!adev || !cu_info)
5368 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5370 mutex_lock(&adev->grbm_idx_mutex);
5371 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5372 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5376 gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
5378 gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
5379 adev, disable_masks[i * 2 + j]);
5380 bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
5381 cu_info->bitmap[i][j] = bitmap;
5383 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5384 if (bitmap & mask) {
5385 if (counter < adev->gfx.config.max_cu_per_sh)
5391 active_cu_number += counter;
5393 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5394 cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5397 gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5398 mutex_unlock(&adev->grbm_idx_mutex);
5400 cu_info->number = active_cu_number;
5401 cu_info->ao_cu_mask = ao_cu_mask;
5402 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5407 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
5409 .type = AMD_IP_BLOCK_TYPE_GFX,
5413 .funcs = &gfx_v10_0_ip_funcs,