/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
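
/*
 * Program the "golden" register settings: the common GC 9.x values for the
 * chip family first, then the board-specific overrides.
 */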
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
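
/*
 * Basic ring sanity test: write 0xCAFEDEAD to a scratch register, push a
 * SET_UCONFIG_REG packet that stores 0xDEADBEEF there, then poll until the
 * value flips or the timeout expires.
 */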
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
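
/*
 * Indirect buffer test: build a small IB that uses WRITE_DATA to store
 * 0xDEADBEEF into a writeback slot, schedule it behind a fence, and check
 * the memory once the fence signals.
 */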
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}
static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.me_fw_write_wait = false;
	adev->gfx.mec_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000193) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA12:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000196) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA20:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000197) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_RAVEN:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000192) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	default:
		break;
	}
}
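
/*
 * Fetch and validate the PFP/ME/CE/RLC/MEC microcode images from
 * /lib/firmware and record their version/feature levels; when firmware is
 * loaded through the PSP, also register each image in adev->firmware.ucode[].
 */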
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	gfx_v9_0_check_fw_write_wait(adev);
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}
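
/* Compute the clear-state buffer size in dwords from the gfx9 CS data. */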
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
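
/*
 * Emit the clear-state buffer contents: preamble, context control, the
 * SET_CONTEXT_REG runs from the CS data, and the closing CLEAR_STATE packet.
 */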
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
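
/*
 * Build the always-on CU bitmask per SE/SH: the first two CUs are kept on
 * for power gating, and 4 (APU), 8 (Vega12) or 12 CUs stay active for
 * load balancing.
 */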
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	uint32_t pg_always_on_cu_num = 2;
	uint32_t always_on_cu_num;
	uint32_t i, j, k;
	uint32_t mask, cu_bitmap, counter;

	if (adev->flags & AMD_IS_APU)
		always_on_cu_num = 4;
	else if (adev->asic_type == CHIP_VEGA12)
		always_on_cu_num = 8;
	else
		always_on_cu_num = 12;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			cu_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
				if (cu_info->bitmap[i][j] & mask) {
					if (counter == pg_always_on_cu_num)
						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
					if (counter < always_on_cu_num)
						cu_bitmap |= mask;
					else
						break;
					counter++;
				}
				mask <<= 1;
			}

			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}
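
/* Configure load-balance-per-watt (LBPW) thresholds and counters for Raven. */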
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}
static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}
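
/*
 * Copy the CP jump tables of all five microengines (CE, PFP, ME, MEC, MEC2)
 * back to back into the RLC cp_table buffer.
 */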
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i ++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}
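
/*
 * Allocate the RLC buffers: the clear-state block built from the CSB data,
 * plus (on Raven) the CP jump table, and set up LBPW where supported.
 */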
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
				r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_err(adev->dev,
				"(%d) failed to create cp table bo\n", r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		gfx_v9_0_init_lbpw(adev);
		break;
	case CHIP_VEGA20:
		gfx_v9_4_init_lbpw(adev);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			  AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}
static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
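
/*
 * Allocate the MEC buffers: one HPD EOP region per acquired compute ring in
 * VRAM, and a GTT copy of the MEC microcode.
 */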
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}
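
/* Read wave state through the indexed SQ_IND_INDEX/SQ_IND_DATA interface. */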
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t thread,
				     uint32_t start, uint32_t size,
				     uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q)
{
	soc15_grbm_select(adev, me, pipe, q, 0);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};
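
/*
 * Early init: select the per-ASIC gfx config (FIFO sizes, GB_ADDR_CONFIG
 * golden value) and decode GB_ADDR_CONFIG into its individual fields.
 */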
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	int err;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VEGA12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
		DRM_INFO("fix gfx.config for vega12\n");
		break;
	case CHIP_VEGA20:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		gb_addr_config &= ~0xf3e777ff;
		gb_addr_config |= 0x22014042;
		/* check vbios table if gpu info is not available */
		err = amdgpu_atomfirmware_get_gfx_info(adev);
		if (err)
			return err;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		if (adev->rev_id >= 8)
			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
		else
			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}
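
/*
 * Create one NGG buffer, sized per shader engine; size_se comes from the
 * module parameter, falling back to default_size_se when it is zero.
 */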
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}
static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}
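
/*
 * Reserve GDS space for NGG and create the primitive, position, control
 * sideband and (optionally) parameter cache buffers, sized per shader engine.
 */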
static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}
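
/*
 * Enable NGG: program the WD buffer sizes and base addresses, then clear
 * the reserved GDS range with a CP DMA_DATA packet.
 */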
1573 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
1575 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
1582 /* Program buffer size */
1583 data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
1584 adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
1585 data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
1586 adev->gfx.ngg.buf[NGG_POS].size >> 8);
1587 WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
1589 data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
1590 adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
1591 data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
1592 adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
1593 WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
1595 /* Program buffer base address */
1596 base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1597 data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
1598 WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
1600 base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1601 data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
1602 WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
1604 base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1605 data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
1606 WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
1608 base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1609 data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
1610 WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
1612 base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1613 data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
1614 WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
1616 base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1617 data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
1618 WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
1620 /* Clear GDS reserved memory */
1621 r = amdgpu_ring_alloc(ring, 17);
1623 DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
1628 gfx_v9_0_write_data_to_reg(ring, 0, false,
1629 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
1630 (adev->gds.mem.total_size +
1631 adev->gfx.ngg.gds_reserve_size));
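/* PACKET3(op, n) is followed by n + 1 data dwords. The DMA_DATA packet
 * below zero-fills the reserved GDS range (assumption: DST_SEL(1) selects
 * GDS and SRC_SEL(2) inline data); RAW_WAIT orders it behind the register
 * write above.
 */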
1633 amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
1634 amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
1635 PACKET3_DMA_DATA_DST_SEL(1) |
1636 PACKET3_DMA_DATA_SRC_SEL(2)));
1637 amdgpu_ring_write(ring, 0);
1638 amdgpu_ring_write(ring, 0);
1639 amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
1640 amdgpu_ring_write(ring, 0);
1641 amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
1642 adev->gfx.ngg.gds_reserve_size);
1644 gfx_v9_0_write_data_to_reg(ring, 0, false,
1645 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
1647 amdgpu_ring_commit(ring);
1652 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1653 int mec, int pipe, int queue)
1657 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1664 ring->queue = queue;
1666 ring->ring_obj = NULL;
1667 ring->use_doorbell = true;
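/* The doorbell enums count 64-bit doorbells while the index programmed
 * here is in 32-bit slots, hence the << 1 (assumption, consistent with
 * the gfx ring doorbell setup in gfx_v9_0_sw_init()).
 */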
1668 ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
1669 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1670 + (ring_id * GFX9_MEC_HPD_SIZE);
1671 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1673 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1674 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1677 /* type-2 packets are deprecated on MEC, use type-3 instead */
1678 r = amdgpu_ring_init(adev, ring, 1024,
1679 &adev->gfx.eop_irq, irq_type);
1687 static int gfx_v9_0_sw_init(void *handle)
1689 int i, j, k, r, ring_id;
1690 struct amdgpu_ring *ring;
1691 struct amdgpu_kiq *kiq;
1692 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1694 switch (adev->asic_type) {
1699 adev->gfx.mec.num_mec = 2;
1702 adev->gfx.mec.num_mec = 1;
1706 adev->gfx.mec.num_pipe_per_mec = 4;
1707 adev->gfx.mec.num_queue_per_pipe = 8;
1710 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1714 /* Privileged reg */
1715 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1716 &adev->gfx.priv_reg_irq);
1720 /* Privileged inst */
1721 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1722 &adev->gfx.priv_inst_irq);
1726 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1728 gfx_v9_0_scratch_init(adev);
1730 r = gfx_v9_0_init_microcode(adev);
1732 DRM_ERROR("Failed to load gfx firmware!\n");
1736 r = gfx_v9_0_rlc_init(adev);
1738 DRM_ERROR("Failed to init rlc BOs!\n");
1742 r = gfx_v9_0_mec_init(adev);
1744 DRM_ERROR("Failed to init MEC BOs!\n");
1748 /* set up the gfx ring */
1749 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1750 ring = &adev->gfx.gfx_ring[i];
1751 ring->ring_obj = NULL;
1752 if (i == 0)
1753 sprintf(ring->name, "gfx");
1754 else
1755 sprintf(ring->name, "gfx_%d", i);
1756 ring->use_doorbell = true;
1757 ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
1758 r = amdgpu_ring_init(adev, ring, 1024,
1759 &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1764 /* set up the compute queues - allocate horizontally across pipes */
1766 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1767 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1768 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1769 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1772 r = gfx_v9_0_compute_ring_init(adev,
1783 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1785 DRM_ERROR("Failed to init KIQ BOs!\n");
1789 kiq = &adev->gfx.kiq;
1790 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1794 /* create MQD for all compute queues as well as KIQ for SRIOV case */
1795 r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1799 adev->gfx.ce_ram_size = 0x8000;
1801 r = gfx_v9_0_gpu_early_init(adev);
1805 r = gfx_v9_0_ngg_init(adev);
1813 static int gfx_v9_0_sw_fini(void *handle)
1816 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1818 amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1819 amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1820 amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1822 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1823 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1824 for (i = 0; i < adev->gfx.num_compute_rings; i++)
1825 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1827 amdgpu_gfx_compute_mqd_sw_fini(adev);
1828 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1829 amdgpu_gfx_kiq_fini(adev);
1831 gfx_v9_0_mec_fini(adev);
1832 gfx_v9_0_ngg_fini(adev);
1833 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1834 &adev->gfx.rlc.clear_state_gpu_addr,
1835 (void **)&adev->gfx.rlc.cs_ptr);
1836 if (adev->asic_type == CHIP_RAVEN) {
1837 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
1838 &adev->gfx.rlc.cp_table_gpu_addr,
1839 (void **)&adev->gfx.rlc.cp_table_ptr);
1841 gfx_v9_0_free_microcode(adev);
1847 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1852 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1856 if (instance == 0xffffffff)
1857 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1858 else
1859 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1861 if (se_num == 0xffffffff)
1862 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1863 else
1864 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1866 if (sh_num == 0xffffffff)
1867 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1868 else
1869 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1871 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1874 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1878 data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1879 data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
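/* Both registers lay the BACKEND_DISABLE field out at the same bit
 * position, so one mask/shift pair works on their OR.
 */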
1881 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1882 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1884 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1885 adev->gfx.config.max_sh_per_se);
1887 return (~data) & mask;
1890 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1895 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1896 adev->gfx.config.max_sh_per_se;
1898 mutex_lock(&adev->grbm_idx_mutex);
1899 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1900 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1901 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1902 data = gfx_v9_0_get_rb_active_bitmap(adev);
1903 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1904 rb_bitmap_width_per_sh);
1907 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1908 mutex_unlock(&adev->grbm_idx_mutex);
1910 adev->gfx.config.backend_enable_mask = active_rbs;
1911 adev->gfx.config.num_rbs = hweight32(active_rbs);
1914 #define DEFAULT_SH_MEM_BASES (0x6000)
1915 #define FIRST_COMPUTE_VMID (8)
1916 #define LAST_COMPUTE_VMID (16)
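/* VMIDs FIRST_COMPUTE_VMID..LAST_COMPUTE_VMID-1 (8..15) are reserved for
 * compute; they all get the fixed 0x6000-based apertures set up below.
 */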
1917 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1920 uint32_t sh_mem_config;
1921 uint32_t sh_mem_bases;
1924 * Configure apertures:
1925 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
1926 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
1927 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
1929 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1931 sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1932 SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1933 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1935 mutex_lock(&adev->srbm_mutex);
1936 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1937 soc15_grbm_select(adev, 0, 0, 0, i);
1938 /* CP and shaders */
1939 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1940 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1942 soc15_grbm_select(adev, 0, 0, 0, 0);
1943 mutex_unlock(&adev->srbm_mutex);
1946 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
1951 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1953 gfx_v9_0_tiling_mode_table_init(adev);
1955 gfx_v9_0_setup_rb(adev);
1956 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1957 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1959 /* XXX SH_MEM regs */
1960 /* where to put LDS, scratch, GPUVM in FSA64 space */
1961 mutex_lock(&adev->srbm_mutex);
1962 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1963 soc15_grbm_select(adev, 0, 0, 0, i);
1964 /* CP and shaders */
1966 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1967 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1968 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1969 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1971 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1972 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1973 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1974 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1975 (adev->gmc.private_aperture_start >> 48));
1976 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1977 (adev->gmc.shared_aperture_start >> 48));
1978 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1981 soc15_grbm_select(adev, 0, 0, 0, 0);
1983 mutex_unlock(&adev->srbm_mutex);
1985 gfx_v9_0_init_compute_vmid(adev);
1987 mutex_lock(&adev->grbm_idx_mutex);
1989 * making sure that the following register writes will be broadcasted
1990 * to all the shaders
1992 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1994 WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1995 (adev->gfx.config.sc_prim_fifo_size_frontend <<
1996 PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1997 (adev->gfx.config.sc_prim_fifo_size_backend <<
1998 PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1999 (adev->gfx.config.sc_hiz_tile_fifo_size <<
2000 PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
2001 (adev->gfx.config.sc_earlyz_tile_fifo_size <<
2002 PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
2003 mutex_unlock(&adev->grbm_idx_mutex);
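/* Poll each SE/SH combination until the per-CU serdes masters report
 * idle, then wait for the non-CU (SE/GC/TC) masters as well.
 */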
2007 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2012 mutex_lock(&adev->grbm_idx_mutex);
2013 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2014 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2015 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2016 for (k = 0; k < adev->usec_timeout; k++) {
2017 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2021 if (k == adev->usec_timeout) {
2022 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2023 0xffffffff, 0xffffffff);
2024 mutex_unlock(&adev->grbm_idx_mutex);
2025 DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
2031 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2032 mutex_unlock(&adev->grbm_idx_mutex);
2034 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2035 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2036 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2037 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2038 for (k = 0; k < adev->usec_timeout; k++) {
2039 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2045 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2048 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2050 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2051 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2052 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2053 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2055 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2058 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2061 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2062 adev->gfx.rlc.clear_state_gpu_addr >> 32);
2063 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2064 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2065 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2066 adev->gfx.rlc.clear_state_size);
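/* Register list format (as consumed below): a direct section of
 * reg_list_format_direct_reg_list_length dwords, followed by indirect
 * blocks of (register, value) pairs, each block terminated by 0xFFFFFFFF.
 * This helper records where every indirect block starts and collects the
 * unique indirect registers referenced.
 */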
2069 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2070 int indirect_offset,
2072 int *unique_indirect_regs,
2073 int unique_indirect_reg_count,
2074 int *indirect_start_offsets,
2075 int *indirect_start_offsets_count,
2076 int max_start_offsets_count)
2080 for (; indirect_offset < list_size; indirect_offset++) {
2081 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2082 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2083 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2085 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2086 indirect_offset += 2;
2088 /* look for a matching index */
2089 for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2090 if (unique_indirect_regs[idx] ==
2091 register_list_format[indirect_offset] ||
2092 !unique_indirect_regs[idx])
2096 BUG_ON(idx >= unique_indirect_reg_count);
2098 if (!unique_indirect_regs[idx])
2099 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2106 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2108 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2109 int unique_indirect_reg_count = 0;
2111 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2112 int indirect_start_offsets_count = 0;
2118 u32 *register_list_format =
2119 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2120 if (!register_list_format)
2122 memcpy(register_list_format, adev->gfx.rlc.register_list_format,
2123 adev->gfx.rlc.reg_list_format_size_bytes);
2125 /* setup unique_indirect_regs array and indirect_start_offsets array */
2126 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2127 gfx_v9_1_parse_ind_reg_list(register_list_format,
2128 adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2129 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2130 unique_indirect_regs,
2131 unique_indirect_reg_count,
2132 indirect_start_offsets,
2133 &indirect_start_offsets_count,
2134 ARRAY_SIZE(indirect_start_offsets));
2136 /* enable auto inc in case it is disabled */
2137 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2138 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2139 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2141 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2142 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2143 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2144 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2145 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2146 adev->gfx.rlc.register_restore[i]);
2148 /* load indirect register */
2149 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2150 adev->gfx.rlc.reg_list_format_start);
2152 /* direct register portion */
2153 for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2154 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2155 register_list_format[i]);
2157 /* indirect register portion */
2158 while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2159 if (register_list_format[i] == 0xFFFFFFFF) {
2160 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2164 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2165 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2167 for (j = 0; j < unique_indirect_reg_count; j++) {
2168 if (register_list_format[i] == unique_indirect_regs[j]) {
2169 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2174 BUG_ON(j >= unique_indirect_reg_count);
2179 /* set save/restore list size */
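/* the list holds (register, value) pairs, so the size handed to the RLC
 * is half the dword count */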
2180 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2181 list_size = list_size >> 1;
2182 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2183 adev->gfx.rlc.reg_restore_list_size);
2184 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2186 /* write the starting offsets to RLC scratch ram */
2187 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2188 adev->gfx.rlc.starting_offsets_start);
2189 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2190 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2191 indirect_start_offsets[i]);
2193 /* load unique indirect regs */
2194 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2195 if (unique_indirect_regs[i] != 0) {
2196 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2197 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2198 unique_indirect_regs[i] & 0x3FFFF);
2200 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2201 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2202 unique_indirect_regs[i] >> 20);
2206 kfree(register_list_format);
2210 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2212 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2215 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2219 uint32_t default_data = 0;
2221 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2222 if (enable) {
2223 /* enable GFXIP control over CGPG */
2224 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2225 if (default_data != data)
2226 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2229 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2230 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2231 if (default_data != data)
2232 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2234 /* restore GFXIP control over CGPG */
2235 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2236 if (default_data != data)
2237 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2241 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2245 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2246 AMD_PG_SUPPORT_GFX_SMG |
2247 AMD_PG_SUPPORT_GFX_DMG)) {
2248 /* init IDLE_POLL_COUNT = 60 */
2249 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2250 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2251 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2252 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2254 /* init RLC PG Delay */
2256 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2257 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2258 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2259 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2260 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2262 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2263 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2264 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2265 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2267 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2268 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2269 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2270 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2272 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2273 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2275 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2276 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2277 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2279 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2283 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2287 uint32_t default_data = 0;
2289 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2290 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2291 SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2293 if (default_data != data)
2294 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2297 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2301 uint32_t default_data = 0;
2303 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2304 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2305 SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2307 if (default_data != data)
2308 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2311 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2315 uint32_t default_data = 0;
2317 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2318 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2321 if (default_data != data)
2322 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2325 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2328 uint32_t data, default_data;
2330 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2331 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2332 GFX_POWER_GATING_ENABLE,
2334 if (default_data != data)
2335 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2338 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2341 uint32_t data, default_data;
2343 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2344 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2345 GFX_PIPELINE_PG_ENABLE,
2347 if (default_data != data)
2348 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2351 /* read any GFX register to wake up GFX */
2352 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2355 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2358 uint32_t data, default_data;
2360 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2361 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2362 STATIC_PER_CU_PG_ENABLE,
2364 if (default_data != data)
2365 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2368 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2371 uint32_t data, default_data;
2373 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2374 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2375 DYN_PER_CU_PG_ENABLE,
2377 if (default_data != data)
2378 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2381 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2383 gfx_v9_0_init_csb(adev);
2386 * The RLC save/restore list is only supported from RLC v2_1,
2387 * and it is required by the gfxoff feature.
2389 if (adev->gfx.rlc.is_rlc_v2_1) {
2390 gfx_v9_1_init_rlc_save_restore_list(adev);
2391 gfx_v9_0_enable_save_restore_machine(adev);
2394 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2395 AMD_PG_SUPPORT_GFX_SMG |
2396 AMD_PG_SUPPORT_GFX_DMG |
2398 AMD_PG_SUPPORT_GDS |
2399 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2400 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2401 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2402 gfx_v9_0_init_gfx_power_gating(adev);
2406 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2408 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2409 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2410 gfx_v9_0_wait_for_rlc_serdes(adev);
2413 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2415 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2417 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2421 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2423 #ifdef AMDGPU_RLC_DEBUG_RETRY
2427 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2429 /* APUs (e.g. Carrizo) enable the CP interrupt only after CP init */
2430 if (!(adev->flags & AMD_IS_APU))
2431 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2435 #ifdef AMDGPU_RLC_DEBUG_RETRY
2436 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2437 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2438 if (rlc_ucode_ver == 0x108) {
2439 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2440 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2441 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2442 * default is 0x9C4 to create a 100us interval */
2443 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2444 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2445 * to disable the page fault retry interrupts, default is
2446 * 0x100 (256) */
2447 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2452 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2454 const struct rlc_firmware_header_v2_0 *hdr;
2455 const __le32 *fw_data;
2456 unsigned i, fw_size;
2458 if (!adev->gfx.rlc_fw)
2461 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2462 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2464 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2465 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2466 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2468 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2469 RLCG_UCODE_LOADING_START_ADDRESS);
2470 for (i = 0; i < fw_size; i++)
2471 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2472 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2477 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2481 if (amdgpu_sriov_vf(adev)) {
2482 gfx_v9_0_init_csb(adev);
2486 gfx_v9_0_rlc_stop(adev);
2489 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2491 gfx_v9_0_rlc_reset(adev);
2493 gfx_v9_0_init_pg(adev);
2495 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2496 /* legacy rlc firmware loading */
2497 r = gfx_v9_0_rlc_load_microcode(adev);
2502 if (adev->asic_type == CHIP_RAVEN ||
2503 adev->asic_type == CHIP_VEGA20) {
2504 if (amdgpu_lbpw != 0)
2505 gfx_v9_0_enable_lbpw(adev, true);
2506 else
2507 gfx_v9_0_enable_lbpw(adev, false);
2510 gfx_v9_0_rlc_start(adev);
2515 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2518 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2520 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2521 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2522 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2524 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2525 adev->gfx.gfx_ring[i].sched.ready = false;
2527 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2531 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2533 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2534 const struct gfx_firmware_header_v1_0 *ce_hdr;
2535 const struct gfx_firmware_header_v1_0 *me_hdr;
2536 const __le32 *fw_data;
2537 unsigned i, fw_size;
2539 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2542 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2543 adev->gfx.pfp_fw->data;
2544 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2545 adev->gfx.ce_fw->data;
2546 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2547 adev->gfx.me_fw->data;
2549 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2550 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2551 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2553 gfx_v9_0_cp_gfx_enable(adev, false);
2556 fw_data = (const __le32 *)
2557 (adev->gfx.pfp_fw->data +
2558 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2559 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2560 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2561 for (i = 0; i < fw_size; i++)
2562 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2563 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2566 fw_data = (const __le32 *)
2567 (adev->gfx.ce_fw->data +
2568 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2569 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2570 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2571 for (i = 0; i < fw_size; i++)
2572 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2573 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2576 fw_data = (const __le32 *)
2577 (adev->gfx.me_fw->data +
2578 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2579 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2580 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2581 for (i = 0; i < fw_size; i++)
2582 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2583 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2588 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2590 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2591 const struct cs_section_def *sect = NULL;
2592 const struct cs_extent_def *ext = NULL;
2596 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2597 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2599 gfx_v9_0_cp_gfx_enable(adev, true);
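/* ring space: the CSB dwords plus 4 for the SET_BASE packet and 3 for
 * the SET_UCONFIG_REG packet emitted after the clear state */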
2601 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2603 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2607 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2608 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2610 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2611 amdgpu_ring_write(ring, 0x80000000);
2612 amdgpu_ring_write(ring, 0x80000000);
2614 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2615 for (ext = sect->section; ext->extent != NULL; ++ext) {
2616 if (sect->id == SECT_CONTEXT) {
2617 amdgpu_ring_write(ring,
2618 PACKET3(PACKET3_SET_CONTEXT_REG,
2620 amdgpu_ring_write(ring,
2621 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2622 for (i = 0; i < ext->reg_count; i++)
2623 amdgpu_ring_write(ring, ext->extent[i]);
2628 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2629 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2631 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2632 amdgpu_ring_write(ring, 0);
2634 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2635 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2636 amdgpu_ring_write(ring, 0x8000);
2637 amdgpu_ring_write(ring, 0x8000);
2639 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2640 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2641 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2642 amdgpu_ring_write(ring, tmp);
2643 amdgpu_ring_write(ring, 0);
2645 amdgpu_ring_commit(ring);
2650 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2652 struct amdgpu_ring *ring;
2655 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2657 /* Set the write pointer delay */
2658 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2660 /* set the RB to use vmid 0 */
2661 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2663 /* Set ring buffer size */
2664 ring = &adev->gfx.gfx_ring[0];
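/* RB_BUFSZ appears to encode log2 of the ring size in 8-byte units,
 * hence the divide by 8 (assumption based on this computation).
 */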
2665 rb_bufsz = order_base_2(ring->ring_size / 8);
2666 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2667 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2669 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2671 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2673 /* Initialize the ring buffer's write pointers */
2675 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2676 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2678 /* set the wb address whether it's enabled or not */
2679 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2680 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2681 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2683 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2684 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2685 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2688 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2690 rb_addr = ring->gpu_addr >> 8;
2691 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2692 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2694 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2695 if (ring->use_doorbell) {
2696 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2697 DOORBELL_OFFSET, ring->doorbell_index);
2698 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2699 DOORBELL_EN, 1);
2700 } else {
2701 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2703 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2705 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2706 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2707 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2709 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2710 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2713 /* start the ring */
2714 gfx_v9_0_cp_gfx_start(adev);
2715 ring->sched.ready = true;
2720 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2725 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2727 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2728 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2729 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2730 adev->gfx.compute_ring[i].sched.ready = false;
2731 adev->gfx.kiq.ring.sched.ready = false;
2736 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2738 const struct gfx_firmware_header_v1_0 *mec_hdr;
2739 const __le32 *fw_data;
2743 if (!adev->gfx.mec_fw)
2746 gfx_v9_0_cp_compute_enable(adev, false);
2748 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2749 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2751 fw_data = (const __le32 *)
2752 (adev->gfx.mec_fw->data +
2753 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2754 tmp = 0;
2755 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2756 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2757 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2759 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2760 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2761 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2762 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2765 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2766 mec_hdr->jt_offset);
2767 for (i = 0; i < mec_hdr->jt_size; i++)
2768 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2769 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2771 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2772 adev->gfx.mec_fw_version);
2773 /* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
2779 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2782 struct amdgpu_device *adev = ring->adev;
2784 /* tell RLC which queue is the KIQ */
2785 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2786 tmp &= 0xffffff00;
2787 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2788 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2789 tmp |= 0x80;
2790 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2793 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2795 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2796 uint64_t queue_mask = 0;
2799 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2800 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2803 /* This situation may be hit in the future if a new HW
2804 * generation exposes more than 64 queues. If so, the
2805 * definition of queue_mask needs updating */
2806 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2807 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2811 queue_mask |= (1ull << i);
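/* one 8-dword SET_RESOURCES packet plus a 7-dword MAP_QUEUES packet per
 * compute ring */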
2814 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
2816 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2821 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2822 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2823 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2824 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2825 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2826 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2827 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2828 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2829 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2830 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2831 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2832 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2833 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2835 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2837 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
2838 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2839 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2840 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2841 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2842 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2843 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
2844 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2845 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2846 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2847 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2848 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2849 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2850 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2851 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2854 r = amdgpu_ring_test_helper(kiq_ring);
2856 DRM_ERROR("KCQ enable failed\n");
2861 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2863 struct amdgpu_device *adev = ring->adev;
2864 struct v9_mqd *mqd = ring->mqd_ptr;
2865 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2868 mqd->header = 0xC0310800;
2869 mqd->compute_pipelinestat_enable = 0x00000001;
2870 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2871 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2872 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2873 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2874 mqd->compute_misc_reserved = 0x00000003;
2876 mqd->dynamic_cu_mask_addr_lo =
2877 lower_32_bits(ring->mqd_gpu_addr
2878 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2879 mqd->dynamic_cu_mask_addr_hi =
2880 upper_32_bits(ring->mqd_gpu_addr
2881 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2883 eop_base_addr = ring->eop_gpu_addr >> 8;
2884 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2885 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2887 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2888 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2889 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2890 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2892 mqd->cp_hqd_eop_control = tmp;
2894 /* enable doorbell? */
2895 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2897 if (ring->use_doorbell) {
2898 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2899 DOORBELL_OFFSET, ring->doorbell_index);
2900 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2901 DOORBELL_EN, 1);
2902 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2903 DOORBELL_SOURCE, 0);
2904 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2905 DOORBELL_HIT, 0);
2906 } else {
2907 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2908 DOORBELL_EN, 0);
2911 mqd->cp_hqd_pq_doorbell_control = tmp;
2913 /* disable the queue if it's active */
2915 mqd->cp_hqd_dequeue_request = 0;
2916 mqd->cp_hqd_pq_rptr = 0;
2917 mqd->cp_hqd_pq_wptr_lo = 0;
2918 mqd->cp_hqd_pq_wptr_hi = 0;
2920 /* set the pointer to the MQD */
2921 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2922 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2924 /* set MQD vmid to 0 */
2925 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2926 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2927 mqd->cp_mqd_control = tmp;
2929 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2930 hqd_gpu_addr = ring->gpu_addr >> 8;
2931 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2932 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2934 /* set up the HQD, this is similar to CP_RB0_CNTL */
2935 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2936 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2937 (order_base_2(ring->ring_size / 4) - 1));
2938 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2939 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2941 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2943 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2944 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2945 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2946 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2947 mqd->cp_hqd_pq_control = tmp;
2949 /* set the wb address whether it's enabled or not */
2950 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2951 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2952 mqd->cp_hqd_pq_rptr_report_addr_hi =
2953 upper_32_bits(wb_gpu_addr) & 0xffff;
2955 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2956 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2957 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2958 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2961 /* enable the doorbell if requested */
2962 if (ring->use_doorbell) {
2963 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2964 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2965 DOORBELL_OFFSET, ring->doorbell_index);
2967 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2968 DOORBELL_EN, 1);
2969 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2970 DOORBELL_SOURCE, 0);
2971 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2972 DOORBELL_HIT, 0);
2975 mqd->cp_hqd_pq_doorbell_control = tmp;
2977 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2979 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2981 /* set the vmid for the queue */
2982 mqd->cp_hqd_vmid = 0;
2984 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2985 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2986 mqd->cp_hqd_persistent_state = tmp;
2988 /* set MIN_IB_AVAIL_SIZE */
2989 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2990 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2991 mqd->cp_hqd_ib_control = tmp;
2993 /* activate the queue */
2994 mqd->cp_hqd_active = 1;
2999 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3001 struct amdgpu_device *adev = ring->adev;
3002 struct v9_mqd *mqd = ring->mqd_ptr;
3005 /* disable wptr polling */
3006 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3008 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3009 mqd->cp_hqd_eop_base_addr_lo);
3010 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3011 mqd->cp_hqd_eop_base_addr_hi);
3013 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3014 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
3015 mqd->cp_hqd_eop_control);
3017 /* enable doorbell? */
3018 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3019 mqd->cp_hqd_pq_doorbell_control);
3021 /* disable the queue if it's active */
3022 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3023 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3024 for (j = 0; j < adev->usec_timeout; j++) {
3025 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3029 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3030 mqd->cp_hqd_dequeue_request);
3031 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3032 mqd->cp_hqd_pq_rptr);
3033 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3034 mqd->cp_hqd_pq_wptr_lo);
3035 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3036 mqd->cp_hqd_pq_wptr_hi);
3039 /* set the pointer to the MQD */
3040 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3041 mqd->cp_mqd_base_addr_lo);
3042 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3043 mqd->cp_mqd_base_addr_hi);
3045 /* set MQD vmid to 0 */
3046 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3047 mqd->cp_mqd_control);
3049 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3050 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3051 mqd->cp_hqd_pq_base_lo);
3052 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3053 mqd->cp_hqd_pq_base_hi);
3055 /* set up the HQD, this is similar to CP_RB0_CNTL */
3056 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3057 mqd->cp_hqd_pq_control);
3059 /* set the wb address whether it's enabled or not */
3060 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3061 mqd->cp_hqd_pq_rptr_report_addr_lo);
3062 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3063 mqd->cp_hqd_pq_rptr_report_addr_hi);
3065 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3066 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3067 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3068 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3069 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3071 /* enable the doorbell if requested */
3072 if (ring->use_doorbell) {
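/* the range registers apparently take byte offsets: 64-bit doorbell
 * index * 2 dword slots * 4 bytes each, hence (index * 2) << 2 */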
3073 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3074 (AMDGPU_DOORBELL64_KIQ * 2) << 2);
3075 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3076 (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
3079 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3080 mqd->cp_hqd_pq_doorbell_control);
3082 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3083 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3084 mqd->cp_hqd_pq_wptr_lo);
3085 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3086 mqd->cp_hqd_pq_wptr_hi);
3088 /* set the vmid for the queue */
3089 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3091 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3092 mqd->cp_hqd_persistent_state);
3094 /* activate the queue */
3095 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3096 mqd->cp_hqd_active);
3098 if (ring->use_doorbell)
3099 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3104 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3106 struct amdgpu_device *adev = ring->adev;
3109 /* disable the queue if it's active */
3110 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3112 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3114 for (j = 0; j < adev->usec_timeout; j++) {
3115 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3120 if (j == adev->usec_timeout) {
3121 DRM_DEBUG("KIQ dequeue request failed.\n");
3123 /* Manual disable if dequeue request times out */
3124 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
3127 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3131 WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3132 WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3133 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3134 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3135 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3136 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3137 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3138 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3143 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3145 struct amdgpu_device *adev = ring->adev;
3146 struct v9_mqd *mqd = ring->mqd_ptr;
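/* the KIQ MQD backup lives in the slot just past the per-KCQ backups;
 * compare gfx_v9_0_kcq_init_queue(), which indexes by compute ring */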
3147 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3149 gfx_v9_0_kiq_setting(ring);
3151 if (adev->in_gpu_reset) { /* for GPU_RESET case */
3152 /* reset MQD to a clean status */
3153 if (adev->gfx.mec.mqd_backup[mqd_idx])
3154 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3156 /* reset ring buffer */
3158 amdgpu_ring_clear_ring(ring);
3160 mutex_lock(&adev->srbm_mutex);
3161 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3162 gfx_v9_0_kiq_init_register(ring);
3163 soc15_grbm_select(adev, 0, 0, 0, 0);
3164 mutex_unlock(&adev->srbm_mutex);
3166 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3167 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3168 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3169 mutex_lock(&adev->srbm_mutex);
3170 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3171 gfx_v9_0_mqd_init(ring);
3172 gfx_v9_0_kiq_init_register(ring);
3173 soc15_grbm_select(adev, 0, 0, 0, 0);
3174 mutex_unlock(&adev->srbm_mutex);
3176 if (adev->gfx.mec.mqd_backup[mqd_idx])
3177 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3183 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3185 struct amdgpu_device *adev = ring->adev;
3186 struct v9_mqd *mqd = ring->mqd_ptr;
3187 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3189 if (!adev->in_gpu_reset && !adev->in_suspend) {
3190 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3191 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3192 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3193 mutex_lock(&adev->srbm_mutex);
3194 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3195 gfx_v9_0_mqd_init(ring);
3196 soc15_grbm_select(adev, 0, 0, 0, 0);
3197 mutex_unlock(&adev->srbm_mutex);
3199 if (adev->gfx.mec.mqd_backup[mqd_idx])
3200 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3201 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3202 /* reset MQD to a clean status */
3203 if (adev->gfx.mec.mqd_backup[mqd_idx])
3204 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3206 /* reset ring buffer */
3208 amdgpu_ring_clear_ring(ring);
3210 amdgpu_ring_clear_ring(ring);
3216 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3218 struct amdgpu_ring *ring;
3221 ring = &adev->gfx.kiq.ring;
3223 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3224 if (unlikely(r != 0))
3227 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3228 if (unlikely(r != 0))
3231 gfx_v9_0_kiq_init_queue(ring);
3232 amdgpu_bo_kunmap(ring->mqd_obj);
3233 ring->mqd_ptr = NULL;
3234 amdgpu_bo_unreserve(ring->mqd_obj);
3235 ring->sched.ready = true;
3239 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3241 struct amdgpu_ring *ring = NULL;
3244 gfx_v9_0_cp_compute_enable(adev, true);
3246 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3247 ring = &adev->gfx.compute_ring[i];
3249 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3250 if (unlikely(r != 0))
3252 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3254 r = gfx_v9_0_kcq_init_queue(ring);
3255 amdgpu_bo_kunmap(ring->mqd_obj);
3256 ring->mqd_ptr = NULL;
3258 amdgpu_bo_unreserve(ring->mqd_obj);
3263 r = gfx_v9_0_kiq_kcq_enable(adev);
3268 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3271 struct amdgpu_ring *ring;
3273 if (!(adev->flags & AMD_IS_APU))
3274 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3276 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3277 /* legacy firmware loading */
3278 r = gfx_v9_0_cp_gfx_load_microcode(adev);
3282 r = gfx_v9_0_cp_compute_load_microcode(adev);
3287 r = gfx_v9_0_kiq_resume(adev);
3291 r = gfx_v9_0_cp_gfx_resume(adev);
3295 r = gfx_v9_0_kcq_resume(adev);
3299 ring = &adev->gfx.gfx_ring[0];
3300 r = amdgpu_ring_test_helper(ring);
3304 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3305 ring = &adev->gfx.compute_ring[i];
3306 amdgpu_ring_test_helper(ring);
3309 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3314 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3316 gfx_v9_0_cp_gfx_enable(adev, enable);
3317 gfx_v9_0_cp_compute_enable(adev, enable);
3320 static int gfx_v9_0_hw_init(void *handle)
3323 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3325 gfx_v9_0_init_golden_registers(adev);
3327 gfx_v9_0_constants_init(adev);
3329 r = gfx_v9_0_csb_vram_pin(adev);
3333 r = gfx_v9_0_rlc_resume(adev);
3337 r = gfx_v9_0_cp_resume(adev);
3341 r = gfx_v9_0_ngg_en(adev);
3348 static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
3351 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
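/* one 6-dword UNMAP_QUEUES packet per compute ring */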
3353 r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
3355 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3357 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3358 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3360 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3361 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3362 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3363 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3364 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3365 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3366 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3367 amdgpu_ring_write(kiq_ring, 0);
3368 amdgpu_ring_write(kiq_ring, 0);
3369 amdgpu_ring_write(kiq_ring, 0);
3371 r = amdgpu_ring_test_helper(kiq_ring);
3373 DRM_ERROR("KCQ disable failed\n");
3378 static int gfx_v9_0_hw_fini(void *handle)
3380 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3382 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3383 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3385 /* disable KCQ so CPC no longer touches memory that will soon be invalid */
3386 gfx_v9_0_kcq_disable(adev);
3388 if (amdgpu_sriov_vf(adev)) {
3389 gfx_v9_0_cp_gfx_enable(adev, false);
3390 /* must disable polling for SRIOV when hw is finished; otherwise the
3391 * CPC engine may keep fetching a WB address that is already invalid
3392 * after sw fini, triggering a DMAR read error on the hypervisor
3393 * side. */
3395 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3399 /* Use the deinitialize sequence from CAIL when unbinding the device
3400 * from the driver, otherwise KIQ hangs when binding it back. */
3402 if (!adev->in_gpu_reset && !adev->in_suspend) {
3403 mutex_lock(&adev->srbm_mutex);
3404 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3405 adev->gfx.kiq.ring.pipe,
3406 adev->gfx.kiq.ring.queue, 0);
3407 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3408 soc15_grbm_select(adev, 0, 0, 0, 0);
3409 mutex_unlock(&adev->srbm_mutex);
3412 gfx_v9_0_cp_enable(adev, false);
3413 gfx_v9_0_rlc_stop(adev);
3415 gfx_v9_0_csb_vram_unpin(adev);
3420 static int gfx_v9_0_suspend(void *handle)
3422 return gfx_v9_0_hw_fini(handle);
3425 static int gfx_v9_0_resume(void *handle)
3427 return gfx_v9_0_hw_init(handle);
3430 static bool gfx_v9_0_is_idle(void *handle)
3432 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3434 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3435 GRBM_STATUS, GUI_ACTIVE))
3441 static int gfx_v9_0_wait_for_idle(void *handle)
3444 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3446 for (i = 0; i < adev->usec_timeout; i++) {
3447 if (gfx_v9_0_is_idle(handle))
3454 static int gfx_v9_0_soft_reset(void *handle)
3456 u32 grbm_soft_reset = 0;
3458 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3461 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3462 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3463 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3464 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3465 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3466 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3467 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3468 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3469 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3470 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3471 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3474 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3475 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3476 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3480 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3481 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3482 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3483 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3486 if (grbm_soft_reset) {
3488 gfx_v9_0_rlc_stop(adev);
3490 /* Disable GFX parsing/prefetching */
3491 gfx_v9_0_cp_gfx_enable(adev, false);
3493 /* Disable MEC parsing/prefetching */
3494 gfx_v9_0_cp_compute_enable(adev, false);
3496 if (grbm_soft_reset) {
3497 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3498 tmp |= grbm_soft_reset;
3499 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3500 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3501 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3505 tmp &= ~grbm_soft_reset;
3506 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3507 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3510 /* Wait a little for things to settle down */
3516 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3520 mutex_lock(&adev->gfx.gpu_clock_mutex);
3521 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3522 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3523 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3524 mutex_unlock(&adev->gfx.gpu_clock_mutex);
3528 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3530 uint32_t gds_base, uint32_t gds_size,
3531 uint32_t gws_base, uint32_t gws_size,
3532 uint32_t oa_base, uint32_t oa_size)
3534 struct amdgpu_device *adev = ring->adev;
3537 gfx_v9_0_write_data_to_reg(ring, 0, false,
3538 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3542 gfx_v9_0_write_data_to_reg(ring, 0, false,
3543 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3547 gfx_v9_0_write_data_to_reg(ring, 0, false,
3548 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3549 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
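/* (1 << (oa_base + oa_size)) - (1 << oa_base) is a mask of oa_size
 * consecutive OA bits starting at bit oa_base */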
3552 gfx_v9_0_write_data_to_reg(ring, 0, false,
3553 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3554 (1 << (oa_size + oa_base)) - (1 << oa_base));
static int gfx_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
	gfx_v9_0_set_ring_funcs(adev);
	gfx_v9_0_set_irq_funcs(adev);
	gfx_v9_0_set_gds_init(adev);
	gfx_v9_0_set_rlc_funcs(adev);

	return 0;
}

static int gfx_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}

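/*
 * RLC safe mode is a handshake with the RLC firmware: the driver writes
 * a CMD/MESSAGE pair to RLC_SAFE_MODE and then polls until the firmware
 * clears the CMD field.  The clock- and power-gating update helpers
 * further down bracket their register writes with this enter/exit pair
 * so the RLC does not change gating state underneath them.
 */
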
static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
	uint32_t rlc_setting, data;
	unsigned i;

	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		data = RLC_SAFE_MODE__CMD_MASK;
		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);

		/* wait for RLC_SAFE_MODE */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
				break;
			udelay(1);
		}
		adev->gfx.rlc.in_safe_mode = true;
	}
}

static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
	uint32_t rlc_setting, data;

	if (!adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
		/*
		 * Try to exit safe mode only if it is already in safe
		 * mode.
		 */
		data = RLC_SAFE_MODE__CMD_MASK;
		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
		adev->gfx.rlc.in_safe_mode = false;
	}
}

static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	gfx_v9_0_enter_rlc_safe_mode(adev);

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
	} else {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
	}

	gfx_v9_0_exit_rlc_safe_mode(adev);
}

static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	/* TODO: double check if we need to perform under safe mode */
	/* gfx_v9_0_enter_rlc_safe_mode(adev); */

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);

	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
}

static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

		if (adev->asic_type != CHIP_VEGA12)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;

		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		/* only for Vega10 & Raven1 */
		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* MGLS is a global flag to control all MGLS in GFX */
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			/* 2 - RLC memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
			}
			/* 3 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (def != data)
					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
			}
		}
	} else {
		/* 1 - MGCG_OVERRIDE */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

		if (adev->asic_type != CHIP_VEGA12)
			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;

		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
		}
	}
}

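/*
 * The CGCG FSM programming below lands on the magic value noted in the
 * inline comments (0x0000363f).  Assuming the usual gfx9 field layout
 * from gc_9_0_sh_mask.h (CGCG_EN at bit 0, CGLS_EN at bit 1,
 * CGLS_REP_COMPANSAT_DELAY at bits 2..7, CGCG_GFX_IDLE_THRESHOLD
 * starting at bit 8), the value decomposes as
 * (0x36 << 8) | (0xF << 2) | CGLS_EN | CGCG_EN = 0x3600 | 0x3c | 0x2 | 0x1.
 */
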
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t data, def;

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	/* Enable 3D CGCG/CGLS */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		/* write cmd to clear cgcg/cgls ov */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable 3Dcgcg FSM(0x0000363f) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);

		data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		/* Disable CGCG/CGLS */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		/* disable cgcg, cgls should be disabled */
		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
	}

	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}

static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable cgcg FSM(0x0000363F) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);

		data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
	}

	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}

static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
		/* ===  CGCG/CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  CGCG + CGLS === */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
		/* ===  CGCG/CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  MGCG + MGLS === */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
	}
	return 0;
}

static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
	.enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
	.exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
};

static int gfx_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (!enable) {
			amdgpu_gfx_off_ctrl(adev, false);
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
		}
		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			gfx_v9_0_enable_cp_power_gating(adev, true);
		else
			gfx_v9_0_enable_cp_power_gating(adev, false);

		/* update gfx cgpg state */
		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);

		/* update mgcg state */
		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);

		if (enable)
			amdgpu_gfx_off_ctrl(adev, true);
		break;
	case CHIP_VEGA12:
		if (!enable) {
			amdgpu_gfx_off_ctrl(adev, false);
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
		} else {
			amdgpu_gfx_off_ctrl(adev, true);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		gfx_v9_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}

static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
	} else {
		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
	}
}

static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			      adev->nbio_funcs->get_hdp_flush_req_offset(adev),
			      adev->nbio_funcs->get_hdp_flush_done_offset(adev),
			      ref_and_mask, ref_and_mask, 0x20);
}

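/*
 * The IB emit helpers below each produce a four-dword packet (header,
 * IB address low/high, control word with length and VMID), which is
 * why the ring funcs tables further down declare .emit_ib_size = 4.
 */
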
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib,
				      unsigned vmid, bool ctx_switch)
{
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
			gfx_v9_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_ib *ib,
					  unsigned vmid, bool ctx_switch)
{
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
					       EOP_TC_NC_ACTION_EN) :
					      (EOP_TCL1_ACTION_EN |
					       EOP_TC_ACTION_EN |
					       EOP_TC_WB_ACTION_EN |
					       EOP_TC_MD_ACTION_EN)) |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data low (discard data high)
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}

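/*
 * Pipeline sync below is a WAIT_REG_MEM against this ring's own fence
 * address: the CP engine selected by usepfp stalls until the last
 * emitted fence sequence number lands in memory, draining prior work
 * before a VM flush or context switch proceeds.
 */
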
static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
			      lower_32_bits(addr), upper_32_bits(addr),
			      seq, 0xffffffff, 4);
}

static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
					   bool acquire)
{
	struct amdgpu_device *adev = ring->adev;
	int pipe_num, tmp, reg;
	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;

	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;

	/* first me only has 2 entries, GFX and HP3D */
	if (ring->me > 0)
		pipe_num -= 2;

	reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
	tmp = RREG32(reg);
	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
	WREG32(reg, tmp);
}

static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
					    struct amdgpu_ring *ring,
					    bool acquire)
{
	int i, pipe;
	bool reserve;
	struct amdgpu_ring *iring;

	mutex_lock(&adev->gfx.pipe_reserve_mutex);
	pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
	if (acquire)
		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
	else
		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);

	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
		/* Clear all reservations - everyone reacquires all resources */
		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
						       true);

		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
						       true);
	} else {
		/* Lower all pipes without a current reservation */
		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
			iring = &adev->gfx.gfx_ring[i];
			pipe = amdgpu_gfx_queue_to_bit(adev,
						       iring->me,
						       iring->pipe,
						       0);
			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
		}

		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
			iring = &adev->gfx.compute_ring[i];
			pipe = amdgpu_gfx_queue_to_bit(adev,
						       iring->me,
						       iring->pipe,
						       0);
			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
		}
	}

	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
}

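/*
 * HQD priority lives behind SRBM-indexed registers, so the helper below
 * must select the target me/pipe/queue under srbm_mutex, program
 * CP_HQD_PIPE_PRIORITY/CP_HQD_QUEUE_PRIORITY, and restore the selection
 * to 0 before dropping the lock.
 */
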
static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
				      struct amdgpu_ring *ring,
				      bool acquire)
{
	uint32_t pipe_priority = acquire ? 0x2 : 0x0;
	uint32_t queue_priority = acquire ? 0xf : 0x0;

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

	WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
	WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
					       enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ring->adev;
	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;

	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		return;

	gfx_v9_0_hqd_set_priority(adev, ring, acquire);
	gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
}

static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}

static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	struct v9_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = amdgpu_csa_vaddr(ring->adev);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
}

static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}

static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}

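/*
 * In emit_cntxcntl below, dw2 is the bitfield consumed by the
 * CONTEXT_CONTROL packet: bit 31 is the global load enable, and the
 * lower bits select which state blocks (global config, CS SH regs,
 * per-context state, CE RAM) the CP reloads on a context switch, per
 * the inline comments next to each constant.
 */
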
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	gfx_v9_0_ring_emit_tmz(ring, true);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time preamble presented
		 * although there is no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

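/*
 * The next two helpers implement patchable conditional execution: the
 * COND_EXEC packet is emitted with a dummy dword count (0x55aa55aa) and
 * its ring offset is returned; once the size of the protected region is
 * known, patch_cond_exec rewrites that dword with the real count,
 * wrapping around the ring buffer if the write pointer has wrapped.
 */
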
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}

static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						  uint32_t reg0, uint32_t reg1,
						  uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	struct amdgpu_device *adev = ring->adev;
	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
		adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;

	if (fw_version_ok)
		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
				      ref, mask, 0x20);
	else
		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
							   ref, mask);
}

static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32(mmSQ_CMD, value);
}

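/*
 * Soft recovery above issues an SQ_CMD with CHECK_VMID set so that only
 * waves belonging to the hung job's VMID are targeted.  It is intended
 * as a lighter-weight recovery the scheduler can try (via the
 * .soft_recovery ring hook) before escalating to a full GPU reset.
 */
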
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

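/*
 * For the EOP and fault handlers below, the IV entry's ring_id encodes
 * the source queue: bits [3:2] carry the ME, bits [1:0] the pipe and
 * bits [6:4] the queue, matching the masks used to decode
 * entry->ring_id.
 */
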
static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

static void gfx_v9_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}

static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v9_0_fault(adev, entry);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v9_0_fault(adev, entry);
	return 0;
}

static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jump to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v9_0_ring_soft_recovery,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.set_priority = gfx_v9_0_ring_set_priority_compute,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init ASIC GDS info */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->gds.mem.total_size = 0x10000;
		break;
	case CHIP_RAVEN:
		adev->gds.mem.total_size = 0x1000;
		break;
	default:
		adev->gds.mem.total_size = 0x10000;
		break;
	}

	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

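/*
 * Worked example of the partitioning above: Vega's 64KB GDS
 * (0x10000 bytes == 64 * 1024) takes the first branch and hands both
 * the gfx and CS partitions 4096 bytes each, while Raven's 0x1000-byte
 * GDS falls through to the 1024-byte partitions in the else branch.
 */
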
static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

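/*
 * The CU info walk below combines two disable sources: the hardware
 * fuse bits in CC_GC_SHADER_ARRAY_CONFIG OR'd with the driver/user
 * disables in GC_USER_SHADER_ARRAY_CONFIG, inverted against the
 * max_cu_per_sh bitmask to yield the active-CU bitmap per SE/SH pair.
 */
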
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v9_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};