/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "mmhub_v9_4.h"

#include "mmhub/mmhub_9_4_1_offset.h"
#include "mmhub/mmhub_9_4_1_sh_mask.h"
#include "mmhub/mmhub_9_4_1_default.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"
36 #define MMHUB_NUM_INSTANCES 2
37 #define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000
39 u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
41 /* The base should be same b/t 2 mmhubs on Acrturus. Read one here. */
42 u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
43 u64 top = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP);
45 base &= VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
48 top &= VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
51 adev->gmc.fb_start = base;
52 adev->gmc.fb_end = top;
57 static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
58 uint32_t vmid, uint64_t value)
60 /* two registers distance between mmVML2VC0_VM_CONTEXT0_* to
61 * mmVML2VC0_VM_CONTEXT1_*
63 int dist = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
64 - mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
66 WREG32_SOC15_OFFSET(MMHUB, 0,
67 mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
68 dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
69 lower_32_bits(value));
71 WREG32_SOC15_OFFSET(MMHUB, 0,
72 mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
73 dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
74 upper_32_bits(value));
78 static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
81 uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
83 mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);
85 WREG32_SOC15_OFFSET(MMHUB, 0,
86 mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
87 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
88 (u32)(adev->gmc.gart_start >> 12));
89 WREG32_SOC15_OFFSET(MMHUB, 0,
90 mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
91 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
92 (u32)(adev->gmc.gart_start >> 44));
94 WREG32_SOC15_OFFSET(MMHUB, 0,
95 mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
96 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
97 (u32)(adev->gmc.gart_end >> 12));
98 WREG32_SOC15_OFFSET(MMHUB, 0,
99 mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
100 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
101 (u32)(adev->gmc.gart_end >> 44));
104 void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
105 uint64_t page_table_base)
109 for (i = 0; i < MMHUB_NUM_INSTANCES; i++)
110 mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid,
114 static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
120 /* Program the AGP BAR */
121 WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BASE,
122 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
124 WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_TOP,
125 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
126 adev->gmc.agp_end >> 24);
127 WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BOT,
128 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
129 adev->gmc.agp_start >> 24);
131 /* Program the system aperture low logical page number. */
132 WREG32_SOC15_OFFSET(MMHUB, 0,
133 mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
134 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
135 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
136 WREG32_SOC15_OFFSET(MMHUB, 0,
137 mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
138 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
139 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
141 /* Set default page address. */
142 value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
143 adev->vm_manager.vram_base_offset;
144 WREG32_SOC15_OFFSET(MMHUB, 0,
145 mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
146 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
148 WREG32_SOC15_OFFSET(MMHUB, 0,
149 mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
150 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
153 /* Program "protection fault". */
154 WREG32_SOC15_OFFSET(MMHUB, 0,
155 mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
156 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
157 (u32)(adev->dummy_page_addr >> 12));
158 WREG32_SOC15_OFFSET(MMHUB, 0,
159 mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
160 hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
161 (u32)((u64)adev->dummy_page_addr >> 44));
163 tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
164 mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
165 hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
166 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
167 ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
168 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
169 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
172 static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
176 /* Setup TLB control */
177 tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
178 mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
179 hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
181 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
183 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
184 SYSTEM_ACCESS_MODE, 3);
185 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
186 ENABLE_ADVANCED_DRIVER_MODEL, 1);
187 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
188 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
189 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
191 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
192 MTYPE, MTYPE_UC);/* XXX for emulation. */
193 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
196 WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
197 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
200 static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
205 tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
206 hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
207 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
209 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
210 ENABLE_L2_FRAGMENT_PROCESSING, 1);
211 /* XXX for emulation, Refer to closed source code.*/
212 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
213 L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
214 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
215 PDE_FAULT_CLASSIFICATION, 0);
216 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
217 CONTEXT1_IDENTITY_ACCESS_MODE, 1);
218 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
219 IDENTITY_MODE_FRAGMENT_SIZE, 0);
220 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
221 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
223 tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
224 hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
225 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
226 INVALIDATE_ALL_L1_TLBS, 1);
227 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
228 INVALIDATE_L2_CACHE, 1);
229 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
230 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
232 tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
233 if (adev->gmc.translate_further) {
234 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
235 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
236 L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
238 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
239 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
240 L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
242 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
243 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
245 tmp = mmVML2PF0_VM_L2_CNTL4_DEFAULT;
246 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
247 VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
248 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
249 VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
250 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL4,
251 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
254 static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
259 tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
260 hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
261 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
262 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
263 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
264 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
265 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
266 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
269 static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
272 WREG32_SOC15_OFFSET(MMHUB, 0,
273 mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
274 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0XFFFFFFFF);
275 WREG32_SOC15_OFFSET(MMHUB, 0,
276 mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
277 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0x0000000F);
279 WREG32_SOC15_OFFSET(MMHUB, 0,
280 mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
281 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
282 WREG32_SOC15_OFFSET(MMHUB, 0,
283 mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
284 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
286 WREG32_SOC15_OFFSET(MMHUB, 0,
287 mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
288 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
289 WREG32_SOC15_OFFSET(MMHUB, 0,
290 mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
291 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
294 static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
299 for (i = 0; i <= 14; i++) {
300 tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
301 hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
302 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
304 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
306 adev->vm_manager.num_level);
307 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
308 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
309 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
310 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
312 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
313 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
314 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
315 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
316 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
317 READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
318 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
319 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
320 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
321 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
322 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
323 PAGE_TABLE_BLOCK_SIZE,
324 adev->vm_manager.block_size - 9);
325 /* Send no-retry XNACK on fault to suppress VM fault storm. */
326 tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
327 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
329 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
330 hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i,
332 WREG32_SOC15_OFFSET(MMHUB, 0,
333 mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
334 hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0);
335 WREG32_SOC15_OFFSET(MMHUB, 0,
336 mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
337 hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0);
338 WREG32_SOC15_OFFSET(MMHUB, 0,
339 mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
340 hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2,
341 lower_32_bits(adev->vm_manager.max_pfn - 1));
342 WREG32_SOC15_OFFSET(MMHUB, 0,
343 mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
344 hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2,
345 upper_32_bits(adev->vm_manager.max_pfn - 1));
349 static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
354 for (i = 0; i < 18; ++i) {
355 WREG32_SOC15_OFFSET(MMHUB, 0,
356 mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
357 hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
359 WREG32_SOC15_OFFSET(MMHUB, 0,
360 mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
361 hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
366 int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
370 for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
372 mmhub_v9_4_init_gart_aperture_regs(adev, i);
373 mmhub_v9_4_init_system_aperture_regs(adev, i);
374 mmhub_v9_4_init_tlb_regs(adev, i);
375 mmhub_v9_4_init_cache_regs(adev, i);
377 mmhub_v9_4_enable_system_domain(adev, i);
378 mmhub_v9_4_disable_identity_aperture(adev, i);
379 mmhub_v9_4_setup_vmid_config(adev, i);
380 mmhub_v9_4_program_invalidation(adev, i);
386 void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
391 for (j = 0; j < MMHUB_NUM_INSTANCES; j++) {
392 /* Disable all tables */
393 for (i = 0; i < 16; i++)
394 WREG32_SOC15_OFFSET(MMHUB, 0,
395 mmVML2VC0_VM_CONTEXT0_CNTL,
396 j * MMHUB_INSTANCE_REGISTER_OFFSET +
399 /* Setup TLB control */
400 tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
401 mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
402 j * MMHUB_INSTANCE_REGISTER_OFFSET);
403 tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
405 tmp = REG_SET_FIELD(tmp,
406 VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
407 ENABLE_ADVANCED_DRIVER_MODEL, 0);
408 WREG32_SOC15_OFFSET(MMHUB, 0,
409 mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
410 j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
413 tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
414 j * MMHUB_INSTANCE_REGISTER_OFFSET);
415 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
417 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
418 j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
419 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
420 j * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
/**
 * mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
430 void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
435 for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
436 tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
437 mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
438 i * MMHUB_INSTANCE_REGISTER_OFFSET);
439 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
440 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT,
442 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
443 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT,
445 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
446 PDE1_PROTECTION_FAULT_ENABLE_DEFAULT,
448 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
449 PDE2_PROTECTION_FAULT_ENABLE_DEFAULT,
451 tmp = REG_SET_FIELD(tmp,
452 VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
453 TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
455 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
456 NACK_PROTECTION_FAULT_ENABLE_DEFAULT,
458 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
459 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
461 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
462 VALID_PROTECTION_FAULT_ENABLE_DEFAULT,
464 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
465 READ_PROTECTION_FAULT_ENABLE_DEFAULT,
467 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
468 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT,
470 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
471 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT,
474 tmp = REG_SET_FIELD(tmp,
475 VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
476 CRASH_ON_NO_RETRY_FAULT, 1);
477 tmp = REG_SET_FIELD(tmp,
478 VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
479 CRASH_ON_RETRY_FAULT, 1);
482 WREG32_SOC15_OFFSET(MMHUB, 0,
483 mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
484 i * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
488 void mmhub_v9_4_init(struct amdgpu_device *adev)
490 struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
491 {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
494 for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
495 hub[i]->ctx0_ptb_addr_lo32 =
496 SOC15_REG_OFFSET(MMHUB, 0,
497 mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) +
498 i * MMHUB_INSTANCE_REGISTER_OFFSET;
499 hub[i]->ctx0_ptb_addr_hi32 =
500 SOC15_REG_OFFSET(MMHUB, 0,
501 mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
502 i * MMHUB_INSTANCE_REGISTER_OFFSET;
503 hub[i]->vm_inv_eng0_sem =
504 SOC15_REG_OFFSET(MMHUB, 0,
505 mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
506 i * MMHUB_INSTANCE_REGISTER_OFFSET;
507 hub[i]->vm_inv_eng0_req =
508 SOC15_REG_OFFSET(MMHUB, 0,
509 mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
510 i * MMHUB_INSTANCE_REGISTER_OFFSET;
511 hub[i]->vm_inv_eng0_ack =
512 SOC15_REG_OFFSET(MMHUB, 0,
513 mmVML2VC0_VM_INVALIDATE_ENG0_ACK) +
514 i * MMHUB_INSTANCE_REGISTER_OFFSET;
515 hub[i]->vm_context0_cntl =
516 SOC15_REG_OFFSET(MMHUB, 0,
517 mmVML2VC0_VM_CONTEXT0_CNTL) +
518 i * MMHUB_INSTANCE_REGISTER_OFFSET;
519 hub[i]->vm_l2_pro_fault_status =
520 SOC15_REG_OFFSET(MMHUB, 0,
521 mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS) +
522 i * MMHUB_INSTANCE_REGISTER_OFFSET;
523 hub[i]->vm_l2_pro_fault_cntl =
524 SOC15_REG_OFFSET(MMHUB, 0,
525 mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL) +
526 i * MMHUB_INSTANCE_REGISTER_OFFSET;
530 static void mmhub_v9_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
533 uint32_t def, data, def1, data1;
535 int dist = mmDAGB1_CNTL_MISC2 - mmDAGB0_CNTL_MISC2;
537 for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
538 def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
539 mmATCL2_0_ATC_L2_MISC_CG,
540 i * MMHUB_INSTANCE_REGISTER_OFFSET);
542 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
543 data |= ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
545 data &= ~ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
548 WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
549 i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
551 for (j = 0; j < 5; j++) {
552 def1 = data1 = RREG32_SOC15_OFFSET(MMHUB, 0,
554 i * MMHUB_INSTANCE_REGISTER_OFFSET +
557 (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
559 ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
560 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
561 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
562 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
563 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
564 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
567 (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
568 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
569 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
570 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
571 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
572 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
576 WREG32_SOC15_OFFSET(MMHUB, 0,
578 i * MMHUB_INSTANCE_REGISTER_OFFSET +
581 if (i == 1 && j == 3)
587 static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
593 for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
594 def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
595 mmATCL2_0_ATC_L2_MISC_CG,
596 i * MMHUB_INSTANCE_REGISTER_OFFSET);
598 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
599 data |= ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
601 data &= ~ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
604 WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
605 i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
609 int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
610 enum amd_clockgating_state state)
612 if (amdgpu_sriov_vf(adev))
615 switch (adev->asic_type) {
617 mmhub_v9_4_update_medium_grain_clock_gating(adev,
618 state == AMD_CG_STATE_GATE ? true : false);
619 mmhub_v9_4_update_medium_grain_light_sleep(adev,
620 state == AMD_CG_STATE_GATE ? true : false);
629 void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
633 if (amdgpu_sriov_vf(adev))
636 /* AMD_CG_SUPPORT_MC_MGCG */
637 data = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);
639 data1 = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);
641 if ((data & ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK) &&
642 !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
643 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
644 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
645 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
646 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
647 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
648 *flags |= AMD_CG_SUPPORT_MC_MGCG;
650 /* AMD_CG_SUPPORT_MC_LS */
651 if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
652 *flags |= AMD_CG_SUPPORT_MC_LS;
655 static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
656 { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
657 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
658 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
660 { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
661 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
662 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
664 { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
665 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
666 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
668 { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
669 SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
670 SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
672 { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
673 SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
674 SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
676 { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
677 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
680 { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
681 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
684 { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
685 SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
688 { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
689 SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
692 { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
693 SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
696 { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
697 SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
698 SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
700 { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
701 SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
702 SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
704 { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
705 SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
706 SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
708 { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
709 SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
712 { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
713 SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
716 { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
718 SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
720 { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
722 SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
724 { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
726 SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
728 { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
730 SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
732 { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
734 SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
736 { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
738 SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
740 { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
742 SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
744 { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
745 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
746 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
748 { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
749 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
750 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
752 { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
753 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
754 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
756 { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
757 SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
758 SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
760 { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
761 SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
762 SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
764 { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
765 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
768 { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
769 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
772 { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
773 SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
776 { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
777 SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
780 { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
781 SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
784 { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
785 SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
786 SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
788 { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
789 SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
790 SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
792 { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
793 SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
794 SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
796 { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
797 SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
800 { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
801 SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
804 { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
806 SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
808 { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
810 SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
812 { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
814 SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
816 { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
818 SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
820 { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
822 SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
824 { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
826 SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
828 { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
830 SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
834 static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
835 { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0},
836 { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0},
837 { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0},
838 { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0},
839 { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0},
840 { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0},
843 static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
844 uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
847 uint32_t sec_cnt, ded_cnt;
849 for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
850 if(mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
854 mmhub_v9_4_ras_fields[i].sec_count_mask) >>
855 mmhub_v9_4_ras_fields[i].sec_count_shift;
857 DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
858 mmhub_v9_4_ras_fields[i].name,
860 *sec_count += sec_cnt;
864 mmhub_v9_4_ras_fields[i].ded_count_mask) >>
865 mmhub_v9_4_ras_fields[i].ded_count_shift;
867 DRM_INFO("MMHUB SubBlock %s, DED %d\n",
868 mmhub_v9_4_ras_fields[i].name,
870 *ded_count += ded_cnt;
877 static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
878 void *ras_error_status)
880 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
881 uint32_t sec_count = 0, ded_count = 0;
885 err_data->ue_count = 0;
886 err_data->ce_count = 0;
888 for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) {
890 RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
892 mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
893 reg_value, &sec_count, &ded_count);
896 err_data->ce_count += sec_count;
897 err_data->ue_count += ded_count;
900 const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
901 .ras_late_init = amdgpu_mmhub_ras_late_init,
902 .query_ras_error_count = mmhub_v9_4_query_ras_error_count,