/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "mmhub_v9_4.h"

#include "mmhub/mmhub_9_4_1_offset.h"
#include "mmhub/mmhub_9_4_1_sh_mask.h"
#include "mmhub/mmhub_9_4_1_default.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "vega10_enum.h"
#include "soc15.h"
#include "soc15_common.h"

#define MMHUB_NUM_INSTANCES                     2
#define MMHUB_INSTANCE_REGISTER_OFFSET          0x3000

u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
{
        /* The base is the same on both Arcturus MMHUBs; read one here. */
        u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
        u64 top = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP);

        base &= VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
        base <<= 24;

        top &= VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
        top <<= 24;

        adev->gmc.fb_start = base;
        adev->gmc.fb_end = top;

        return base;
}

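/*
 * Program the page table base address of the given VMID on one
 * MMHUB instance.
 */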
static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
                                uint32_t vmid, uint64_t value)
{
        /* Register distance between mmVML2VC0_VM_CONTEXT0_* and
         * mmVML2VC0_VM_CONTEXT1_*
         */
        int dist = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
                        - mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
                            dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            lower_32_bits(value));

        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
                            dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            upper_32_bits(value));
}

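/* Point context 0 of this hub at the GART page table and program its range. */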
static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
                                               int hubid)
{
        uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);

        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            (u32)(adev->gmc.gart_start >> 12));
        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            (u32)(adev->gmc.gart_start >> 44));

        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            (u32)(adev->gmc.gart_end >> 12));
        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            (u32)(adev->gmc.gart_end >> 44));
}

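/* Set the page table base for @vmid on both MMHUB instances. */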
void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
{
        int i;

        for (i = 0; i < MMHUB_NUM_INSTANCES; i++)
                mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid,
                                page_table_base);
}

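/*
 * Program the AGP and system apertures, the default page address and the
 * protection fault default address for one hub instance.
 */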
static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
                                                 int hubid)
{
        uint64_t value;
        uint32_t tmp;

        /* Program the AGP BAR */
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BASE,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            0);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_TOP,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            adev->gmc.agp_end >> 24);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BOT,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            adev->gmc.agp_start >> 24);

        /* Program the system aperture low logical page number. */
        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
                adev->vm_manager.vram_base_offset;
        WREG32_SOC15_OFFSET(MMHUB, 0,
                        mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
                        hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                        (u32)(value >> 12));
        WREG32_SOC15_OFFSET(MMHUB, 0,
                        mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
                        hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                        (u32)(value >> 44));

        /* Program "protection fault". */
        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            (u32)(adev->dummy_page_addr >> 12));
        WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
                            (u32)((u64)adev->dummy_page_addr >> 44));

        tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
                                  mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
                                  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
                            ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}

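/* Configure the L1 TLB for one hub instance. */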
static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
{
        uint32_t tmp;

        /* Setup TLB control */
        tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
                           mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                           hubid * MMHUB_INSTANCE_REGISTER_OFFSET);

        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            ENABLE_L1_TLB, 1);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_ACCESS_MODE, 3);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            ATC_EN, 1);

        WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}

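/* Configure the VM L2 cache for one hub instance. */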
static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
{
        uint32_t tmp;

        /* Setup L2 cache */
        tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
                                  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
                            ENABLE_L2_CACHE, 1);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
                            ENABLE_L2_FRAGMENT_PROCESSING, 1);
        /* XXX for emulation, refer to closed source code. */
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
                            L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
                            PDE_FAULT_CLASSIFICATION, 0);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
                            CONTEXT1_IDENTITY_ACCESS_MODE, 1);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
                            IDENTITY_MODE_FRAGMENT_SIZE, 0);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

        tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
                                  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
                            INVALIDATE_ALL_L1_TLBS, 1);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
                            INVALIDATE_L2_CACHE, 1);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

        tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
        if (adev->gmc.translate_further) {
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
        } else {
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
        }
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

        tmp = mmVML2PF0_VM_L2_CNTL4_DEFAULT;
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
                            VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
        tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
                            VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL4,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}

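/* Enable context 0 (the system/GART domain) with page table depth 0. */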
static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
                                            int hubid)
{
        uint32_t tmp;

        tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
                                  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
        tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
                            RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
        WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}

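/* Make the context 1 identity aperture empty (low > high) so it is unused. */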
static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
                                                 int hubid)
{
        WREG32_SOC15_OFFSET(MMHUB, 0,
                    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0xFFFFFFFF);
        WREG32_SOC15_OFFSET(MMHUB, 0,
                    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0x0000000F);

        WREG32_SOC15_OFFSET(MMHUB, 0,
                    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
        WREG32_SOC15_OFFSET(MMHUB, 0,
                    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);

        WREG32_SOC15_OFFSET(MMHUB, 0,
                    mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
        WREG32_SOC15_OFFSET(MMHUB, 0,
                    mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
}

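/*
 * Enable contexts 1-15 and program their page table depth, block size
 * and protection fault behaviour.
 */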
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
        uint32_t tmp;
        int i;

        for (i = 0; i <= 14; i++) {
                tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
                                hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    ENABLE_CONTEXT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    PAGE_TABLE_DEPTH,
                                    adev->vm_manager.num_level);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    PAGE_TABLE_BLOCK_SIZE,
                                    adev->vm_manager.block_size - 9);
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
                                    !amdgpu_noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
                                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i,
                                    tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * 2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
                            hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * 2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
                                hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * 2,
                                lower_32_bits(adev->vm_manager.max_pfn - 1));
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
                                hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * 2,
                                upper_32_bits(adev->vm_manager.max_pfn - 1));
        }
}

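/* Open the full address range on all 18 VM invalidation engines of this hub. */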
static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
                                            int hubid)
{
        unsigned i;

        for (i = 0; i < 18; ++i) {
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
                                hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
                                0xffffffff);
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
                                hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
                                0x1f);
        }
}

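/* Bring up GART translation on both MMHUB instances. */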
int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
                /* GART Enable. */
                mmhub_v9_4_init_gart_aperture_regs(adev, i);
                mmhub_v9_4_init_system_aperture_regs(adev, i);
                mmhub_v9_4_init_tlb_regs(adev, i);
                mmhub_v9_4_init_cache_regs(adev, i);

                mmhub_v9_4_enable_system_domain(adev, i);
                mmhub_v9_4_disable_identity_aperture(adev, i);
                mmhub_v9_4_setup_vmid_config(adev, i);
                mmhub_v9_4_program_invalidation(adev, i);
        }

        return 0;
}

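/*
 * Disable all VM contexts, the L1 TLB and the L2 cache on both
 * MMHUB instances.
 */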
void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
{
        u32 tmp;
        u32 i, j;

        for (j = 0; j < MMHUB_NUM_INSTANCES; j++) {
                /* Disable all tables */
                for (i = 0; i < 16; i++)
                        WREG32_SOC15_OFFSET(MMHUB, 0,
                                            mmVML2VC0_VM_CONTEXT0_CNTL,
                                            j * MMHUB_INSTANCE_REGISTER_OFFSET +
                                            i, 0);

                /* Setup TLB control */
                tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
                                   mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                                   j * MMHUB_INSTANCE_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                                    ENABLE_L1_TLB, 0);
                tmp = REG_SET_FIELD(tmp,
                                    VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                                    ENABLE_ADVANCED_DRIVER_MODEL, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0,
                                    mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                                    j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

                /* Setup L2 cache */
                tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
                                          j * MMHUB_INSTANCE_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
                                    ENABLE_L2_CACHE, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
                                    j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
                                    j * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
        }
}

/**
 * mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
        u32 tmp;
        int i;

        for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
                tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
                                          mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                          i * MMHUB_INSTANCE_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp,
                            VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                            TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
                            value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    NACK_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    VALID_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    READ_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT,
                                    value);
                if (!value) {
                        tmp = REG_SET_FIELD(tmp,
                                            VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                            CRASH_ON_NO_RETRY_FAULT, 1);
                        tmp = REG_SET_FIELD(tmp,
                                            VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                            CRASH_ON_RETRY_FAULT, 1);
                }

                WREG32_SOC15_OFFSET(MMHUB, 0,
                                    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
                                    i * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
        }
}

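/* Fill in the per-instance vmhub register offsets used by the GMC code. */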
void mmhub_v9_4_init(struct amdgpu_device *adev)
{
        struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
                {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
        int i;

        for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
                hub[i]->ctx0_ptb_addr_lo32 =
                        SOC15_REG_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) +
                            i * MMHUB_INSTANCE_REGISTER_OFFSET;
                hub[i]->ctx0_ptb_addr_hi32 =
                        SOC15_REG_OFFSET(MMHUB, 0,
                            mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
                            i * MMHUB_INSTANCE_REGISTER_OFFSET;
                hub[i]->vm_inv_eng0_sem =
                        SOC15_REG_OFFSET(MMHUB, 0,
                                         mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
                                         i * MMHUB_INSTANCE_REGISTER_OFFSET;
                hub[i]->vm_inv_eng0_req =
                        SOC15_REG_OFFSET(MMHUB, 0,
                                         mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
                                         i * MMHUB_INSTANCE_REGISTER_OFFSET;
                hub[i]->vm_inv_eng0_ack =
                        SOC15_REG_OFFSET(MMHUB, 0,
                                         mmVML2VC0_VM_INVALIDATE_ENG0_ACK) +
                                         i * MMHUB_INSTANCE_REGISTER_OFFSET;
                hub[i]->vm_context0_cntl =
                        SOC15_REG_OFFSET(MMHUB, 0,
                                         mmVML2VC0_VM_CONTEXT0_CNTL) +
                                         i * MMHUB_INSTANCE_REGISTER_OFFSET;
                hub[i]->vm_l2_pro_fault_status =
                        SOC15_REG_OFFSET(MMHUB, 0,
                                    mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS) +
                                    i * MMHUB_INSTANCE_REGISTER_OFFSET;
                hub[i]->vm_l2_pro_fault_cntl =
                        SOC15_REG_OFFSET(MMHUB, 0,
                                    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL) +
                                    i * MMHUB_INSTANCE_REGISTER_OFFSET;
        }
}

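/*
 * Toggle medium grain clock gating in ATCL2 and the DAGB units. Note that
 * only the first four DAGB instances are touched on the second hub.
 */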
static void mmhub_v9_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                        bool enable)
{
        uint32_t def, data, def1, data1;
        int i, j;
        int dist = mmDAGB1_CNTL_MISC2 - mmDAGB0_CNTL_MISC2;

        for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
                def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
                                        mmATCL2_0_ATC_L2_MISC_CG,
                                        i * MMHUB_INSTANCE_REGISTER_OFFSET);

                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
                        data |= ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
                else
                        data &= ~ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;

                if (def != data)
                        WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
                                i * MMHUB_INSTANCE_REGISTER_OFFSET, data);

                for (j = 0; j < 5; j++) {
                        def1 = data1 = RREG32_SOC15_OFFSET(MMHUB, 0,
                                        mmDAGB0_CNTL_MISC2,
                                        i * MMHUB_INSTANCE_REGISTER_OFFSET +
                                        j * dist);
                        if (enable &&
                            (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
                                data1 &=
                                    ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
                        } else {
                                data1 |=
                                    (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                                    DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
                        }

                        if (def1 != data1)
                                WREG32_SOC15_OFFSET(MMHUB, 0,
                                        mmDAGB0_CNTL_MISC2,
                                        i * MMHUB_INSTANCE_REGISTER_OFFSET +
                                        j * dist, data1);

                        if (i == 1 && j == 3)
                                break;
                }
        }
}

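/* Toggle memory light sleep in ATCL2 on both hub instances. */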
static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t def, data;
        int i;

        for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
                def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
                                        mmATCL2_0_ATC_L2_MISC_CG,
                                        i * MMHUB_INSTANCE_REGISTER_OFFSET);

                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
                        data |= ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
                else
                        data &= ~ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

                if (def != data)
                        WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
                                i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
        }
}

int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
                               enum amd_clockgating_state state)
{
        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (adev->asic_type) {
        case CHIP_ARCTURUS:
                mmhub_v9_4_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                mmhub_v9_4_update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }

        return 0;
}

void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
        int data, data1;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        /* AMD_CG_SUPPORT_MC_MGCG */
        data = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);

        data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

        if ((data & ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK) &&
            !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
                *flags |= AMD_CG_SUPPORT_MC_MGCG;

        /* AMD_CG_SUPPORT_MC_LS */
        if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
                *flags |= AMD_CG_SUPPORT_MC_LS;
}

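/* SEC/DED error counter fields exposed by the MMEA0/MMEA1 EDC count registers. */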
static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
        { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
        },
        { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
        },
        { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
        },
        { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
        },
        { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
        },
        { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
        SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
        },
        { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
        SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
        },
        { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
        SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
        },
        { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
        SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
        SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
        },
        { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
        },
        { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
        },
        { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
        },
        { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
        },
        { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
        },
        { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
        },
        { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
        },
        { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
        },
        { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
        },
        { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
        },
        { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
        },
        { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
        SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
        },
        { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
        SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
        },
        { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
        SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
        SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
        },
        { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
        SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
        SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
        0, 0,
        },
        { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
        },
        { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
        },
        { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
        },
        { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
        },
        { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
        },
        { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
        },
        { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
        0, 0,
        SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
        }
};

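/* EDC count registers scanned when querying RAS error counts. */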
static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0},
};

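/* Accumulate the SEC and DED counts encoded in one EDC count register value. */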
static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
        uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
{
        uint32_t i;
        uint32_t sec_cnt, ded_cnt;

        for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
                if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
                        continue;

                sec_cnt = (value &
                                mmhub_v9_4_ras_fields[i].sec_count_mask) >>
                                mmhub_v9_4_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
                        DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
                                mmhub_v9_4_ras_fields[i].name,
                                sec_cnt);
                        *sec_count += sec_cnt;
                }

                ded_cnt = (value &
                                mmhub_v9_4_ras_fields[i].ded_count_mask) >>
                                mmhub_v9_4_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
                        DRM_INFO("MMHUB SubBlock %s, DED %d\n",
                                mmhub_v9_4_ras_fields[i].name,
                                ded_cnt);
                        *ded_count += ded_cnt;
                }
        }

        return 0;
}

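/* Read all EDC count registers and report correctable/uncorrectable totals. */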
static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
                                           void *ras_error_status)
{
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        uint32_t sec_count = 0, ded_count = 0;
        uint32_t i;
        uint32_t reg_value;

        err_data->ue_count = 0;
        err_data->ce_count = 0;

        for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) {
                reg_value =
                        RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
                if (reg_value)
                        mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
                                reg_value, &sec_count, &ded_count);
        }

        err_data->ce_count += sec_count;
        err_data->ue_count += ded_count;
}

const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
        .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
};