/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
24 #include "amdgpu_atombios.h"
25 #include "nbio_v7_4.h"
26 #include "amdgpu_ras.h"
28 #include "nbio/nbio_7_4_offset.h"
29 #include "nbio/nbio_7_4_sh_mask.h"
30 #include "nbio/nbio_7_4_0_smn.h"
31 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
32 #include <uapi/linux/kfd_ioctl.h>

#define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c

/*
 * These are the nbio v7_4_1 register masks. They are defined here temporarily
 * because the nbio v7_4_1 header is still incomplete.
 */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK	0x00001000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK	0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK	0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK	0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK	0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK	0x00020000L

#define mmBIF_MMSCH1_DOORBELL_RANGE		0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX	2
//BIF_MMSCH1_DOORBELL_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT	0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT		0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK		0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK		0x001F0000L
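
/* Remap the HDP MEM/REG flush control registers into the MMIO page that the
 * driver exposes to user mode (adev->rmmio_remap); the offsets within that
 * page are fixed by the KFD_MMIO_REMAP_* definitions in uapi/linux/kfd_ioctl.h,
 * so user-mode clients such as KFD processes can trigger HDP flushes directly.
 */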
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			     BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
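
/* Flush HDP through the remapped HDP_MEM_FLUSH_CNTL register: write it
 * directly over MMIO when no ring is given (or the ring cannot emit register
 * writes), otherwise emit the write as a packet on the ring.
 */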
static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}
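
/* Program the doorbell range of one SDMA engine: OFFSET selects the first
 * doorbell index assigned to the engine and SIZE the number of doorbells;
 * SIZE = 0 disables the range.
 */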
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg, doorbell_range;

	if (instance < 2)
		reg = instance +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
	else
		/*
		 * The register addresses of SDMA2~7 are not consecutive
		 * with those of SDMA0~1; an extra offset of 4 dwords is needed.
		 *
		 *   BIF_SDMA0_DOORBELL_RANGE:  0x3bc0
		 *   BIF_SDMA1_DOORBELL_RANGE:  0x3bc4
		 *   BIF_SDMA2_DOORBELL_RANGE:  0x3bd8
		 *
		 * e.g. instance 2: 0x3bd8/4 = 0x3bc0/4 + 2 + 0x4
		 */
		reg = instance + 0x4 +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}
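
/* Same scheme for the VCN doorbells, which go through the MMSCH (multimedia
 * scheduler): instance selects the BIF_MMSCH0/1 range register, and a window
 * of 8 doorbells is exposed when enabled.
 */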
static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg, doorbell_range;

	if (instance)
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
	else
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
				BIF_MMSCH0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
				BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
				BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	//TODO: Add support for v7.4
}

static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	/* avoid a register write when nothing changed */
	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
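
/* Reference/mask pairs used by the ring backends to poll GPU_HDP_FLUSH_DONE
 * after requesting a flush; SDMA engines 2~7 report completion through the
 * RSVD_ENG bits defined at the top of this file.
 */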
const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
	.ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
	.ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
	.ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
	.ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
	.ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
	.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};
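
/* Decode RCC_IOV_FUNC_IDENTIFIER: bit 0 set means we are running as a virtual
 * function, bit 31 means SR-IOV is enabled; a value of 0 inside a VM indicates
 * plain passthrough.
 */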
static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t reg;

	reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine())	/* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
}

static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
			  BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						       BIF_DOORBELL_INT_CNTL,
						       RAS_CNTLR_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		amdgpu_ras_global_ras_isr(adev);
	}
}

static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
			  BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						       BIF_DOORBELL_INT_CNTL,
						       RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		amdgpu_ras_global_ras_isr(adev);
	}
}

static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
						  struct amdgpu_irq_src *src,
						  unsigned type,
						  enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in the psp
	 * bootloader when it tries to enable the ras feature. The driver only
	 * needs to set the correct interrupt vector for the bare-metal and
	 * sriov use cases respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set the interrupt vector select bit to 0 to select
		 * vector 1 for the bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
						struct amdgpu_irq_src *source,
						struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for ras_controller_irq should be written
	 * to the bif ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. This process function
	 * will therefore never be invoked, so just leave it as a dummy one.
	 */
	return 0;
}

static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
							struct amdgpu_irq_src *src,
							unsigned type,
							enum amdgpu_interrupt_state state)
{
	/* The ras_err_event_athub_irq enablement should be done in the psp
	 * bootloader when it tries to enable the ras feature. The driver only
	 * needs to set the correct interrupt vector for the bare-metal and
	 * sriov use cases respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set the interrupt vector select bit to 0 to select
		 * vector 1 for the bare metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *source,
						 struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the bif ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. This process function
	 * will therefore never be invoked, so just leave it as a dummy one.
	 */
	return 0;
}

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
	.set = nbio_v7_4_set_ras_controller_irq_state,
	.process = nbio_v7_4_process_ras_controller_irq,
};

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
	.set = nbio_v7_4_set_ras_err_event_athub_irq_state,
	.process = nbio_v7_4_process_err_event_athub_irq,
};

static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_controller_irq.funcs =
		&nbio_v7_4_ras_controller_irq_funcs;
	adev->nbio.ras_controller_irq.num_types = 1;

	/* register ras controller interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
			      &adev->nbio.ras_controller_irq);

	return r;
}

static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_err_event_athub_irq.funcs =
		&nbio_v7_4_ras_err_event_athub_irq_funcs;
	adev->nbio.ras_err_event_athub_irq.num_types = 1;

	/* register ras err event athub interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
			      &adev->nbio.ras_err_event_athub_irq);

	return r;
}

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					    void *ras_error_status)
{
	uint32_t global_sts, central_sts, int_eoi;
	uint32_t corr, fatal, non_fatal;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
	corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
	fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
	non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
				  ParityErrNonFatal);

	if (corr)
		err_data->ce_count++;
	if (fatal)
		err_data->ue_count++;

	if (corr || fatal || non_fatal) {
		central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
		/* clear error status register */
		WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);

		if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
				  BIFL_RasContller_Intr_Recv)) {
			/* clear interrupt status register */
			WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);

			int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
			int_eoi = REG_SET_FIELD(int_eoi,
						IOHC_INTERRUPT_EOI, SMI_EOI, 1);
			WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
		}
	}
}

static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
		       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}

const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
	.get_rev_id = nbio_v7_4_get_rev_id,
	.mc_access_enable = nbio_v7_4_mc_access_enable,
	.hdp_flush = nbio_v7_4_hdp_flush,
	.get_memsize = nbio_v7_4_get_memsize,
	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
	.enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
	.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.detect_hw_virt = nbio_v7_4_detect_hw_virt,
	.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
	.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
	.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
	.query_ras_error_count = nbio_v7_4_query_ras_error_count,
	.ras_late_init = amdgpu_nbio_ras_late_init,
};
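
/*
 * Usage sketch (illustrative, not part of this file): the SoC initialization
 * code is expected to wire these tables up roughly as
 *
 *	adev->nbio.funcs = &nbio_v7_4_funcs;
 *	adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
 *
 * after which common code invokes the callbacks, e.g.
 * adev->nbio.funcs->hdp_flush(adev, ring).
 */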