drm/amdgpu: convert nbio to use callbacks (v2)
author Alex Deucher <alexander.deucher@amd.com>
Fri, 8 Dec 2017 18:07:58 +0000 (13:07 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 13 Dec 2017 22:28:07 +0000 (17:28 -0500)
Cleans up and consolidates all of the per-asic logic.

v2: squash in "drm/amdgpu: fix NULL err for sriov detect" (Chunming)

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
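
Below is a minimal, self-contained sketch (not part of the patch) of the callback pattern this commit introduces: a per-ASIC function-pointer table is selected once at init time, and every caller dispatches through it instead of open-coding "if (APU) nbio_v7_0_xxx() else nbio_v6_1_xxx()" at each call site. The struct and function names here are simplified stand-ins for amdgpu_nbio_funcs / nbio_v6_1_funcs / nbio_v7_0_funcs and are illustrative only, not the real kernel API.

/* Hypothetical user-space sketch of the dispatch pattern. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dev;

/* Per-ASIC callback table, analogous to struct amdgpu_nbio_funcs. */
struct nbio_funcs {
	uint32_t (*get_memsize)(struct dev *d);
	void (*hdp_flush)(struct dev *d);
};

struct dev {
	bool is_apu;                         /* stands in for adev->flags & AMD_IS_APU */
	const struct nbio_funcs *nbio_funcs; /* stands in for adev->nbio_funcs */
};

/* Discrete-GPU backend (nbio v6.1 in the real driver). */
static uint32_t v6_get_memsize(struct dev *d) { return 8192; }
static void v6_hdp_flush(struct dev *d) { puts("v6.1 hdp flush"); }

static const struct nbio_funcs nbio_v6_funcs = {
	.get_memsize = v6_get_memsize,
	.hdp_flush   = v6_hdp_flush,
};

/* APU backend (nbio v7.0 in the real driver). */
static uint32_t v7_get_memsize(struct dev *d) { return 4096; }
static void v7_hdp_flush(struct dev *d) { puts("v7.0 hdp flush"); }

static const struct nbio_funcs nbio_v7_funcs = {
	.get_memsize = v7_get_memsize,
	.hdp_flush   = v7_hdp_flush,
};

/* The per-ASIC decision is made exactly once, at init time
 * (soc15_set_ip_blocks() in the real patch)... */
static void dev_init(struct dev *d, bool is_apu)
{
	d->is_apu = is_apu;
	d->nbio_funcs = is_apu ? &nbio_v7_funcs : &nbio_v6_funcs;
}

/* ...so callers stay ASIC-agnostic and just dispatch through the table. */
int main(void)
{
	struct dev d;

	dev_init(&d, true);
	printf("memsize: %u MB\n", d.nbio_funcs->get_memsize(&d));
	d.nbio_funcs->hdp_flush(&d);
	return 0;
}

This is why the diff below can make all the nbio_v6_1_* / nbio_v7_0_* helpers static, drop their extern declarations from the headers, and leave only the two exported funcs tables.
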
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/vega10_ih.c

index c8bc1a982dc480eeb958abea4c04e8ca575ffdcd..c73eca86c9f11cc55d3330ed3a613e9179a8b417 100644 (file)
@@ -1428,16 +1428,52 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u
 /*
  * amdgpu nbio functions
  *
- * Fix me :
- *     Put more NBIO specifc func wraper here , for now just try to minimize the
- *     change to avoid use SOC15_REG_OFFSET in the constant array
  */
+struct nbio_hdp_flush_reg {
+       u32 ref_and_mask_cp0;
+       u32 ref_and_mask_cp1;
+       u32 ref_and_mask_cp2;
+       u32 ref_and_mask_cp3;
+       u32 ref_and_mask_cp4;
+       u32 ref_and_mask_cp5;
+       u32 ref_and_mask_cp6;
+       u32 ref_and_mask_cp7;
+       u32 ref_and_mask_cp8;
+       u32 ref_and_mask_cp9;
+       u32 ref_and_mask_sdma0;
+       u32 ref_and_mask_sdma1;
+};
 
 struct amdgpu_nbio_funcs {
-       u32 (*get_hdp_flush_req_offset)(struct amdgpu_device*);
-       u32 (*get_hdp_flush_done_offset)(struct amdgpu_device*);
-       u32 (*get_pcie_index_offset)(struct amdgpu_device*);
-       u32 (*get_pcie_data_offset)(struct amdgpu_device*);
+       const struct nbio_hdp_flush_reg *hdp_flush_reg;
+       u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+       u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+       u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+       u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+       u32 (*get_rev_id)(struct amdgpu_device *adev);
+       u32 (*get_atombios_scratch_regs)(struct amdgpu_device *adev, uint32_t idx);
+       void (*set_atombios_scratch_regs)(struct amdgpu_device *adev,
+                                         uint32_t idx, uint32_t val);
+       void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+       void (*hdp_flush)(struct amdgpu_device *adev);
+       u32 (*get_memsize)(struct amdgpu_device *adev);
+       void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+                                   bool use_doorbell, int doorbell_index);
+       void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+                                        bool enable);
+       void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+                                                 bool enable);
+       void (*ih_doorbell_range)(struct amdgpu_device *adev,
+                                 bool use_doorbell, int doorbell_index);
+       void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+                                                bool enable);
+       void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+                                               bool enable);
+       void (*get_clockgating_state)(struct amdgpu_device *adev,
+                                     u32 *flags);
+       void (*ih_control)(struct amdgpu_device *adev);
+       void (*init_registers)(struct amdgpu_device *adev);
+       void (*detect_hw_virt)(struct amdgpu_device *adev);
 };
 
 
index 30b41fc082e674b7fa4cb34998831155075de57e..e9a668bae1947001bd54193e4dc955e388c0be8d 100644 (file)
@@ -3552,12 +3552,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg;
-
-       if (ring->adev->flags & AMD_IS_APU)
-               nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
-       else
-               nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
 
        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                switch (ring->me) {
index 51856e53d8c4927f956276c0112b8a063ec9afd6..1b5dfccfd5d5a86bdf31fb0fd83d5f10219e465b 100644 (file)
@@ -38,8 +38,6 @@
 #include "soc15_common.h"
 #include "umc/umc_6_0_sh_mask.h"
 
-#include "nbio_v6_1.h"
-#include "nbio_v7_0.h"
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
@@ -332,10 +330,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
        unsigned i, j;
 
        /* flush hdp cache */
-       if (adev->flags & AMD_IS_APU)
-               nbio_v7_0_hdp_flush(adev);
-       else
-               nbio_v6_1_hdp_flush(adev);
+       adev->nbio_funcs->hdp_flush(adev);
 
        spin_lock(&adev->mc.invalidate_lock);
 
@@ -702,8 +697,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 
        /* size in MB on si */
        adev->mc.mc_vram_size =
-               ((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
-                nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
+               adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->mc.real_vram_size = adev->mc.mc_vram_size;
 
        if (!(adev->flags & AMD_IS_APU)) {
@@ -951,10 +945,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
        /* After HDP is initialized, flush HDP.*/
-       if (adev->flags & AMD_IS_APU)
-               nbio_v7_0_hdp_flush(adev);
-       else
-               nbio_v6_1_hdp_flush(adev);
+       adev->nbio_funcs->hdp_flush(adev);
 
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                value = false;
index 947d6e4a01f77d55918b2924f32986786cb80275..0d3514808092fa10c239bd9be0a065812c963189 100644 (file)
@@ -33,7 +33,7 @@
 #define smnPCIE_CNTL2                                                                                   0x11180070
 #define smnPCIE_CONFIG_CNTL                                                                             0x11180044
 
-u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
+static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
 {
         u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
 
@@ -43,19 +43,19 @@ u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
        return tmp;
 }
 
-u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
-                                       uint32_t idx)
+static u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
+                                              uint32_t idx)
 {
        return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
 }
 
-void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
-                                        uint32_t idx, uint32_t val)
+static void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
+                                               uint32_t idx, uint32_t val)
 {
        WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
 }
 
-void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
+static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
 {
        if (enable)
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
@@ -65,17 +65,17 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
 {
        WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 }
 
-u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
+static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
 {
        return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
 }
 
-void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
                                  bool use_doorbell, int doorbell_index)
 {
        u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
@@ -93,14 +93,14 @@ void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
 
 }
 
-void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
-                                       bool enable)
+static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
+                                              bool enable)
 {
        WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
 }
 
-void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
-                                       bool enable)
+static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+                                                       bool enable)
 {
        u32 tmp = 0;
 
@@ -119,8 +119,8 @@ void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
 }
 
 
-void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
-                               bool use_doorbell, int doorbell_index)
+static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
+                                       bool use_doorbell, int doorbell_index)
 {
        u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
 
@@ -133,7 +133,7 @@ void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
        WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
 }
 
-void nbio_v6_1_ih_control(struct amdgpu_device *adev)
+static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
 {
        u32 interrupt_cntl;
 
@@ -149,8 +149,8 @@ void nbio_v6_1_ih_control(struct amdgpu_device *adev)
        WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
 }
 
-void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
-                                               bool enable)
+static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+                                                      bool enable)
 {
        uint32_t def, data;
 
@@ -177,8 +177,8 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                WREG32_PCIE(smnCPM_CONTROL, data);
 }
 
-void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
-                                              bool enable)
+static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+                                                     bool enable)
 {
        uint32_t def, data;
 
@@ -197,7 +197,8 @@ void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                WREG32_PCIE(smnPCIE_CNTL2, data);
 }
 
-void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
+                                           u32 *flags)
 {
        int data;
 
@@ -232,7 +233,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
        return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
 }
 
-const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
        .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
        .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -247,15 +248,7 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
 };
 
-const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
-       .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
-       .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
-       .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
-       .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
-};
-
-
-void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
+static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
 {
        uint32_t reg;
 
@@ -272,7 +265,7 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
        }
 }
 
-void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
 
@@ -283,3 +276,27 @@ void nbio_v6_1_init_registers(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
 }
+
+const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
+       .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
+       .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
+       .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
+       .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
+       .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
+       .get_rev_id = nbio_v6_1_get_rev_id,
+       .get_atombios_scratch_regs = nbio_v6_1_get_atombios_scratch_regs,
+       .set_atombios_scratch_regs = nbio_v6_1_set_atombios_scratch_regs,
+       .mc_access_enable = nbio_v6_1_mc_access_enable,
+       .hdp_flush = nbio_v6_1_hdp_flush,
+       .get_memsize = nbio_v6_1_get_memsize,
+       .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
+       .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
+       .enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture,
+       .ih_doorbell_range = nbio_v6_1_ih_doorbell_range,
+       .update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating,
+       .update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep,
+       .get_clockgating_state = nbio_v6_1_get_clockgating_state,
+       .ih_control = nbio_v6_1_ih_control,
+       .init_registers = nbio_v6_1_init_registers,
+       .detect_hw_virt = nbio_v6_1_detect_hw_virt,
+};
index 973effed27e5aa61854b0b80dc9d383df1ac37fc..0743a6f016f37cfb793166c54c78050c8d199d67 100644 (file)
 
 #include "soc15_common.h"
 
-extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
 
-int nbio_v6_1_init(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
-                                        uint32_t idx);
-void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
-                                         uint32_t idx, uint32_t val);
-void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_hdp_flush(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev);
-void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
-                                 bool use_doorbell, int doorbell_index);
-void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
-                                       bool enable);
-void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
-                                       bool enable);
-void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
-                               bool use_doorbell, int doorbell_index);
-void nbio_v6_1_ih_control(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev);
-void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev);
-void nbio_v6_1_init_registers(struct amdgpu_device *adev);
-
 #endif
index 851f58e0b9d90641f056527acd03fec9ebde3085..29d7b4fd7a8828a15465fca64e41fb4b634484fb 100644 (file)
 
 #define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
 
+#define smnCPM_CONTROL                                                                                  0x11180460
+#define smnPCIE_CNTL2                                                                                   0x11180070
 
-u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
+static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
 {
         u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
 
@@ -42,19 +44,19 @@ u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
        return tmp;
 }
 
-u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
+static u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
                                        uint32_t idx)
 {
        return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
 }
 
-void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
-                                        uint32_t idx, uint32_t val)
+static void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
+                                               uint32_t idx, uint32_t val)
 {
        WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
 }
 
-void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
+static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
 {
        if (enable)
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
@@ -63,18 +65,18 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
                WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
 {
        WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 }
 
-u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
+static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
 {
        return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
 }
 
-void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
-                                 bool use_doorbell, int doorbell_index)
+static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+                                         bool use_doorbell, int doorbell_index)
 {
        u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
                        SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
@@ -90,14 +92,20 @@ void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
        WREG32(reg, doorbell_range);
 }
 
-void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
-                                       bool enable)
+static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
+                                              bool enable)
 {
        WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
 }
 
-void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
-                               bool use_doorbell, int doorbell_index)
+static void nbio_v7_0_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+                                                       bool enable)
+{
+
+}
+
+static void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
+                                       bool use_doorbell, int doorbell_index)
 {
        u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
 
@@ -127,8 +135,8 @@ static void nbio_7_0_write_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t o
        WREG32_SOC15(NBIO, 0, mmSYSHUB_DATA, data);
 }
 
-void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
-                                               bool enable)
+static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+                                                      bool enable)
 {
        uint32_t def, data;
 
@@ -166,7 +174,43 @@ void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK, data);
 }
 
-void nbio_v7_0_ih_control(struct amdgpu_device *adev)
+static void nbio_v7_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+                                                     bool enable)
+{
+       uint32_t def, data;
+
+       def = data = RREG32_PCIE(smnPCIE_CNTL2);
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
+               data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+                        PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+                        PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
+       } else {
+               data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+                         PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+                         PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
+       }
+
+       if (def != data)
+               WREG32_PCIE(smnPCIE_CNTL2, data);
+}
+
+static void nbio_v7_0_get_clockgating_state(struct amdgpu_device *adev,
+                                           u32 *flags)
+{
+       int data;
+
+       /* AMD_CG_SUPPORT_BIF_MGCG */
+       data = RREG32_PCIE(smnCPM_CONTROL);
+       if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+               *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+       /* AMD_CG_SUPPORT_BIF_LS */
+       data = RREG32_PCIE(smnPCIE_CNTL2);
+       if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+               *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
+static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
 {
        u32 interrupt_cntl;
 
@@ -217,10 +261,37 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
        .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
+static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
+{
+       if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
+               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
+static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
+{
+
+}
+
 const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
+       .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
        .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
        .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
        .get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
+       .get_rev_id = nbio_v7_0_get_rev_id,
+       .get_atombios_scratch_regs = nbio_v7_0_get_atombios_scratch_regs,
+       .set_atombios_scratch_regs = nbio_v7_0_set_atombios_scratch_regs,
+       .mc_access_enable = nbio_v7_0_mc_access_enable,
+       .hdp_flush = nbio_v7_0_hdp_flush,
+       .get_memsize = nbio_v7_0_get_memsize,
+       .sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
+       .enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
+       .enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
+       .ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
+       .update_medium_grain_clock_gating = nbio_v7_0_update_medium_grain_clock_gating,
+       .update_medium_grain_light_sleep = nbio_v7_0_update_medium_grain_light_sleep,
+       .get_clockgating_state = nbio_v7_0_get_clockgating_state,
+       .ih_control = nbio_v7_0_ih_control,
+       .init_registers = nbio_v7_0_init_registers,
+       .detect_hw_virt = nbio_v7_0_detect_hw_virt,
 };
-
index 070c3bdf5739846e1198626f4433704816cffd4a..508d549c50291fc89a40539b2a33642275b74a0d 100644 (file)
 
 #include "soc15_common.h"
 
-extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
 
-int nbio_v7_0_init(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
-                                        uint32_t idx);
-void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
-                                         uint32_t idx, uint32_t val);
-void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable);
-void nbio_v7_0_hdp_flush(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev);
-void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
-                                 bool use_doorbell, int doorbell_index);
-void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
-                                       bool enable);
-void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
-                               bool use_doorbell, int doorbell_index);
-void nbio_v7_0_ih_control(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev);
-void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
-                                               bool enable);
 #endif
index e190ce163e308d5c7d62a35bba75e20331452457..79e82bf35f7d2cffea2b2df994cc0156efb8a48a 100644 (file)
@@ -359,12 +359,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
-       const struct nbio_hdp_flush_reg *nbio_hf_reg;
-
-       if (ring->adev->flags & AMD_IS_APU)
-               nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
-       else
-               nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
+       const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
 
        if (ring == &ring->adev->sdma.instance[0].ring)
                ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -629,10 +624,8 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
                }
                WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
                WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
-               if (adev->flags & AMD_IS_APU)
-                       nbio_v7_0_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
-               else
-                       nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+               adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+                                                     ring->doorbell_index);
 
                if (amdgpu_sriov_vf(adev))
                        sdma_v4_0_ring_set_wptr(ring);
index 2752d8d3e5aef15b9860f0e5c05d019494112feb..873813fcc084a1848f50177b5f1cad109e6f4c08 100644 (file)
@@ -228,10 +228,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 
 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 {
-       if (adev->flags & AMD_IS_APU)
-               return nbio_v7_0_get_memsize(adev);
-       else
-               return nbio_v6_1_get_memsize(adev);
+       return adev->nbio_funcs->get_memsize(adev);
 }
 
 static const u32 vega10_golden_init[] =
@@ -460,9 +457,8 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               u32 memsize = (adev->flags & AMD_IS_APU) ?
-                       nbio_v7_0_get_memsize(adev) :
-                       nbio_v6_1_get_memsize(adev);
+               u32 memsize = adev->nbio_funcs->get_memsize(adev);
+
                if (memsize != 0xffffffff)
                        break;
                udelay(1);
@@ -527,14 +523,10 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
 }
 
 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
-                                       bool enable)
+                                          bool enable)
 {
-       if (adev->flags & AMD_IS_APU) {
-               nbio_v7_0_enable_doorbell_aperture(adev, enable);
-       } else {
-               nbio_v6_1_enable_doorbell_aperture(adev, enable);
-               nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
-       }
+       adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
+       adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
 }
 
 static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -558,7 +550,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       nbio_v6_1_detect_hw_virt(adev);
+       if (adev->flags & AMD_IS_APU)
+               adev->nbio_funcs = &nbio_v7_0_funcs;
+       else
+               adev->nbio_funcs = &nbio_v6_1_funcs;
+
+       adev->nbio_funcs->detect_hw_virt(adev);
 
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_ai_virt_ops;
@@ -612,10 +609,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 
 static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 {
-       if (adev->flags & AMD_IS_APU)
-               return nbio_v7_0_get_rev_id(adev);
-       else
-               return nbio_v6_1_get_rev_id(adev);
+       return adev->nbio_funcs->get_rev_id(adev);
 }
 
 static const struct amdgpu_asic_funcs soc15_asic_funcs =
@@ -651,11 +645,6 @@ static int soc15_common_early_init(void *handle)
 
        adev->asic_funcs = &soc15_asic_funcs;
 
-       if (adev->flags & AMD_IS_APU)
-               adev->nbio_funcs = &nbio_v7_0_funcs;
-       else
-               adev->nbio_funcs = &nbio_v6_1_funcs;
-
        if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
                (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
                psp_enabled = true;
@@ -763,8 +752,7 @@ static int soc15_common_hw_init(void *handle)
        /* enable aspm */
        soc15_program_aspm(adev);
        /* setup nbio registers */
-       if (!(adev->flags & AMD_IS_APU))
-               nbio_v6_1_init_registers(adev);
+       adev->nbio_funcs->init_registers(adev);
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
 
@@ -925,9 +913,9 @@ static int soc15_common_set_clockgating_state(void *handle,
 
        switch (adev->asic_type) {
        case CHIP_VEGA10:
-               nbio_v6_1_update_medium_grain_clock_gating(adev,
+               adev->nbio_funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
-               nbio_v6_1_update_medium_grain_light_sleep(adev,
+               adev->nbio_funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
@@ -941,9 +929,9 @@ static int soc15_common_set_clockgating_state(void *handle,
                                state == AMD_CG_STATE_GATE ? true : false);
                break;
        case CHIP_RAVEN:
-               nbio_v7_0_update_medium_grain_clock_gating(adev,
+               adev->nbio_funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
-               nbio_v6_1_update_medium_grain_light_sleep(adev,
+               adev->nbio_funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
                soc15_update_hdp_light_sleep(adev,
                                state == AMD_CG_STATE_GATE ? true : false);
@@ -968,7 +956,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       nbio_v6_1_get_clockgating_state(adev, flags);
+       adev->nbio_funcs->get_clockgating_state(adev, flags);
 
        /* AMD_CG_SUPPORT_HDP_LS */
        data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
index 413951c33983c2e5b89ebabe99f42b3a62f60a98..def865067edd2b2cadc820824442b0006b057b2c 100644 (file)
 #ifndef __SOC15_COMMON_H__
 #define __SOC15_COMMON_H__
 
-struct nbio_hdp_flush_reg {
-       u32 ref_and_mask_cp0;
-       u32 ref_and_mask_cp1;
-       u32 ref_and_mask_cp2;
-       u32 ref_and_mask_cp3;
-       u32 ref_and_mask_cp4;
-       u32 ref_and_mask_cp5;
-       u32 ref_and_mask_cp6;
-       u32 ref_and_mask_cp7;
-       u32 ref_and_mask_cp8;
-       u32 ref_and_mask_cp9;
-       u32 ref_and_mask_sdma0;
-       u32 ref_and_mask_sdma1;
-};
-
-
 /* Register Access Macros */
 #define SOC15_REG_OFFSET(ip, inst, reg)        (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
 
index a1d8bbbb269c4aa6190f6d019e20ae72abff5096..e1d7dae0989b20497b071a88a1453db0a4ee3238 100644 (file)
@@ -95,10 +95,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
        /* disable irqs */
        vega10_ih_disable_interrupts(adev);
 
-       if (adev->flags & AMD_IS_APU)
-               nbio_v7_0_ih_control(adev);
-       else
-               nbio_v6_1_ih_control(adev);
+       adev->nbio_funcs->ih_control(adev);
 
        ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
        /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -149,10 +146,8 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
                                                 ENABLE, 0);
        }
        WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
-       if (adev->flags & AMD_IS_APU)
-               nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
-       else
-               nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+       adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+                                           adev->irq.ih.doorbell_index);
 
        tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
        tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,