drm/amdgpu: Export function to flush TLB of specific vm hub
author Oak Zeng <Oak.Zeng@amd.com>
Thu, 1 Aug 2019 19:55:45 +0000 (14:55 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 15 Aug 2019 15:57:48 +0000 (10:57 -0500)
This allows KFD to reuse the amdgpu TLB invalidation function. On
gfx10, KFD only needs to flush the TLB on the gfx hub, not on the
mm hub, so export a function that lets KFD flush the TLB of a
specific hub.

Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
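
As a minimal usage sketch of the new interface (the two helpers below are
hypothetical illustrations, not part of this patch), a gfx10 KFD path can now
invalidate only the GFX hub, while callers that want the old behavior iterate
over every hub. amdgpu_gmc_flush_gpu_tlb(), AMDGPU_GFXHUB_0 and
adev->num_vmhubs are taken from the diff below:

/* Hypothetical helpers illustrating the new per-hub flush interface. */
static void example_flush_gfx_hub_only(struct amdgpu_device *adev,
				       uint32_t vmid)
{
	/* gfx10 KFD case: flush the TLB on the GFX hub only */
	amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
}

static void example_flush_all_hubs(struct amdgpu_device *adev, uint32_t vmid)
{
	int i;

	/* previous behavior: flush the TLB on every vm hub */
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
}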
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 9d153cf395811aa9a001ef018a7c7b46320423b3..e262f2ac07a3590e6360960e60248d932c71ad00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -670,7 +670,7 @@ static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
 int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-       int vmid;
+       int vmid, i;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        uint32_t flush_type = 0;
 
@@ -689,8 +689,9 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
                if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
                        if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
                                == pasid) {
-                               amdgpu_gmc_flush_gpu_tlb(adev, vmid,
-                                                        flush_type);
+                               for (i = 0; i < adev->num_vmhubs; i++)
+                                       amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+                                                               i, flush_type);
                                break;
                        }
                }
@@ -702,6 +703,7 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+       int i;
 
        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("non kfd vmid %d\n", vmid);
@@ -723,7 +725,9 @@ int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
         * TODO 2: support range-based invalidation, requires kfg2kgd
         * interface change
         */
-       amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
+       for (i = 0; i < adev->num_vmhubs; i++)
+               amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index d79ab1da9e077c6dd5c98d973ddc5998e9ea66e7..ac14d473a14324b51c6f44bae0b6b09b50d4a88f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -251,7 +251,9 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
        }
        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
-       amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
+       for (i = 0; i < adev->num_vmhubs; i++)
+               amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+
        return 0;
 }
 
@@ -312,7 +314,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        unsigned i,t,p;
 #endif
-       int r;
+       int r, i;
 
        if (!adev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART !\n");
@@ -336,7 +338,8 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
-       amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
+       for (i = 0; i < adev->num_vmhubs; i++)
+               amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index cac2ef84a1a11f1df6b24ea3f07fa4c1abe3de4a..b6e1d98ef01e145b1f91a2e96fea13ec67954914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -89,8 +89,8 @@ struct amdgpu_vmhub {
  */
 struct amdgpu_gmc_funcs {
        /* flush the vm tlb via mmio */
-       void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-                             uint32_t vmid, uint32_t flush_type);
+       void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
+                               uint32_t vmhub, uint32_t flush_type);
        /* flush the vm tlb via ring */
        uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
                                       uint64_t pd_addr);
@@ -181,7 +181,7 @@ struct amdgpu_gmc {
        struct ras_common_if    *mmhub_ras_if;
 };
 
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 7ae0e86ec6a71246f886002b06d9626cf9753786..79d3fbd3ba6394b8973df2669a88a4a1fdb5ea30 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1748,9 +1748,12 @@ static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
 
 static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
 {
+       int i;
+
        gfx_v10_0_init_csb(adev);
 
-       amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
+       for (i = 0; i < adev->num_vmhubs; i++)
+               amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 
        /* TODO: init power gating */
        return;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index ee16ec1a01bb3bcd22c39a88007ef369e118214d..aafb16064338d13e9f869a18ff638a8e49b9e7e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -230,8 +230,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
  *
  * Flush the TLB for the requested page table.
  */
-static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                                   uint32_t vmid, uint32_t flush_type)
+static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                                       uint32_t vmhub, uint32_t flush_type)
 {
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct dma_fence *fence;
@@ -244,7 +244,14 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
 
        mutex_lock(&adev->mman.gtt_window_lock);
 
-       gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
+       if (vmhub == AMDGPU_MMHUB_0) {
+               gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
+               mutex_unlock(&adev->mman.gtt_window_lock);
+               return;
+       }
+
+       BUG_ON(vmhub != AMDGPU_GFXHUB_0);
+
        if (!adev->mman.buffer_funcs_enabled ||
            !adev->ib_pool_ready ||
            adev->in_gpu_reset) {
@@ -756,7 +763,8 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 
        gfxhub_v2_0_set_fault_enable_default(adev, value);
        mmhub_v2_0_set_fault_enable_default(adev, value);
-       gmc_v10_0_flush_gpu_tlb(adev, 0, 0);
+       gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
+       gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
 
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 14073b506afe5b3517bbc1582658ba372f0f9299..f0f6c6da9f305fb9d68eecce82034bf4bcee7bee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -362,8 +362,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
        return 0;
 }
 
-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                               uint32_t vmid, uint32_t flush_type)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                                       uint32_t vmhub, uint32_t flush_type)
 {
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
@@ -571,7 +571,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        else
                gmc_v6_0_set_fault_enable_default(adev, true);
 
-       gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
+       gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ca32915fbecbf73fa04fb49c6be2ff2d9917b898..d935a2f29e5f1cc1c506b0b9b9fd72b3c8e8b55d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -433,8 +433,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                               uint32_t vmid, uint32_t flush_type)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                                       uint32_t vmhub, uint32_t flush_type)
 {
        /* bits 0-15 are the VM contexts0-15 */
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -677,7 +677,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
                WREG32(mmCHUB_CONTROL, tmp);
        }
 
-       gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
+       gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 909a8764703e364197eb9101c274374c526895a7..2c60e45e3fa001ae0765b69afeb919bff2760adc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -635,8 +635,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  *
  * Flush the TLB for the requested page table (VI).
  */
-static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                               uint32_t vmid, uint32_t flush_type)
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                                       uint32_t vmhub, uint32_t flush_type)
 {
        /* bits 0-15 are the VM contexts0-15 */
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -921,7 +921,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        else
                gmc_v8_0_set_fault_enable_default(adev, true);
 
-       gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
+       gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index ba4f939f657f719838f43a23d80fa493a8498cce..7da355bf6d89773bad3ee528c4b8c55f88d94aee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -453,44 +453,45 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
  *
  * Flush the TLB for the requested page table using certain type.
  */
-static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
-                               uint32_t vmid, uint32_t flush_type)
+static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+                                       uint32_t vmhub, uint32_t flush_type)
 {
        const unsigned eng = 17;
-       unsigned i, j;
+       u32 j, tmp;
+       struct amdgpu_vmhub *hub;
 
-       for (i = 0; i < adev->num_vmhubs; ++i) {
-               struct amdgpu_vmhub *hub = &adev->vmhub[i];
-               u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+       BUG_ON(vmhub >= adev->num_vmhubs);
 
-               /* This is necessary for a HW workaround under SRIOV as well
-                * as GFXOFF under bare metal
-                */
-               if (adev->gfx.kiq.ring.sched.ready &&
-                   (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-                   !adev->in_gpu_reset) {
-                       uint32_t req = hub->vm_inv_eng0_req + eng;
-                       uint32_t ack = hub->vm_inv_eng0_ack + eng;
-
-                       amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
-                                                          1 << vmid);
-                       continue;
-               }
+       hub = &adev->vmhub[vmhub];
+       tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
 
-               spin_lock(&adev->gmc.invalidate_lock);
-               WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
-               for (j = 0; j < adev->usec_timeout; j++) {
-                       tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-                       if (tmp & (1 << vmid))
-                               break;
-                       udelay(1);
-               }
-               spin_unlock(&adev->gmc.invalidate_lock);
-               if (j < adev->usec_timeout)
-                       continue;
+       /* This is necessary for a HW workaround under SRIOV as well
+        * as GFXOFF under bare metal
+        */
+       if (adev->gfx.kiq.ring.sched.ready &&
+                       (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+                       !adev->in_gpu_reset) {
+               uint32_t req = hub->vm_inv_eng0_req + eng;
+               uint32_t ack = hub->vm_inv_eng0_ack + eng;
+
+               amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+                               1 << vmid);
+               return;
+       }
 
-               DRM_ERROR("Timeout waiting for VM flush ACK!\n");
+       spin_lock(&adev->gmc.invalidate_lock);
+       WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+       for (j = 0; j < adev->usec_timeout; j++) {
+               tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
+               if (tmp & (1 << vmid))
+                       break;
+               udelay(1);
        }
+       spin_unlock(&adev->gmc.invalidate_lock);
+       if (j < adev->usec_timeout)
+               return;
+
+       DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 }
 
 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
@@ -1296,7 +1297,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
  */
 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 {
-       int r;
+       int r, i;
        bool value;
        u32 tmp;
 
@@ -1353,7 +1354,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
                mmhub_v9_4_set_fault_enable_default(adev, value);
        else
                mmhub_v1_0_set_fault_enable_default(adev, value);
-       gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
+
+       for (i = 0; i < adev->num_vmhubs; ++i)
+               gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
 
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),