drm/amd/powerplay: properly set mp1 state for SW SMU suspend/reset routine
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5a7f893cf72448d9a4d6e9c12b0d3827d3e7bdcb..d3524f19d79a45dc6f57c0841b6195d66c25c833 100644
 #include "amdgpu_ras.h"
 #include "amdgpu_pmu.h"
 
+#include <linux/suspend.h>
+
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
-static const char *amdgpu_asic_name[] = {
+const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
@@ -98,7 +104,11 @@ static const char *amdgpu_asic_name[] = {
        "VEGA12",
        "VEGA20",
        "RAVEN",
+       "ARCTURUS",
+       "RENOIR",
        "NAVI10",
+       "NAVI14",
+       "NAVI12",
        "LAST",
 };
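With the "static" dropped above, amdgpu_asic_name becomes visible to the rest
of the driver. A minimal sketch of how another amdgpu file might consume it,
assuming the extern declaration lands in a shared header such as amdgpu.h:

	extern const char *amdgpu_asic_name[];

	static void example_print_asic(struct amdgpu_device *adev)
	{
		/* asic_type indexes the name table; "LAST" is the sentinel */
		dev_info(adev->dev, "ASIC: %s\n",
			 amdgpu_asic_name[adev->asic_type]);
	}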
 
@@ -412,6 +422,40 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32
        BUG();
 }
 
+/**
+ * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
+ *
+ * @adev: amdgpu device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
+{
+       DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+       BUG();
+       return 0;
+}
+
+/**
+ * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
+ *
+ * @adev: amdgpu device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
+{
+       DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+                 reg, v);
+       BUG();
+}
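These two stubs pair with the 64-bit PCIE accessor plumbing wired up later in
this diff (adev->pcie_rreg64/pcie_wreg64 in amdgpu_device_init). A sketch of
how they are presumably consumed, modeled on the existing 32-bit accessor
macros in amdgpu.h (macro names assumed):

	#define RREG64_PCIE(reg)	adev->pcie_rreg64(adev, (reg))
	#define WREG64_PCIE(reg, v)	adev->pcie_wreg64(adev, (reg), (v))

An ASIC that never installs real 64-bit handlers then trips the loud BUG()
above instead of dereferencing a NULL function pointer.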
+
 /**
  * amdgpu_block_invalid_rreg - dummy reg read function
  *
@@ -1384,9 +1428,21 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
                else
                        chip_name = "raven";
                break;
+       case CHIP_ARCTURUS:
+               chip_name = "arcturus";
+               break;
+       case CHIP_RENOIR:
+               chip_name = "renoir";
+               break;
        case CHIP_NAVI10:
                chip_name = "navi10";
                break;
+       case CHIP_NAVI14:
+               chip_name = "navi14";
+               break;
+       case CHIP_NAVI12:
+               chip_name = "navi12";
+               break;
        }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -1529,7 +1585,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_RAVEN:
-               if (adev->asic_type == CHIP_RAVEN)
+       case CHIP_ARCTURUS:
+       case CHIP_RENOIR:
+               if (adev->asic_type == CHIP_RAVEN ||
+                   adev->asic_type == CHIP_RENOIR)
                        adev->family = AMDGPU_FAMILY_RV;
                else
                        adev->family = AMDGPU_FAMILY_AI;
@@ -1539,6 +1598,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                        return r;
                break;
        case  CHIP_NAVI10:
+       case  CHIP_NAVI14:
+       case  CHIP_NAVI12:
                adev->family = AMDGPU_FAMILY_NV;
 
                r = nv_set_ip_blocks(adev);
@@ -1560,13 +1621,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
                        return -EAGAIN;
-
-               /* query the reg access mode at the very beginning */
-               amdgpu_virt_init_reg_access_mode(adev);
        }
 
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1665,28 +1723,34 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 
        if (adev->asic_type >= CHIP_VEGA10) {
                for (i = 0; i < adev->num_ip_blocks; i++) {
-                       if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
-                               if (adev->in_gpu_reset || adev->in_suspend) {
-                                       if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
-                                               break; /* sriov gpu reset, psp need to do hw_init before IH because of hw limit */
-                                       r = adev->ip_blocks[i].version->funcs->resume(adev);
-                                       if (r) {
-                                               DRM_ERROR("resume of IP block <%s> failed %d\n",
+                       if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
+                               continue;
+
+                       /* no need to do the fw loading again if already done */
+                       if (adev->ip_blocks[i].status.hw == true)
+                               break;
+
+                       if (adev->in_gpu_reset || adev->in_suspend) {
+                               r = adev->ip_blocks[i].version->funcs->resume(adev);
+                               if (r) {
+                                       DRM_ERROR("resume of IP block <%s> failed %d\n",
                                                          adev->ip_blocks[i].version->funcs->name, r);
-                                               return r;
-                                       }
-                               } else {
-                                       r = adev->ip_blocks[i].version->funcs->hw_init(adev);
-                                       if (r) {
-                                               DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-                                                 adev->ip_blocks[i].version->funcs->name, r);
-                                               return r;
-                                       }
+                                       return r;
+                               }
+                       } else {
+                               r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+                               if (r) {
+                                       DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+                                                         adev->ip_blocks[i].version->funcs->name, r);
+                                       return r;
                                }
-                               adev->ip_blocks[i].status.hw = true;
                        }
+
+                       adev->ip_blocks[i].status.hw = true;
+                       break;
                }
        }
+
        r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 
        return r;
@@ -2128,7 +2192,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
                        if (r) {
                                DRM_ERROR("suspend of IP block <%s> failed %d\n",
                                          adev->ip_blocks[i].version->funcs->name, r);
+                               return r;
                        }
+                       adev->ip_blocks[i].status.hw = false;
                }
        }
 
@@ -2163,6 +2229,25 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
                        DRM_ERROR("suspend of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
                }
+               adev->ip_blocks[i].status.hw = false;
+               /* handle putting the SMC in the appropriate state */
+               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+                       if (is_support_sw_smu(adev)) {
+                               r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
+                       } else if (adev->powerplay.pp_funcs &&
+                                          adev->powerplay.pp_funcs->set_mp1_state) {
+                               r = adev->powerplay.pp_funcs->set_mp1_state(
+                                       adev->powerplay.pp_handle,
+                                       adev->mp1_state);
+                       }
+                       if (r) {
+                               DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+                                         adev->mp1_state, r);
+                               return r;
+                       }
+               }
        }
 
        return 0;
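The SMC special case above is the core of this patch: whatever mp1_state was
chosen when the reset lock was taken is now forwarded to the SMC on both SMU
paths. One way to read the dispatch, factored into a hypothetical helper
(name invented, not part of this patch):

	static int example_set_mp1_state(struct amdgpu_device *adev)
	{
		/* SW-SMU parts go through the smu wrapper ... */
		if (is_support_sw_smu(adev))
			return smu_set_mp1_state(&adev->smu, adev->mp1_state);
		/* ... legacy parts through the powerplay vtable */
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_mp1_state)
			return adev->powerplay.pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle, adev->mp1_state);
		return 0;
	}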
@@ -2215,6 +2300,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                for (j = 0; j < adev->num_ip_blocks; j++) {
                        block = &adev->ip_blocks[j];
 
+                       block->status.hw = false;
                        if (block->version->type != ip_order[i] ||
                                !block->status.valid)
                                continue;
@@ -2223,6 +2309,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                        DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
                        if (r)
                                return r;
+                       block->status.hw = true;
                }
        }
 
@@ -2250,13 +2337,15 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
                        block = &adev->ip_blocks[j];
 
                        if (block->version->type != ip_order[i] ||
-                               !block->status.valid)
+                               !block->status.valid ||
+                               block->status.hw)
                                continue;
 
                        r = block->version->funcs->hw_init(adev);
                        DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
                        if (r)
                                return r;
+                       block->status.hw = true;
                }
        }
 
@@ -2280,17 +2369,19 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
        int i, r;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_blocks[i].status.valid)
+               if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
                        continue;
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+
                        r = adev->ip_blocks[i].version->funcs->resume(adev);
                        if (r) {
                                DRM_ERROR("resume of IP block <%s> failed %d\n",
                                          adev->ip_blocks[i].version->funcs->name, r);
                                return r;
                        }
+                       adev->ip_blocks[i].status.hw = true;
                }
        }
 
@@ -2315,7 +2406,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
        int i, r;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_blocks[i].status.valid)
+               if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
                        continue;
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
@@ -2328,6 +2419,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
                                  adev->ip_blocks[i].version->funcs->name, r);
                        return r;
                }
+               adev->ip_blocks[i].status.hw = true;
        }
 
        return 0;
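Taken together, the suspend/resume/reinit hunks above establish one
invariant: status.hw tracks whether an IP block's hardware is currently
initialized, so re-entrant paths skip work instead of redoing it. A sketch of
the predicate the resume loops now effectively test (helper name invented):

	static bool example_block_needs_resume(struct amdgpu_ip_block *block)
	{
		/* valid but not yet brought up on the hardware */
		return block->status.valid && !block->status.hw;
	}

This is also what lets amdgpu_device_fw_loading() bail out early when the PSP
block is already up after a reset.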
@@ -2426,6 +2518,11 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #endif
 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
        case CHIP_NAVI10:
+       case CHIP_NAVI14:
+       case CHIP_NAVI12:
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+       case CHIP_RENOIR:
 #endif
                return amdgpu_dc != 0;
 #endif
@@ -2488,7 +2585,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->ddev = ddev;
        adev->pdev = pdev;
        adev->flags = flags;
-       adev->asic_type = flags & AMD_ASIC_MASK;
+
+       if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
+               adev->asic_type = amdgpu_force_asic_type;
+       else
+               adev->asic_type = flags & AMD_ASIC_MASK;
+
        adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
        if (amdgpu_emu_mode == 1)
                adev->usec_timeout *= 2;
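The amdgpu_force_asic_type override is assumed to come from a new module
parameter defined in amdgpu_drv.c (not part of this file); a sketch of what
that declaration presumably looks like:

	int amdgpu_force_asic_type = -1;	/* -1: use the PCI flags */
	module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
	MODULE_PARM_DESC(force_asic_type,
		"A non negative value used to specify the asic type for all supported ASICs");

The bounds check against CHIP_LAST keeps a bogus command-line value (e.g.
amdgpu.force_asic_type=9999) from indexing past the ASIC tables.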
@@ -2509,6 +2611,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->pcie_wreg = &amdgpu_invalid_wreg;
        adev->pciep_rreg = &amdgpu_invalid_rreg;
        adev->pciep_wreg = &amdgpu_invalid_wreg;
+       adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
+       adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
        adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
        adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
        adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -3389,7 +3493,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
        amdgpu_virt_init_data_exchange(adev);
        amdgpu_virt_release_full_gpu(adev, true);
        if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
-               atomic_inc(&adev->vram_lost_counter);
+               amdgpu_inc_vram_lost(adev);
                r = amdgpu_device_recover_vram(adev);
        }
 
@@ -3431,6 +3535,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
                case CHIP_VEGA20:
                case CHIP_VEGA10:
                case CHIP_VEGA12:
+               case CHIP_RAVEN:
                        break;
                default:
                        goto disabled;
@@ -3530,11 +3635,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                                break;
                                }
                        }
-
-                       list_for_each_entry(tmp_adev, device_list_handle,
-                                       gmc.xgmi.head) {
-                               amdgpu_ras_reserve_bad_pages(tmp_adev);
-                       }
                }
        }
 
@@ -3554,7 +3654,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
                                if (vram_lost) {
                                        DRM_INFO("VRAM is lost due to GPU reset!\n");
-                                       atomic_inc(&tmp_adev->vram_lost_counter);
+                                       amdgpu_inc_vram_lost(tmp_adev);
                                }
 
                                r = amdgpu_gtt_mgr_recover(
@@ -3627,24 +3727,29 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
 
        atomic_inc(&adev->gpu_reset_counter);
        adev->in_gpu_reset = 1;
-       /* Block kfd: SRIOV would do it separately */
-       if (!amdgpu_sriov_vf(adev))
-                amdgpu_amdkfd_pre_reset(adev);
+       switch (amdgpu_asic_reset_method(adev)) {
+       case AMD_RESET_METHOD_MODE1:
+               adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
+               break;
+       case AMD_RESET_METHOD_MODE2:
+               adev->mp1_state = PP_MP1_STATE_RESET;
+               break;
+       default:
+               adev->mp1_state = PP_MP1_STATE_NONE;
+               break;
+       }
 
        return true;
 }
 
 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 {
-       /*unlock kfd: SRIOV would do it separately */
-       if (!amdgpu_sriov_vf(adev))
-                amdgpu_amdkfd_post_reset(adev);
        amdgpu_vf_error_trans_all(adev);
+       adev->mp1_state = PP_MP1_STATE_NONE;
        adev->in_gpu_reset = 0;
        mutex_unlock(&adev->lock_reset);
 }
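The new switch in amdgpu_device_lock_adev() records, before any reset work
starts, how MP1 should be treated. Condensed into a pure mapping for
readability (function name invented; the enums are the ones used above):

	static enum pp_mp1_state example_mp1_state_for(enum amd_reset_method m)
	{
		switch (m) {
		case AMD_RESET_METHOD_MODE1:
			return PP_MP1_STATE_SHUTDOWN;	/* full shutdown   */
		case AMD_RESET_METHOD_MODE2:
			return PP_MP1_STATE_RESET;	/* soft reset prep */
		default:
			return PP_MP1_STATE_NONE;	/* leave MP1 alone */
		}
	}

amdgpu_device_ip_suspend_phase2() then consumes adev->mp1_state, and
amdgpu_device_unlock_adev() clears it back to PP_MP1_STATE_NONE.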
 
-
 /**
  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
  *
@@ -3664,11 +3769,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        struct amdgpu_hive_info *hive = NULL;
        struct amdgpu_device *tmp_adev = NULL;
        int i, r = 0;
+       bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+       /*
+        * Flush RAM to disk so that after reboot
+        * the user can read the log and see why the system rebooted.
+        */
+       if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
+
+               DRM_WARN("Emergency reboot.");
+
+               ksys_sync_helper();
+               emergency_restart();
+       }
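This emergency path is why <linux/suspend.h> is now included at the top of
the file: ksys_sync_helper() is declared there and flushes dirty pagecache
the same way the suspend-to-RAM path does, so the RAS log survives the
emergency_restart() that follows without an orderly shutdown.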
 
        need_full_reset = job_signaled = false;
        INIT_LIST_HEAD(&device_list);
 
-       dev_info(adev->dev, "GPU reset begin!\n");
+       dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop" : "reset");
 
        cancel_delayed_work_sync(&adev->delayed_init_work);
 
@@ -3684,20 +3802,27 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
        if (hive && !mutex_trylock(&hive->reset_lock)) {
                DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
-                        job->base.id, hive->hive_id);
+                         job ? job->base.id : -1, hive->hive_id);
                return 0;
        }
 
        /* Start with adev pre asic reset first for soft reset check.*/
        if (!amdgpu_device_lock_adev(adev, !hive)) {
                DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
-                                        job->base.id);
+                         job ? job->base.id : -1);
                return 0;
        }
 
+       /* Block kfd: SRIOV would do it separately */
+       if (!amdgpu_sriov_vf(adev))
+                amdgpu_amdkfd_pre_reset(adev);
+
        /* Build list of devices to reset */
        if  (adev->gmc.xgmi.num_physical_nodes > 1) {
                if (!hive) {
+                       /* unlock kfd: SRIOV would do it separately */
+                       if (!amdgpu_sriov_vf(adev))
+                               amdgpu_amdkfd_post_reset(adev);
                        amdgpu_device_unlock_adev(adev);
                        return -ENODEV;
                }
@@ -3713,17 +3838,22 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                device_list_handle = &device_list;
        }
 
-       /*
-        * Mark these ASICs to be reseted as untracked first
-        * And add them back after reset completed
-        */
-       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
-               amdgpu_unregister_gpu_instance(tmp_adev);
-
        /* block all schedulers and reset given job's ring */
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+               if (tmp_adev != adev) {
+                       amdgpu_device_lock_adev(tmp_adev, false);
+                       if (!amdgpu_sriov_vf(tmp_adev))
+                               amdgpu_amdkfd_pre_reset(tmp_adev);
+               }
+
+               /*
+                * Mark these ASICs to be reset as untracked first
+                * and add them back after reset completes
+                */
+               amdgpu_unregister_gpu_instance(tmp_adev);
+
                /* disable ras on ALL IPs */
-               if (amdgpu_device_ip_need_full_reset(tmp_adev))
+               if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
                        amdgpu_ras_suspend(tmp_adev);
 
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3732,11 +3862,17 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                        if (!ring || !ring->sched.thread)
                                continue;
 
-                       drm_sched_stop(&ring->sched, &job->base);
+                       drm_sched_stop(&ring->sched, job ? &job->base : NULL);
+
+                       if (in_ras_intr)
+                               amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
                }
        }
 
 
+       if (in_ras_intr)
+               goto skip_sched_resume;
+
        /*
         * Must check guilty signal here since after this point all old
         * HW fences are force signaled.
@@ -3747,9 +3883,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
            dma_fence_is_signaled(job->base.s_fence->parent))
                job_signaled = true;
 
-       if (!amdgpu_device_ip_need_full_reset(adev))
-               device_list_handle = &device_list;
-
        if (job_signaled) {
                dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
                goto skip_hw_reset;
@@ -3757,9 +3890,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 
        /* Guilty job will be freed after this*/
-       r = amdgpu_device_pre_asic_reset(adev,
-                                        job,
-                                        &need_full_reset);
+       r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
        if (r) {
                /*TODO Should we stop ?*/
                DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -3773,7 +3904,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                if (tmp_adev == adev)
                        continue;
 
-               amdgpu_device_lock_adev(tmp_adev, false);
                r = amdgpu_device_pre_asic_reset(tmp_adev,
                                                 NULL,
                                                 &need_full_reset);
@@ -3801,6 +3931,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
        /* Post ASIC reset for all devs .*/
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = tmp_adev->rings[i];
 
@@ -3822,12 +3953,18 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                if (r) {
                        /* bad news, how to tell it to userspace ? */
-                       dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+                       dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
                        amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
                } else {
-                       dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+                       dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
                }
+       }
 
+skip_sched_resume:
+       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+               /* unlock kfd: SRIOV would do it separately */
+               if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
+                       amdgpu_amdkfd_post_reset(tmp_adev);
                amdgpu_device_unlock_adev(tmp_adev);
        }