drm/amdgpu: add a fence after the VM flush
author    Christian König <christian.koenig@amd.com>
          Tue, 1 Mar 2016 15:46:18 +0000 (16:46 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
          Thu, 5 May 2016 00:19:24 +0000 (20:19 -0400)
This way we can track when the flush is done.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
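
A minimal sketch (not part of this patch) of how the new id->last_flush fence could be consumed: before the same VM ID is handed out to another job, a caller can wait for the previous flush to complete. fence_wait() is the generic kernel fence helper of this era; the call site shown here is an assumption for illustration, not something this diff adds.

	/* hypothetical caller, e.g. when re-assigning a VM ID */
	if (id->last_flush) {
		int r = fence_wait(id->last_flush, false);

		if (r)
			return r;
	}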
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 148e2c61463ccd0651253b9041f09289e7e814ea..66e51f9e593bc0c92f8242e5248b871fd688e6a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -880,6 +880,7 @@ struct amdgpu_vm_id {
        struct list_head        list;
        struct fence            *first;
        struct amdgpu_sync      active;
+       struct fence            *last_flush;
        atomic_long_t           owner;
 
        uint64_t                pd_gpu_addr;
@@ -926,11 +927,11 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence,
                      unsigned *vm_id, uint64_t *vm_pd_addr);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-                    unsigned vm_id, uint64_t pd_addr,
-                    uint32_t gds_base, uint32_t gds_size,
-                    uint32_t gws_base, uint32_t gws_size,
-                    uint32_t oa_base, uint32_t oa_size);
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+                   unsigned vm_id, uint64_t pd_addr,
+                   uint32_t gds_base, uint32_t gds_size,
+                   uint32_t gws_base, uint32_t gws_size,
+                   uint32_t oa_base, uint32_t oa_size);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 644336d76aca67c4f0582c399ab60c77c74c545f..83973d051080be1992b672e0266d09d09f419f78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -155,10 +155,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
        if (vm) {
                /* do context switch */
-               amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
-                               ib->gds_base, ib->gds_size,
-                               ib->gws_base, ib->gws_size,
-                               ib->oa_base, ib->oa_size);
+               r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
+                                   ib->gds_base, ib->gds_size,
+                                   ib->gws_base, ib->gws_size,
+                                   ib->oa_base, ib->oa_size);
+               if (r) {
+                       amdgpu_ring_undo(ring);
+                       return r;
+               }
 
                if (ring->funcs->emit_hdp_flush)
                        amdgpu_ring_emit_hdp_flush(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d0cce7c3129a215ce6c3d23299048c6b15ebddcc..252445f578f6c2ce3aa41e7728a91d0ca8a4cf12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -236,6 +236,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        fence_put(id->first);
        id->first = fence_get(fence);
 
+       fence_put(id->last_flush);
+       id->last_flush = NULL;
+
        fence_put(id->flushed_updates);
        id->flushed_updates = fence_get(updates);
 
@@ -263,11 +266,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  *
  * Emit a VM flush when it is necessary.
  */
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-                    unsigned vm_id, uint64_t pd_addr,
-                    uint32_t gds_base, uint32_t gds_size,
-                    uint32_t gws_base, uint32_t gws_size,
-                    uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+                   unsigned vm_id, uint64_t pd_addr,
+                   uint32_t gds_base, uint32_t gds_size,
+                   uint32_t gws_base, uint32_t gws_size,
+                   uint32_t oa_base, uint32_t oa_size)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -278,14 +281,25 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
                id->gws_size != gws_size ||
                id->oa_base != oa_base ||
                id->oa_size != oa_size);
+       int r;
 
        if (ring->funcs->emit_pipeline_sync && (
            pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
                amdgpu_ring_emit_pipeline_sync(ring);
 
        if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+               struct fence *fence;
+
                trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
                amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+               r = amdgpu_fence_emit(ring, &fence);
+               if (r)
+                       return r;
+
+               mutex_lock(&adev->vm_manager.lock);
+               fence_put(id->last_flush);
+               id->last_flush = fence;
+               mutex_unlock(&adev->vm_manager.lock);
        }
 
        if (gds_switch_needed) {
@@ -300,6 +314,8 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
                                            gws_base, gws_size,
                                            oa_base, oa_size);
        }
+
+       return 0;
 }
 
 /**