drm/ttm: add operation ctx to ttm_bo_validate v2
author	Christian König <christian.koenig@amd.com>
Wed, 12 Apr 2017 12:24:39 +0000 (14:24 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
Wed, 6 Dec 2017 17:48:01 +0000 (12:48 -0500)
Give the code that moves a BO into place an operation context to work with.

v2: rebased

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
27 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_mn.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
include/drm/ttm/ttm_bo_api.h
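
Every caller below follows the same pattern: build a struct ttm_operation_ctx on the
stack and hand it to ttm_bo_validate() in place of the old interruptible/no_wait_gpu
bool pair. A minimal sketch of the new calling convention (the bo, domain and
placement-helper names are borrowed from the amdgpu hunks below; this is not a
complete function):

        /* interruptible = true, no_wait_gpu = false (see struct ttm_operation_ctx) */
        struct ttm_operation_ctx ctx = { true, false };
        int r;

        amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                return r;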

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 743875ad4404b4731f3baf072ccb9443627d99d8..faab662ce680808ca31a80148b27b9a0fb307eef 100644
@@ -343,6 +343,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
                                 struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_operation_ctx ctx = { true, false };
        u64 initial_bytes_moved, bytes_moved;
        uint32_t domain;
        int r;
@@ -374,7 +375,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 retry:
        amdgpu_ttm_placement_from_domain(bo, domain);
        initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        bytes_moved = atomic64_read(&adev->num_bytes_moved) -
                      initial_bytes_moved;
        p->bytes_moved += bytes_moved;
@@ -396,6 +397,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                                struct amdgpu_bo *validated)
 {
        uint32_t domain = validated->allowed_domains;
+       struct ttm_operation_ctx ctx = { true, false };
        int r;
 
        if (!p->evictable)
@@ -437,7 +439,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
                        bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                        bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
                initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-               r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+               r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                bytes_moved = atomic64_read(&adev->num_bytes_moved) -
                        initial_bytes_moved;
                p->bytes_moved += bytes_moved;
@@ -476,6 +478,7 @@ static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                            struct list_head *validated)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_bo_list_entry *lobj;
        int r;
 
@@ -493,8 +496,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                    lobj->user_pages) {
                        amdgpu_ttm_placement_from_domain(bo,
                                                         AMDGPU_GEM_DOMAIN_CPU);
-                       r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
-                                           false);
+                       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                return r;
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
@@ -1575,6 +1577,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           struct amdgpu_bo_va_mapping **map)
 {
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+       struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va_mapping *mapping;
        int r;
@@ -1595,8 +1598,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
                (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
-               r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
-                                   false);
+               r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
                if (r)
                        return r;
        }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 3ad4cf0f22f84e50d33a26369727266491f2360d..c16579287aeeed3056e8d875a99b17668d0bddeb 100644
@@ -282,6 +282,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
@@ -335,7 +336,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                        goto free_pages;
 
                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
-               r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+               r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (r)
                        goto free_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3233d5988f66961567a7c3375e44b8de3cd988be..c2419bc6b3dfa5d9f9f453636cec2f2cfed93e78 100644
@@ -552,6 +552,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        uint32_t domain;
        int r;
 
@@ -562,7 +563,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
 
 retry:
        amdgpu_ttm_placement_from_domain(bo, domain);
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
@@ -673,6 +674,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 *gpu_addr)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        int r, i;
 
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -723,7 +725,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
 
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p pin failed\n", bo);
                goto error;
@@ -760,6 +762,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        int r, i;
 
        if (!bo->pin_count) {
@@ -773,7 +776,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p validate failed for unpin\n", bo);
                goto error;
@@ -945,6 +948,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo *abo;
        unsigned long offset, size;
        int r;
@@ -978,7 +982,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        abo->placement.num_busy_placement = 1;
        abo->placement.busy_placement = &abo->placements[1];
 
-       r = ttm_bo_validate(bo, &abo->placement, false, false);
+       r = ttm_bo_validate(bo, &abo->placement, &ctx);
        if (unlikely(r != 0))
                return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index be607b2be4e9631355f00326ab2192f6b07c4f03..2f2a9e17fdb4df1ad5456c66aa1060f679c9246f 100644
@@ -408,6 +408,7 @@ static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
  */
 static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 {
+       struct ttm_operation_ctx tctx = { false, false };
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_bo *bo;
        uint32_t cmd;
@@ -430,7 +431,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
                }
                amdgpu_uvd_force_into_uvd_segment(bo);
 
-               r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+               r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
        }
 
        return r;
@@ -949,6 +950,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                               bool direct, struct dma_fence **fence)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
@@ -975,7 +977,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                amdgpu_uvd_force_into_uvd_segment(bo);
        }
 
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                goto err;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a91abfb327469270225a2cbe510bfe33fc95f158..ba6d846b08ff62e33a9f5b8ad0235d26e6c05a47 100644
@@ -558,6 +558,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
                                  int lo, int hi, unsigned size, int32_t index)
 {
        int64_t offset = ((uint64_t)size) * ((int64_t)index);
+       struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo_va_mapping *mapping;
        unsigned i, fpfn, lpfn;
        struct amdgpu_bo *bo;
@@ -587,7 +588,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
                bo->placements[i].lpfn = bo->placements[i].fpfn ?
                        min(bo->placements[i].fpfn, lpfn) : lpfn;
        }
-       return ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 }
 
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index dabaca4da7f260af1910a1369c73a5d6397e0112..df218df332b3a167b5c190c4dafa928e31550205 100644
@@ -274,6 +274,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                               bool direct, struct dma_fence **fence)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
@@ -294,7 +295,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
        if (r)
                return r;
 
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                goto err;
 
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 696a15dc2f3f9965e775ea3579383502edff4b57..28da7c2b7ed9dc060cacc0e67a4df45c4f1e0233 100644
@@ -354,6 +354,7 @@ static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
 
 int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
 
        if (bo->pin_count) {
@@ -365,7 +366,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
        ast_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret)
                return ret;
 
@@ -377,6 +378,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int ast_bo_unpin(struct ast_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i;
        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
@@ -388,11 +390,12 @@ int ast_bo_unpin(struct ast_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 }
 
 int ast_bo_push_sysram(struct ast_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
@@ -409,7 +412,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index c4cadb638460eb86f06488f4ab5f8a75d3be8701..8250b5e612d25f22b3a826aaf9c9c3c7a9a5f459 100644
@@ -283,6 +283,7 @@ static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
 
 int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
 
        if (bo->pin_count) {
@@ -295,7 +296,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
        bochs_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret)
                return ret;
 
@@ -307,6 +308,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int bochs_bo_unpin(struct bochs_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
 
        if (!bo->pin_count) {
@@ -320,7 +322,7 @@ int bochs_bo_unpin(struct bochs_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1ff1838c0d44fc37cbac9c81568179f072fd3fe7..2a5b54d3a03aedb1d7b60a635f3b8cea32939338 100644
@@ -358,6 +358,7 @@ static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
 
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
 
        if (bo->pin_count) {
@@ -369,7 +370,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
        cirrus_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret)
                return ret;
 
@@ -381,6 +382,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
@@ -397,7 +399,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 3518167a7dc4c2512bec7e792ab9c0fcf7a50a56..ab4ee5953615acebe0f3c3c92c68fd45487e6afc 100644
@@ -344,6 +344,7 @@ int hibmc_bo_create(struct drm_device *dev, int size, int align,
 
 int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
 
        if (bo->pin_count) {
@@ -356,7 +357,7 @@ int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
        hibmc_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret)
                return ret;
 
@@ -368,6 +369,7 @@ int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int hibmc_bo_unpin(struct hibmc_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
 
        if (!bo->pin_count) {
@@ -380,7 +382,7 @@ int hibmc_bo_unpin(struct hibmc_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret) {
                DRM_ERROR("validate failed for unpin: %d\n", ret);
                return ret;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3e7e1cd31395ee19c03240e48395a586e1e207ac..f03da63abc7b7ee0c7758cc63ea2fea6ab29495f 100644
@@ -354,6 +354,7 @@ static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
 
 int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
 
        if (bo->pin_count) {
@@ -366,7 +367,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
        mgag200_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret)
                return ret;
 
@@ -378,6 +379,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int mgag200_bo_unpin(struct mgag200_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i;
        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
@@ -389,11 +391,12 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 }
 
 int mgag200_bo_push_sysram(struct mgag200_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int i, ret;
        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
@@ -410,7 +413,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2615912430cc97098f0fe806e95e5e40c1ee96f7..1cf3da3d7bea0017060454bc6a6ea5c5af7f1097 100644
@@ -548,10 +548,10 @@ int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_gpu)
 {
+       struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
        int ret;
 
-       ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-                             interruptible, no_wait_gpu);
+       ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 31effed4a3c823d8509af4338258d76c6ab65764..e8c0b10372301784956c9c32282041f9424102bf 100644
@@ -309,6 +309,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
        int ret;
        struct drm_gem_object *gobj = NULL;
        struct qxl_bo *qobj = NULL;
+       struct ttm_operation_ctx ctx = { true, false };
 
        if (update_area->left >= update_area->right ||
            update_area->top >= update_area->bottom)
@@ -326,8 +327,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
 
        if (!qobj->pin_count) {
                qxl_ttm_placement_from_domain(qobj, qobj->type, false);
-               ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-                                     true, false);
+               ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
                if (unlikely(ret))
                        goto out;
        }
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 0a67ddf19c3d4fe5e091d730afbe20403e1160a0..f6b80fe47d1f76f748895042cbd506ddd40e93eb 100644
@@ -223,6 +223,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
 
 static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        struct drm_device *ddev = bo->gem_base.dev;
        int r;
 
@@ -233,7 +234,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
                return 0;
        }
        qxl_ttm_placement_from_domain(bo, domain, true);
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
@@ -246,6 +247,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 
 static int __qxl_bo_unpin(struct qxl_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        struct drm_device *ddev = bo->gem_base.dev;
        int r, i;
 
@@ -258,7 +260,7 @@ static int __qxl_bo_unpin(struct qxl_bo *bo)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r != 0))
                dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
        return r;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index f27777daae63ef46b2409cfc7780ac2dbada02f0..b223c8d0a491b821428d7b422e1f546c53aa123a 100644
@@ -230,12 +230,12 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 
 static int qxl_release_validate_bo(struct qxl_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        int ret;
 
        if (!bo->pin_count) {
                qxl_ttm_placement_from_domain(bo, bo->type, false);
-               ret = ttm_bo_validate(&bo->tbo, &bo->placement,
-                                     true, false);
+               ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (ret)
                        return ret;
        }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index cf3deb283da561914ee26904b57915fcf39ac24b..a9962ffba720b784b24de5f5f117f330b19e5710 100644
@@ -285,6 +285,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_userptr *args = data;
        struct drm_gem_object *gobj;
@@ -343,7 +344,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                }
 
                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
-               r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+               r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                radeon_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 1d62288b7ee3e82ec05149623b9c5120747f1ecf..abd24975c9b1d946cec7c85ba7ebe3b820d1c8e4 100644
@@ -124,6 +124,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                                             unsigned long end)
 {
        struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+       struct ttm_operation_ctx ctx = { false, false };
        struct interval_tree_node *it;
 
        /* notification is exclusive, but interval is inclusive */
@@ -157,7 +158,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                                DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
                        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
-                       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+                       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                DRM_ERROR("(%ld) failed to validate user bo\n", r);
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0935949761260ca2351a15a304634898921bf747..15404af9d740612d6882f832c7373eea04ec9f71 100644
@@ -329,6 +329,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int r, i;
 
        if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
@@ -371,7 +372,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
 
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
@@ -393,6 +394,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 
 int radeon_bo_unpin(struct radeon_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        int r, i;
 
        if (!bo->pin_count) {
@@ -406,7 +408,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0)) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        bo->rdev->vram_pin_size -= radeon_bo_size(bo);
@@ -531,6 +533,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        struct radeon_bo_list *lobj;
        struct list_head duplicates;
        int r;
@@ -572,7 +575,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                                radeon_uvd_force_into_uvd_segment(bo, allowed);
 
                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
-                       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+                       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
                                       initial_bytes_moved;
 
@@ -792,6 +795,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 
 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size, lpfn;
@@ -823,10 +827,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
                    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
                        rbo->placements[i].lpfn = lpfn;
        }
-       r = ttm_bo_validate(bo, &rbo->placement, false, false);
+       r = ttm_bo_validate(bo, &rbo->placement, &ctx);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
-               return ttm_bo_validate(bo, &rbo->placement, false, false);
+               return ttm_bo_validate(bo, &rbo->placement, &ctx);
        } else if (unlikely(r != 0)) {
                return r;
        }
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e5c0e635e3717f21bb6182894b11a0d1c3b131f3..7f1a9c787bd1325ec5b2a50b982f8aa69fec2438 100644
@@ -387,6 +387,7 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
 static int radeon_vm_clear_bo(struct radeon_device *rdev,
                              struct radeon_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        struct radeon_ib ib;
        unsigned entries;
        uint64_t addr;
@@ -396,7 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
        if (r)
                return r;
 
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                goto error_unreserve;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 77a0fd22e5ba3203e16f2080fdc3a9146ee945c2..5347c3f3e2f49d90ae2fdc1da896d94dbf34d062 100644
@@ -1091,9 +1091,8 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement,
 EXPORT_SYMBOL(ttm_bo_mem_compat);
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
-                       struct ttm_placement *placement,
-                       bool interruptible,
-                       bool no_wait_gpu)
+                   struct ttm_placement *placement,
+                   struct ttm_operation_ctx *ctx)
 {
        int ret;
        uint32_t new_flags;
@@ -1103,8 +1102,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
         * Check whether we need to move buffer.
         */
        if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
-               ret = ttm_bo_move_buffer(bo, placement, interruptible,
-                                        no_wait_gpu);
+               ret = ttm_bo_move_buffer(bo, placement, ctx->interruptible,
+                                        ctx->no_wait_gpu);
                if (ret)
                        return ret;
        } else {
@@ -1219,8 +1218,11 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                WARN_ON(!locked);
        }
 
-       if (likely(!ret))
-               ret = ttm_bo_validate(bo, placement, interruptible, false);
+       if (likely(!ret)) {
+               struct ttm_operation_ctx ctx = { interruptible, false };
+
+               ret = ttm_bo_validate(bo, placement, &ctx);
+       }
 
        if (unlikely(ret)) {
                if (!resv)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 461f81aa1bbeb072b8ba17152bbec528dffd83b6..5720a0d4ac0a9ebf242755921bc1ac6fea8c2630 100644
@@ -56,6 +56,7 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
                                           struct list_head *head)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        struct ttm_validate_buffer *buf;
        struct ttm_buffer_object *bo;
        struct virtio_gpu_object *qobj;
@@ -68,7 +69,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
        list_for_each_entry(buf, head, head) {
                bo = buf->bo;
                qobj = container_of(bo, struct virtio_gpu_object, tbo);
-               ret = ttm_bo_validate(bo, &qobj->placement, false, false);
+               ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
                if (ret) {
                        ttm_eu_backoff_reservation(ticket, head);
                        return ret;
@@ -352,6 +353,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
+       struct ttm_operation_ctx ctx = { true, false };
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
@@ -372,8 +374,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
        if (ret)
                goto out;
 
-       ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-                             true, false);
+       ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
        if (unlikely(ret))
                goto out_unres;
 
@@ -399,6 +400,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
+       struct ttm_operation_ctx ctx = { true, false };
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;
        struct virtio_gpu_fence *fence;
@@ -416,8 +418,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto out;
 
-       ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-                             true, false);
+       ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
        if (unlikely(ret))
                goto out_unres;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index d87861bbe971b1d474e1ab2b43cd2097bd2c32de..92df0b08c194b430bdb02b7b48ea00fef7944480 100644
@@ -387,6 +387,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
  */
 static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_cotable *vcotbl = vmw_cotable(res);
        struct vmw_dma_buffer *buf, *old_buf = res->backup;
@@ -455,7 +456,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        }
 
        /* Unpin new buffer, and switch backup buffers. */
-       ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+       ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed validating new COTable backup buffer.\n");
                goto out_wait;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 0cd889015dc57d5c0d0a8b89f39cd17b10303fbf..d45d2caffa5ad6d51a623d56e312e21b186a5955 100644
@@ -47,6 +47,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
                                struct ttm_placement *placement,
                                bool interruptible)
 {
+       struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;
@@ -65,7 +66,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
-               ret = ttm_bo_validate(bo, placement, interruptible, false);
+               ret = ttm_bo_validate(bo, placement, &ctx);
 
        if (!ret)
                vmw_bo_pin_reserved(buf, true);
@@ -95,6 +96,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                                  struct vmw_dma_buffer *buf,
                                  bool interruptible)
 {
+       struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;
@@ -115,12 +117,11 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                goto out_unreserve;
        }
 
-       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
-                             false);
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;
 
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
 
 out_unreserve:
        if (!ret)
@@ -170,6 +171,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                    struct vmw_dma_buffer *buf,
                                    bool interruptible)
 {
+       struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
@@ -200,14 +202,16 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0 &&
-           buf->pin_count == 0)
-               (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+           buf->pin_count == 0) {
+               ctx.interruptible = false;
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+       }
 
        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
-               ret = ttm_bo_validate(bo, &placement, interruptible, false);
+               ret = ttm_bo_validate(bo, &placement, &ctx);
 
        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
@@ -286,6 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
  */
 void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 {
+       struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
@@ -314,7 +319,7 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
        placement.num_placement = 1;
        placement.placement = &pl;
 
-       ret = ttm_bo_validate(bo, &placement, false, true);
+       ret = ttm_bo_validate(bo, &placement, &ctx);
 
        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a34e5580af7e56505a64bbae48707ae2599..b700667f6f0b0601ee1a3db668aee6868a4cf75c 100644
@@ -3701,14 +3701,14 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
        struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
                                                  base);
+       struct ttm_operation_ctx ctx = { interruptible, true };
        int ret;
 
        if (vbo->pin_count > 0)
                return 0;
 
        if (validate_as_mob)
-               return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
-                                      false);
+               return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
 
        /**
         * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -3717,8 +3717,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * used as a GMR, this will return -ENOMEM.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
-                             false);
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;
 
@@ -3727,7 +3726,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * previous contents.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
        return ret;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a96f90f017d16072423a95302779e54444339cd4..200904ff9a221a0f372d7c20a7817d8bda9d8060 100644
@@ -968,6 +968,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
 {
+       struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;
@@ -992,7 +993,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
-                             true, false);
+                             &ctx);
 
        if (unlikely(ret != 0))
                goto out_no_validate;
@@ -1446,6 +1447,7 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
  */
 int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 {
+       struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
 
@@ -1466,7 +1468,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
-                                        interruptible, false);
+                                        &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 9b832f13681370e38cf3345f1eb6f5415f715c71..004e18b8832c41a40622f446befa889b0640d96a 100644
@@ -970,6 +970,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
                          size_t size,
                          struct list_head *list)
 {
+       struct ttm_operation_ctx ctx = { false, true };
        struct vmw_dma_buffer *buf;
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
@@ -1005,7 +1006,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
        WARN_ON(is_iomem);
 
        ttm_bo_kunmap(&map);
-       ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+       ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
        WARN_ON(ret != 0);
        ttm_bo_unreserve(&buf->base);
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 833c3ad24091cfa21483bff851e1bf8ae6c16dc9..097951e999bcaf7f23d2822b6c2159afd9f0f66f 100644
@@ -258,6 +258,20 @@ struct ttm_bo_kmap_obj {
        struct ttm_buffer_object *bo;
 };
 
+/**
+ * struct ttm_operation_ctx
+ *
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Context for TTM operations like changing buffer placement or general memory
+ * allocation.
+ */
+struct ttm_operation_ctx {
+       bool interruptible;
+       bool no_wait_gpu;
+};
+
 /**
  * ttm_bo_reference - reference a struct ttm_buffer_object
  *
@@ -306,8 +320,7 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
  *
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
- * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @ctx: validation parameters.
  *
  * Changes placement and caching policy of the buffer object
  * according proposed placement.
@@ -319,8 +332,7 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
  */
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
-                   bool interruptible,
-                   bool no_wait_gpu);
+                   struct ttm_operation_ctx *ctx);
 
 /**
  * ttm_bo_unref