drm/ttm: remove pointers to globals
author Christian König <christian.koenig@amd.com>
Wed, 25 Sep 2019 09:38:50 +0000 (11:38 +0200)
committer Christian König <christian.koenig@amd.com>
Fri, 25 Oct 2019 09:40:51 +0000 (11:40 +0200)
As the name says, global memory and BO accounting is global. So it doesn't
make too much sense to have pointers to the global structures all around
the code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Thomas Hellström <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/332879/
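
Not part of the patch, but to make the pattern concrete: a minimal sketch of
the before/after, assuming the hypothetical helpers bo_done_old()/bo_done_new()
(only the TTM calls and globals shown are the ones the commit actually
touches). Instead of chasing the per-device bdev->glob pointer (and the
mem_glob pointer hanging off it), callers now name the single instances
ttm_bo_glob and ttm_mem_glob directly:

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_memory.h>

/* Before: indirect access through the per-device pointer. */
static void bo_done_old(struct ttm_buffer_object *bo, size_t acc_size)
{
	struct ttm_bo_global *glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&glob->lru_lock);
	ttm_mem_global_free(glob->mem_glob, acc_size);
}

/* After: the globals are named directly; no pointer chasing. */
static void bo_done_new(struct ttm_buffer_object *bo, size_t acc_size)
{
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
	ttm_mem_global_free(&ttm_mem_glob, acc_size);
}

Because modular drivers (amdgpu, qxl) now reference ttm_bo_glob directly, the
patch also adds EXPORT_SYMBOL(ttm_bo_glob) in ttm_bo.c.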
15 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/drm_gem_vram_helper.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/ttm/ttm_agp_backend.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_memory.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5e8bdded265faf6ec1dce44bff0ee8e885841d8a..19705e399905b5dacd1a8abeb64276f213d91054 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
  */
 static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
 {
-       struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+       struct page *dummy_page = ttm_bo_glob.dummy_read_page;
 
        if (adev->dummy_page_addr)
                return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5251352f59228733c6009598638332386eb52a5b..d8cfcf2d745598d537412790a4ab8661570f845c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -600,19 +600,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm)
 {
-       struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;
 
        if (vm->bulk_moveable) {
-               spin_lock(&glob->lru_lock);
+               spin_lock(&ttm_bo_glob.lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
-               spin_unlock(&glob->lru_lock);
+               spin_unlock(&ttm_bo_glob.lru_lock);
                return;
        }
 
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
-       spin_lock(&glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        list_for_each_entry(bo_base, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
 
@@ -624,7 +623,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                        ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
                                                &vm->lru_bulk_move);
        }
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
 
        vm->bulk_moveable = true;
 }
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 7fe93cd38eea2291a8d5b79456ea5534353f7d78..666cb4c22bb9b12fcfcf63b6d930d3c614143b33 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1013,12 +1013,11 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
        struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
-       struct ttm_bo_global *glob = vmm->bdev.glob;
        struct drm_printer p = drm_seq_file_printer(m);
 
-       spin_lock(&glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        drm_mm_print(mm, &p);
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index c53c7e1a6b2625c56e7244c5e94b165f2e54eef0..2feca734c7b195fe385dea55417ad9c139c6655d 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev,
 void qxl_release_fence_buffer_objects(struct qxl_release *release)
 {
        struct ttm_buffer_object *bo;
-       struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_validate_buffer *entry;
        struct qxl_device *qdev;
@@ -451,9 +450,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
                       release->id | 0xf0000000, release->base.seqno);
        trace_dma_fence_emit(&release->base);
 
-       glob = bdev->glob;
-
-       spin_lock(&glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
 
        list_for_each_entry(entry, &release->bos, head) {
                bo = entry->bo;
@@ -462,7 +459,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
                ttm_bo_move_to_lru_tail(bo, NULL);
                dma_resv_unlock(bo->base.resv);
        }
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
        ww_acquire_fini(&release->ticket);
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 54cc5a5b607eac09fd42b671ac643c15a3574999..4b13b0b98a91fd5beb6618455dcd61e82863c410 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -319,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       struct qxl_device *rdev = dev->dev_private;
-       struct ttm_bo_global *glob = rdev->mman.bdev.glob;
        struct drm_printer p = drm_seq_file_printer(m);
 
-       spin_lock(&glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        drm_mm_print(mm, &p);
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
        return 0;
 }
 #endif
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index ea4d59eb896694442685449a96837c46b0ff97a2..6050dc846894257ce4af23d2776256b827a00183 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -51,7 +51,7 @@ struct ttm_agp_backend {
 static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
        struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
-       struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+       struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
        struct drm_mm_node *node = bo_mem->mm_node;
        struct agp_memory *mem;
        int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5a8443588ba1f7111703ba55c27ccc6b4d385784..d52fc16266cee639aa4b6b28ec8722e24bc38a8a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
 DEFINE_MUTEX(ttm_global_mutex);
 unsigned ttm_bo_glob_use_count;
 struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
 
 static struct attribute ttm_bo_count = {
        .name = "bo_count",
@@ -148,7 +149,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
 {
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
-       struct ttm_bo_device *bdev = bo->bdev;
        size_t acc_size = bo->acc_size;
 
        BUG_ON(kref_read(&bo->list_kref));
@@ -157,13 +157,13 @@ static void ttm_bo_release_list(struct kref *list_kref)
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));
        ttm_tt_destroy(bo->ttm);
-       atomic_dec(&bo->bdev->glob->bo_count);
+       atomic_dec(&ttm_bo_glob.bo_count);
        dma_fence_put(bo->moving);
        if (!ttm_bo_uses_embedded_gem_object(bo))
                dma_resv_fini(&bo->base._resv);
        mutex_destroy(&bo->wu_mutex);
        bo->destroy(bo);
-       ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+       ttm_mem_global_free(&ttm_mem_glob, acc_size);
 }
 
 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -187,7 +187,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
            !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
                                     TTM_PAGE_FLAG_SWAPPED))) {
-               list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+               list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
                kref_get(&bo->list_kref);
        }
 }
@@ -294,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
                dma_resv_assert_held(pos->first->base.resv);
                dma_resv_assert_held(pos->last->base.resv);
 
-               lru = &pos->first->bdev->glob->swap_lru[i];
+               lru = &ttm_bo_glob.swap_lru[i];
                list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
        }
 }
@@ -458,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_bo_global *glob = bdev->glob;
        int ret;
 
        ret = ttm_bo_individualize_resv(bo);
@@ -468,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                 */
                dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
                                                    30 * HZ);
-               spin_lock(&glob->lru_lock);
+               spin_lock(&ttm_bo_glob.lru_lock);
                goto error;
        }
 
-       spin_lock(&glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
        if (!ret) {
                if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
                        ttm_bo_del_from_lru(bo);
-                       spin_unlock(&glob->lru_lock);
+                       spin_unlock(&ttm_bo_glob.lru_lock);
                        if (bo->base.resv != &bo->base._resv)
                                dma_resv_unlock(&bo->base._resv);
 
@@ -506,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 error:
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
 
        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -529,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible, bool no_wait_gpu,
                               bool unlock_resv)
 {
-       struct ttm_bo_global *glob = bo->bdev->glob;
        struct dma_resv *resv;
        int ret;
 
@@ -548,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 
                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
-               spin_unlock(&glob->lru_lock);
+               spin_unlock(&ttm_bo_glob.lru_lock);
 
                lret = dma_resv_wait_timeout_rcu(resv, true,
                                                           interruptible,
@@ -559,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                else if (lret == 0)
                        return -EBUSY;
 
-               spin_lock(&glob->lru_lock);
+               spin_lock(&ttm_bo_glob.lru_lock);
                if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                        /*
                         * We raced, and lost, someone else holds the reservation now,
@@ -569,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                         * delayed destruction would succeed, so just return success
                         * here.
                         */
-                       spin_unlock(&glob->lru_lock);
+                       spin_unlock(&ttm_bo_glob.lru_lock);
                        return 0;
                }
                ret = 0;
@@ -578,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                if (unlock_resv)
                        dma_resv_unlock(bo->base.resv);
-               spin_unlock(&glob->lru_lock);
+               spin_unlock(&ttm_bo_glob.lru_lock);
                return ret;
        }
 
@@ -586,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
        list_del_init(&bo->ddestroy);
        kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
        ttm_bo_cleanup_memtype_use(bo);
 
        if (unlock_resv)
@@ -601,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
  */
 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
-       struct ttm_bo_global *glob = bdev->glob;
+       struct ttm_bo_global *glob = &ttm_bo_glob;
        struct list_head removed;
        bool empty;
 
@@ -825,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                               struct ww_acquire_ctx *ticket)
 {
        struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
-       struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        bool locked = false;
        unsigned i;
        int ret;
 
-       spin_lock(&glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                list_for_each_entry(bo, &man->lru[i], lru) {
                        bool busy;
@@ -863,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
        if (!bo) {
                if (busy_bo)
                        kref_get(&busy_bo->list_kref);
-               spin_unlock(&glob->lru_lock);
+               spin_unlock(&ttm_bo_glob.lru_lock);
                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                if (busy_bo)
                        kref_put(&busy_bo->list_kref, ttm_bo_release_list);
@@ -879,7 +876,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                return ret;
        }
 
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
 
        ret = ttm_bo_evict(bo, ctx);
        if (locked)
@@ -1045,10 +1042,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
        mem->mem_type = mem_type;
        mem->placement = cur_flags;
 
-       spin_lock(&bo->bdev->glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        ttm_bo_del_from_lru(bo);
        ttm_bo_add_mem_to_lru(bo, mem);
-       spin_unlock(&bo->bdev->glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
 
        return 0;
 }
@@ -1135,9 +1132,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 error:
        if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
-               spin_lock(&bo->bdev->glob->lru_lock);
+               spin_lock(&ttm_bo_glob.lru_lock);
                ttm_bo_move_to_lru_tail(bo, NULL);
-               spin_unlock(&bo->bdev->glob->lru_lock);
+               spin_unlock(&ttm_bo_glob.lru_lock);
        }
 
        return ret;
@@ -1261,9 +1258,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                         struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
 {
+       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
        int ret = 0;
        unsigned long num_pages;
-       struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        bool locked;
 
        ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1323,7 +1320,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                dma_resv_init(&bo->base._resv);
                drm_vma_node_reset(&bo->base.vma_node);
        }
-       atomic_inc(&bo->bdev->glob->bo_count);
+       atomic_inc(&ttm_bo_glob.bo_count);
 
        /*
         * For ttm_bo_type_device buffers, allocate
@@ -1353,9 +1350,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                return ret;
        }
 
-       spin_lock(&bdev->glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        ttm_bo_move_to_lru_tail(bo, NULL);
-       spin_unlock(&bdev->glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
 
        return ret;
 }
@@ -1453,7 +1450,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                .flags = TTM_OPT_FLAG_FORCE_ALLOC
        };
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-       struct ttm_bo_global *glob = bdev->glob;
+       struct ttm_bo_global *glob = &ttm_bo_glob;
        struct dma_fence *fence;
        int ret;
        unsigned i;
@@ -1622,8 +1619,6 @@ static int ttm_bo_global_init(void)
                goto out;
 
        spin_lock_init(&glob->lru_lock);
-       glob->mem_glob = &ttm_mem_glob;
-       glob->mem_glob->bo_glob = glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
        if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1647,10 +1642,10 @@ static int ttm_bo_global_init(void)
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
+       struct ttm_bo_global *glob = &ttm_bo_glob;
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;
-       struct ttm_bo_global *glob = bdev->glob;
 
        while (i--) {
                man = &bdev->man[i];
@@ -1719,7 +1714,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = mapping;
-       bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
        mutex_lock(&ttm_global_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
@@ -1898,8 +1892,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
                .no_wait_gpu = false
        };
 
-       while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
-               ;
+       while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index b00039dcb48744c8a6ca9ab635644274ff861e4f..73a1b018602929e3c7a27fdd9967d7b5b5193cd0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -503,7 +503,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
         * TODO: Explicit member copy would probably be better here.
         */
 
-       atomic_inc(&bo->bdev->glob->bo_count);
+       atomic_inc(&ttm_bo_glob.bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        INIT_LIST_HEAD(&fbo->base.swap);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 79f01c5ff65ef0380ce5bbbff81ada8562a50f89..f4dd09b71a3fbda236f8913155f095c4c3b50a8b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -177,9 +177,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                }
 
                if (bo->moving != moving) {
-                       spin_lock(&bdev->glob->lru_lock);
+                       spin_lock(&ttm_bo_glob.lru_lock);
                        ttm_bo_move_to_lru_tail(bo, NULL);
-                       spin_unlock(&bdev->glob->lru_lock);
+                       spin_unlock(&ttm_bo_glob.lru_lock);
                }
                dma_fence_put(moving);
        }
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index a17645f705c7458d347f4a19d175f2ead2b4bbc2..1797f04c05345ba226b5ea566bf4a60ba8f4cd6d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -47,22 +47,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                struct list_head *list)
 {
        struct ttm_validate_buffer *entry;
-       struct ttm_bo_global *glob;
 
        if (list_empty(list))
                return;
 
-       entry = list_first_entry(list, struct ttm_validate_buffer, head);
-       glob = entry->bo->bdev->glob;
-
-       spin_lock(&glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
 
                ttm_bo_move_to_lru_tail(bo, NULL);
                dma_resv_unlock(bo->base.resv);
        }
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
 
        if (ticket)
                ww_acquire_fini(ticket);
@@ -85,16 +81,12 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list, bool intr,
                           struct list_head *dups)
 {
-       struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;
 
        if (list_empty(list))
                return 0;
 
-       entry = list_first_entry(list, struct ttm_validate_buffer, head);
-       glob = entry->bo->bdev->glob;
-
        if (ticket)
                ww_acquire_init(ticket, &reservation_ww_class);
 
@@ -166,19 +158,14 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                 struct dma_fence *fence)
 {
        struct ttm_validate_buffer *entry;
-       struct ttm_buffer_object *bo;
-       struct ttm_bo_global *glob;
 
        if (list_empty(list))
                return;
 
-       bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
-       glob = bo->bdev->glob;
-
-       spin_lock(&glob->lru_lock);
-
+       spin_lock(&ttm_bo_glob.lru_lock);
        list_for_each_entry(entry, list, head) {
-               bo = entry->bo;
+               struct ttm_buffer_object *bo = entry->bo;
+
                if (entry->num_shared)
                        dma_resv_add_shared_fence(bo->base.resv, fence);
                else
@@ -186,7 +173,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                ttm_bo_move_to_lru_tail(bo, NULL);
                dma_resv_unlock(bo->base.resv);
        }
-       spin_unlock(&glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
        if (ticket)
                ww_acquire_fini(ticket);
 }
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 8617958b7ae6b7759e27c6e1ff3235e53060f0a1..acd63b70d8147a0e52b3d005b387bd0f1022c84d 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
 
        while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                spin_unlock(&glob->lock);
-               ret = ttm_bo_swapout(glob->bo_glob, ctx);
+               ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
                spin_lock(&glob->lock);
                if (unlikely(ret != 0))
                        break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 627f8dc91d0ed23e0958dfc39d106c967dcd376a..b40a4678c29639c3090be39e64033a46c61ed138 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
 static void
 ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 {
-       struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
        unsigned i;
 
        if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 
 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-       struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
        unsigned i;
        int ret;
 
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7d78e6deac89737276f6151ee934e9a6614deefd..ff54e7609e8ff994dc38c3500e7c4047926dbefc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -886,8 +886,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
                        struct ttm_operation_ctx *ctx)
 {
+       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
        struct ttm_tt *ttm = &ttm_dma->ttm;
-       struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
        unsigned long num_pages = ttm->num_pages;
        struct dma_pool *pool;
        struct dma_page *d_page;
@@ -991,8 +991,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
        struct ttm_tt *ttm = &ttm_dma->ttm;
-       struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
        struct dma_pool *pool;
        struct dma_page *d_page, *next;
        enum pool_type type;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1976828ec0bd913551f60c9d150a689f0005c6c2..cac7a8a0825a75bd9032ce4320ddc212c0b44713 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -423,7 +423,6 @@ extern struct ttm_bo_global {
         */
 
        struct kobject kobj;
-       struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        spinlock_t lru_lock;
 
@@ -467,7 +466,6 @@ struct ttm_bo_device {
         * Constant after bo device init / atomic.
         */
        struct list_head device_list;
-       struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 
@@ -768,9 +766,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
  */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-       spin_lock(&bo->bdev->glob->lru_lock);
+       spin_lock(&ttm_bo_glob.lru_lock);
        ttm_bo_move_to_lru_tail(bo, NULL);
-       spin_unlock(&bo->bdev->glob->lru_lock);
+       spin_unlock(&ttm_bo_glob.lru_lock);
        dma_resv_unlock(bo->base.resv);
 }
 
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 3ff48a0a2d7b7728469034430434a311e4bbcb40..c78ea99c42cf3b04ed0fc5f71038917735f84f04 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -65,7 +65,6 @@
 struct ttm_mem_zone;
 extern struct ttm_mem_global {
        struct kobject kobj;
-       struct ttm_bo_global *bo_glob;
        struct workqueue_struct *swap_queue;
        struct work_struct work;
        spinlock_t lock;