asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm: drop drm_[cm]alloc* helpers
authorMichal Hocko <mhocko@kernel.org>
Wed, 17 May 2017 12:23:12 +0000 (14:23 +0200)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Thu, 18 May 2017 15:22:39 +0000 (17:22 +0200)
Now that drm_[cm]alloc* helpers are simple one line wrappers around
kvmalloc_array and drm_free_large is just kvfree alias we can drop
them and replace by their native forms.

This shouldn't introduce any functional change.

Changes since v1
- fix typo in drivers/gpu/drm/etnaviv/etnaviv_gem.c - noticed by 0day
  build robot

Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Michal Hocko <mhocko@suse.com>
[danvet: Fixup vgem which grew another user very recently.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Christian König <christian.koenig@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170517122312.GK18247@dhcp22.suse.cz
26 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/udl_dmabuf.c
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
include/drm/drmP.h

index a6649874e6ce950e90ca46a8be99de1f4f3b0af1..9f0247cdda5e69ea5f14b6e2a2260ee0cbf8ef26 100644 (file)
@@ -96,7 +96,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
        int r;
        unsigned long total_size = 0;
 
-       array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
+       array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
        if (!array)
                return -ENOMEM;
        memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
@@ -148,7 +148,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
        for (i = 0; i < list->num_entries; ++i)
                amdgpu_bo_unref(&list->array[i].robj);
 
-       drm_free_large(list->array);
+       kvfree(list->array);
 
        list->gds_obj = gds_obj;
        list->gws_obj = gws_obj;
@@ -163,7 +163,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 error_free:
        while (i--)
                amdgpu_bo_unref(&array[i].robj);
-       drm_free_large(array);
+       kvfree(array);
        return r;
 }
 
@@ -224,7 +224,7 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
                amdgpu_bo_unref(&list->array[i].robj);
 
        mutex_destroy(&list->lock);
-       drm_free_large(list->array);
+       kvfree(list->array);
        kfree(list);
 }
 
@@ -244,8 +244,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
        int r;
 
-       info = drm_malloc_ab(args->in.bo_number,
-                            sizeof(struct drm_amdgpu_bo_list_entry));
+       info = kvmalloc_array(args->in.bo_number,
+                            sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -311,11 +311,11 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
        memset(args, 0, sizeof(*args));
        args->out.list_handle = handle;
-       drm_free_large(info);
+       kvfree(info);
 
        return 0;
 
 error_free:
-       drm_free_large(info);
+       kvfree(info);
        return r;
 }
index 4e6b9501ab0aac6cd4b33412f47ce23cf3301e47..5b3e0f63a1151f0b595e2f115fabcf74f14a49eb 100644 (file)
@@ -194,7 +194,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                size = p->chunks[i].length_dw;
                cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
 
-               p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+               p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
                if (p->chunks[i].kdata == NULL) {
                        ret = -ENOMEM;
                        i--;
@@ -247,7 +247,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        i = p->nchunks - 1;
 free_partial_kdata:
        for (; i >= 0; i--)
-               drm_free_large(p->chunks[i].kdata);
+               kvfree(p->chunks[i].kdata);
        kfree(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
@@ -505,7 +505,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                        return r;
 
                if (binding_userptr) {
-                       drm_free_large(lobj->user_pages);
+                       kvfree(lobj->user_pages);
                        lobj->user_pages = NULL;
                }
        }
@@ -571,7 +571,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                release_pages(e->user_pages,
                                              e->robj->tbo.ttm->num_pages,
                                              false);
-                               drm_free_large(e->user_pages);
+                               kvfree(e->user_pages);
                                e->user_pages = NULL;
                        }
 
@@ -601,8 +601,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                list_for_each_entry(e, &need_pages, tv.head) {
                        struct ttm_tt *ttm = e->robj->tbo.ttm;
 
-                       e->user_pages = drm_calloc_large(ttm->num_pages,
-                                                        sizeof(struct page*));
+                       e->user_pages = kvmalloc_array(ttm->num_pages,
+                                                        sizeof(struct page*),
+                                                        GFP_KERNEL | __GFP_ZERO);
                        if (!e->user_pages) {
                                r = -ENOMEM;
                                DRM_ERROR("calloc failure in %s\n", __func__);
@@ -612,7 +613,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
                        if (r) {
                                DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
-                               drm_free_large(e->user_pages);
+                               kvfree(e->user_pages);
                                e->user_pages = NULL;
                                goto error_free_pages;
                        }
@@ -708,7 +709,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        release_pages(e->user_pages,
                                      e->robj->tbo.ttm->num_pages,
                                      false);
-                       drm_free_large(e->user_pages);
+                       kvfree(e->user_pages);
                }
        }
 
@@ -761,7 +762,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
                amdgpu_bo_list_put(parser->bo_list);
 
        for (i = 0; i < parser->nchunks; i++)
-               drm_free_large(parser->chunks[i].kdata);
+               kvfree(parser->chunks[i].kdata);
        kfree(parser->chunks);
        if (parser->job)
                amdgpu_job_free(parser->job);
index 07ff3b1514f129edc23875c1f42053f7ef1aaa72..749a6cde79854335809f472bb7603c29ab569585 100644 (file)
@@ -279,8 +279,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
        if (!parent->entries) {
                unsigned num_entries = amdgpu_vm_num_entries(adev, level);
 
-               parent->entries = drm_calloc_large(num_entries,
-                                                  sizeof(struct amdgpu_vm_pt));
+               parent->entries = kvmalloc_array(num_entries,
+                                                  sizeof(struct amdgpu_vm_pt),
+                                                  GFP_KERNEL | __GFP_ZERO);
                if (!parent->entries)
                        return -ENOMEM;
                memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
@@ -2198,7 +2199,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
                for (i = 0; i <= level->last_entry_used; i++)
                        amdgpu_vm_free_levels(&level->entries[i]);
 
-       drm_free_large(level->entries);
+       kvfree(level->entries);
 }
 
 /**
index b1e28c9446370928fc82e9c17e8c9531937eedf9..8dc11064253d9e5ed58f8c817a471b36c25c5951 100644 (file)
@@ -521,7 +521,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
        npages = obj->size >> PAGE_SHIFT;
 
-       pages = drm_malloc_ab(npages, sizeof(struct page *));
+       pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -546,7 +546,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
        while (i--)
                put_page(pages[i]);
 
-       drm_free_large(pages);
+       kvfree(pages);
        return ERR_CAST(p);
 }
 EXPORT_SYMBOL(drm_gem_get_pages);
@@ -582,7 +582,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                put_page(pages[i]);
        }
 
-       drm_free_large(pages);
+       kvfree(pages);
 }
 EXPORT_SYMBOL(drm_gem_put_pages);
 
index fd56f92f3469a08ab50a82d87e8902eb87342f7f..d6fb724fc3ccc9ca8760c8590f7c963a9d134dc7 100644 (file)
@@ -748,7 +748,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
        uintptr_t ptr;
        unsigned int flags = 0;
 
-       pvec = drm_malloc_ab(npages, sizeof(struct page *));
+       pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return ERR_PTR(-ENOMEM);
 
@@ -772,7 +772,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 
        if (ret < 0) {
                release_pages(pvec, pinned, 0);
-               drm_free_large(pvec);
+               kvfree(pvec);
                return ERR_PTR(ret);
        }
 
@@ -823,7 +823,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
        mm = get_task_mm(etnaviv_obj->userptr.task);
        pinned = 0;
        if (mm == current->mm) {
-               pvec = drm_malloc_ab(npages, sizeof(struct page *));
+               pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
                if (!pvec) {
                        mmput(mm);
                        return -ENOMEM;
@@ -832,7 +832,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
                pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
                                               !etnaviv_obj->userptr.ro, pvec);
                if (pinned < 0) {
-                       drm_free_large(pvec);
+                       kvfree(pvec);
                        mmput(mm);
                        return pinned;
                }
@@ -845,7 +845,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
        }
 
        release_pages(pvec, pinned, 0);
-       drm_free_large(pvec);
+       kvfree(pvec);
 
        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (!work) {
@@ -879,7 +879,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
                release_pages(etnaviv_obj->pages, npages, 0);
-               drm_free_large(etnaviv_obj->pages);
+               kvfree(etnaviv_obj->pages);
        }
        put_task_struct(etnaviv_obj->userptr.task);
 }
index 62b47972a52e6e71ddd066f3a61c0bef3c4ad46f..367bf952f61a18e2945e505f1c14c5539336fdd9 100644 (file)
@@ -87,7 +87,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
         * ours, just free the array we allocated:
         */
        if (etnaviv_obj->pages)
-               drm_free_large(etnaviv_obj->pages);
+               kvfree(etnaviv_obj->pages);
 
        drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
 }
@@ -128,7 +128,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
        npages = size / PAGE_SIZE;
 
        etnaviv_obj->sgt = sgt;
-       etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!etnaviv_obj->pages) {
                ret = -ENOMEM;
                goto fail;
index e1909429837eebc2072d00d9a9f590c6e5f87bd2..a13930e1d8c99adacff649b12206477931d638c5 100644 (file)
@@ -343,9 +343,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
         * Copy the command submission and bo array to kernel space in
         * one go, and do this outside of any locks.
         */
-       bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
-       relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
-       stream = drm_malloc_ab(1, args->stream_size);
+       bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
+       relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
+       stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
        cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
                                    ALIGN(args->stream_size, 8) + 8,
                                    args->nr_bos);
@@ -487,11 +487,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (cmdbuf)
                etnaviv_cmdbuf_free(cmdbuf);
        if (stream)
-               drm_free_large(stream);
+               kvfree(stream);
        if (bos)
-               drm_free_large(bos);
+               kvfree(bos);
        if (relocs)
-               drm_free_large(relocs);
+               kvfree(relocs);
 
        return ret;
 }
index 55a1579d11b3d7c1ce604eeba2988de78de61e0b..c23479be48500091fcb7aa0e0bdaf962a1f9fb59 100644 (file)
@@ -59,7 +59,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
 
        nr_pages = exynos_gem->size >> PAGE_SHIFT;
 
-       exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+       exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+                       GFP_KERNEL | __GFP_ZERO);
        if (!exynos_gem->pages) {
                DRM_ERROR("failed to allocate pages.\n");
                return -ENOMEM;
@@ -101,7 +102,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
        dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
                       exynos_gem->dma_addr, exynos_gem->dma_attrs);
 err_free:
-       drm_free_large(exynos_gem->pages);
+       kvfree(exynos_gem->pages);
 
        return ret;
 }
@@ -122,7 +123,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
                        (dma_addr_t)exynos_gem->dma_addr,
                        exynos_gem->dma_attrs);
 
-       drm_free_large(exynos_gem->pages);
+       kvfree(exynos_gem->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -559,7 +560,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
        exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
 
        npages = exynos_gem->size >> PAGE_SHIFT;
-       exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!exynos_gem->pages) {
                ret = -ENOMEM;
                goto err;
@@ -588,7 +589,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
        return &exynos_gem->base;
 
 err_free_large:
-       drm_free_large(exynos_gem->pages);
+       kvfree(exynos_gem->pages);
 err:
        drm_gem_object_release(&exynos_gem->base);
        kfree(exynos_gem);
index d689e511744e8f2fc9508e2d7345827c6a70bbb2..07f87985ea0baedb06835d625b55b16f92683ef0 100644 (file)
@@ -229,7 +229,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
        int ret;
 
        total = READ_ONCE(dev_priv->mm.object_count);
-       objects = drm_malloc_ab(total, sizeof(*objects));
+       objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;
 
@@ -274,7 +274,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 
        mutex_unlock(&dev->struct_mutex);
 out:
-       drm_free_large(objects);
+       kvfree(objects);
        return ret;
 }
 
index b6ac3df18b582534b118ab44aae1dbfe9f75186e..0e07f35e270ce8690abc3d16631443d30c3560f5 100644 (file)
@@ -2504,7 +2504,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 
        if (n_pages > ARRAY_SIZE(stack_pages)) {
                /* Too big for stack -- allocate temporary array instead */
-               pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+               pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY);
                if (!pages)
                        return NULL;
        }
@@ -2526,7 +2526,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
        addr = vmap(pages, n_pages, 0, pgprot);
 
        if (pages != stack_pages)
-               drm_free_large(pages);
+               kvfree(pages);
 
        return addr;
 }
index a3e59c8ef27baf4f3584ff5016635d8005735af6..4ee2dc38b7c94af5086778c904235636e1de067e 100644 (file)
@@ -1019,11 +1019,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        for (i = 0; i < count; i++)
                total += exec[i].relocation_count;
 
-       reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
-       reloc = drm_malloc_ab(total, sizeof(*reloc));
+       reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
+       reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
        if (reloc == NULL || reloc_offset == NULL) {
-               drm_free_large(reloc);
-               drm_free_large(reloc_offset);
+               kvfree(reloc);
+               kvfree(reloc_offset);
                mutex_lock(&dev->struct_mutex);
                return -ENOMEM;
        }
@@ -1099,8 +1099,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
         */
 
 err:
-       drm_free_large(reloc);
-       drm_free_large(reloc_offset);
+       kvfree(reloc);
+       kvfree(reloc_offset);
        return ret;
 }
 
@@ -1859,13 +1859,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        }
 
        /* Copy in the exec list from userland */
-       exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
-       exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+       exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL);
+       exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
-               drm_free_large(exec_list);
-               drm_free_large(exec2_list);
+               kvfree(exec_list);
+               kvfree(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
@@ -1874,8 +1874,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
-               drm_free_large(exec_list);
-               drm_free_large(exec2_list);
+               kvfree(exec_list);
+               kvfree(exec2_list);
                return -EFAULT;
        }
 
@@ -1924,8 +1924,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                }
        }
 
-       drm_free_large(exec_list);
-       drm_free_large(exec2_list);
+       kvfree(exec_list);
+       kvfree(exec2_list);
        return ret;
 }
 
@@ -1943,7 +1943,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       exec2_list = drm_malloc_gfp(args->buffer_count,
+       exec2_list = kvmalloc_array(args->buffer_count,
                                    sizeof(*exec2_list),
                                    GFP_TEMPORARY);
        if (exec2_list == NULL) {
@@ -1957,7 +1957,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
-               drm_free_large(exec2_list);
+               kvfree(exec2_list);
                return -EFAULT;
        }
 
@@ -1984,6 +1984,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                }
        }
 
-       drm_free_large(exec2_list);
+       kvfree(exec2_list);
        return ret;
 }
index 2aa6b97fd22f28436b984a6ae9a537f36ada545a..7e3193aa7da11105a25b8ae00f45f1efea1dc8b3 100644 (file)
@@ -3102,7 +3102,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
        int ret = -ENOMEM;
 
        /* Allocate a temporary list of source pages for random access. */
-       page_addr_list = drm_malloc_gfp(n_pages,
+       page_addr_list = kvmalloc_array(n_pages,
                                        sizeof(dma_addr_t),
                                        GFP_TEMPORARY);
        if (!page_addr_list)
@@ -3135,14 +3135,14 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
        DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
                      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
 
-       drm_free_large(page_addr_list);
+       kvfree(page_addr_list);
 
        return st;
 
 err_sg_alloc:
        kfree(st);
 err_st_alloc:
-       drm_free_large(page_addr_list);
+       kvfree(page_addr_list);
 
        DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
                      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
index 58ccf8b8ca1c9262d173290089ae0d99947bb1cc..1a0ce1dc68f5489620801286bef18b4bf9b96fce 100644 (file)
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        ret = -ENOMEM;
        pinned = 0;
 
-       pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
+       pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
                unsigned int flags = 0;
@@ -555,7 +555,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        mutex_unlock(&obj->mm.lock);
 
        release_pages(pvec, pinned, 0);
-       drm_free_large(pvec);
+       kvfree(pvec);
 
        i915_gem_object_put(obj);
        put_task_struct(work->task);
@@ -642,7 +642,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        pinned = 0;
 
        if (mm == current->mm) {
-               pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+               pvec = kvmalloc_array(num_pages, sizeof(struct page *),
                                      GFP_TEMPORARY |
                                      __GFP_NORETRY |
                                      __GFP_NOWARN);
@@ -669,7 +669,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
        if (IS_ERR(pages))
                release_pages(pvec, pinned, 0);
-       drm_free_large(pvec);
+       kvfree(pvec);
 
        return pages;
 }
index 19860a372d9005fc5e69d79c0f6620798cd34d47..7276194c04f7fab3330e032963e47f04c1bd3e78 100644 (file)
@@ -117,7 +117,7 @@ static int igt_random_insert_remove(void *arg)
 
        mock_engine_reset(engine);
 
-       waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+       waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
        if (!waiters)
                goto out_engines;
 
@@ -169,7 +169,7 @@ static int igt_random_insert_remove(void *arg)
 out_bitmap:
        kfree(bitmap);
 out_waiters:
-       drm_free_large(waiters);
+       kvfree(waiters);
 out_engines:
        mock_engine_flush(engine);
        return err;
@@ -187,7 +187,7 @@ static int igt_insert_complete(void *arg)
 
        mock_engine_reset(engine);
 
-       waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+       waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
        if (!waiters)
                goto out_engines;
 
@@ -254,7 +254,7 @@ static int igt_insert_complete(void *arg)
 out_bitmap:
        kfree(bitmap);
 out_waiters:
-       drm_free_large(waiters);
+       kvfree(waiters);
 out_engines:
        mock_engine_flush(engine);
        return err;
@@ -368,7 +368,7 @@ static int igt_wakeup(void *arg)
 
        mock_engine_reset(engine);
 
-       waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+       waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
        if (!waiters)
                goto out_engines;
 
@@ -454,7 +454,7 @@ static int igt_wakeup(void *arg)
                put_task_struct(waiters[n].tsk);
        }
 
-       drm_free_large(waiters);
+       kvfree(waiters);
 out_engines:
        mock_engine_flush(engine);
        return err;
index 68e509b3b9e4d08730e3901f46a397519c33e77c..465dab942afa1d18e510106cf0d8c1d59d8567d0 100644 (file)
@@ -50,13 +50,13 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
        struct page **p;
        int ret, i;
 
-       p = drm_malloc_ab(npages, sizeof(struct page *));
+       p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);
 
        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
        if (ret) {
-               drm_free_large(p);
+               kvfree(p);
                return ERR_PTR(ret);
        }
 
@@ -127,7 +127,7 @@ static void put_pages(struct drm_gem_object *obj)
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else {
                        drm_mm_remove_node(msm_obj->vram_node);
-                       drm_free_large(msm_obj->pages);
+                       kvfree(msm_obj->pages);
                }
 
                msm_obj->pages = NULL;
@@ -707,7 +707,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
-                       drm_free_large(msm_obj->pages);
+                       kvfree(msm_obj->pages);
 
                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
@@ -863,7 +863,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
        msm_obj = to_msm_bo(obj);
        msm_obj->sgt = sgt;
-       msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!msm_obj->pages) {
                ret = -ENOMEM;
                goto fail;
index 3ac671f6c8e1f05ff3dda913d915641540d15611..00b22af70f5c093b07462fa084e7854bd18b38ca 100644 (file)
@@ -87,7 +87,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
        p->dma_reloc_idx = 0;
        /* FIXME: we assume that each relocs use 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
-       p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
+       p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
+                       GFP_KERNEL | __GFP_ZERO);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }
@@ -341,7 +342,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                                continue;
                }
 
-               p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+               p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
                size *= sizeof(uint32_t);
                if (p->chunks[i].kdata == NULL) {
                        return -ENOMEM;
@@ -440,10 +441,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
                }
        }
        kfree(parser->track);
-       drm_free_large(parser->relocs);
-       drm_free_large(parser->vm_bos);
+       kvfree(parser->relocs);
+       kvfree(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
-               drm_free_large(parser->chunks[i].kdata);
+               kvfree(parser->chunks[i].kdata);
        kfree(parser->chunks);
        kfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
index dddb372de2b9de555ca0e0444acaf164ceed6d9d..574bf7e6b1186fed318cf10f05ff9ed313d671e1 100644 (file)
@@ -587,7 +587,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
        ttm_eu_backoff_reservation(&ticket, &list);
 
 error_free:
-       drm_free_large(vm_bos);
+       kvfree(vm_bos);
 
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
index 8c7872339c2a6f5e94bf601c47ed421e5caee352..84802b201beff82db56f60589907760e1fb327ac 100644 (file)
@@ -314,7 +314,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
        }
 
        /* and then save the content of the ring */
-       *data = drm_malloc_ab(size, sizeof(uint32_t));
+       *data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
        if (!*data) {
                mutex_unlock(&rdev->ring_lock);
                return 0;
@@ -356,7 +356,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
        }
 
        radeon_ring_unlock_commit(rdev, ring, false);
-       drm_free_large(data);
+       kvfree(data);
        return 0;
 }
 
index a1358748cea56d638c549608a3d5730283e759c0..5f68245579a3b4d1bc0eb908c096bff2da757d8e 100644 (file)
@@ -132,8 +132,8 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
        struct radeon_bo_list *list;
        unsigned i, idx;
 
-       list = drm_malloc_ab(vm->max_pde_used + 2,
-                            sizeof(struct radeon_bo_list));
+       list = kvmalloc_array(vm->max_pde_used + 2,
+                            sizeof(struct radeon_bo_list), GFP_KERNEL);
        if (!list)
                return NULL;
 
index 5260179d788acea899ef4bbb17949ec97e07659e..8ebc8d3560c3669aab85072877703f977e9cc3ef 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <drm/drm_cache.h>
-#include <drm/drm_mem_util.h>
 #include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-       ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
+       ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
+                       GFP_KERNEL | __GFP_ZERO);
 }
 
 static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-       ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
+       ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
-                                         sizeof(*ttm->dma_address));
+                                         sizeof(*ttm->dma_address),
+                                         GFP_KERNEL | __GFP_ZERO);
        ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
 }
 
@@ -208,7 +209,7 @@ EXPORT_SYMBOL(ttm_tt_init);
 
 void ttm_tt_fini(struct ttm_tt *ttm)
 {
-       drm_free_large(ttm->pages);
+       kvfree(ttm->pages);
        ttm->pages = NULL;
 }
 EXPORT_SYMBOL(ttm_tt_fini);
@@ -243,7 +244,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 {
        struct ttm_tt *ttm = &ttm_dma->ttm;
 
-       drm_free_large(ttm->pages);
+       kvfree(ttm->pages);
        ttm->pages = NULL;
        ttm_dma->dma_address = NULL;
 }
index ed0e636243b20c646352d01a75204c107908a219..2e031a894813678d1e7d84facc90520c1e8d40b8 100644 (file)
@@ -228,7 +228,7 @@ static int udl_prime_create(struct drm_device *dev,
                return -ENOMEM;
 
        obj->sg = sg;
-       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (obj->pages == NULL) {
                DRM_ERROR("obj pages is NULL %d\n", npages);
                return -ENOMEM;
index 775c50e4f02cdc7d6b6ef1ddbb35a8d67e63badd..db9ceceba30e239c8668e360334d06cf061c38df 100644 (file)
@@ -146,7 +146,7 @@ int udl_gem_get_pages(struct udl_gem_object *obj)
 void udl_gem_put_pages(struct udl_gem_object *obj)
 {
        if (obj->base.import_attach) {
-               drm_free_large(obj->pages);
+               kvfree(obj->pages);
                obj->pages = NULL;
                return;
        }
index 735412e3725a39a02d8df0bd01f5a3b27c30fe96..9dc7646d49edd29888729ccbf42d2a9d248048bd 100644 (file)
@@ -660,14 +660,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
                return -EINVAL;
        }
 
-       exec->bo = drm_calloc_large(exec->bo_count,
-                                   sizeof(struct drm_gem_cma_object *));
+       exec->bo = kvmalloc_array(exec->bo_count,
+                                   sizeof(struct drm_gem_cma_object *),
+                                   GFP_KERNEL | __GFP_ZERO);
        if (!exec->bo) {
                DRM_ERROR("Failed to allocate validated BO pointers\n");
                return -ENOMEM;
        }
 
-       handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+       handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                DRM_ERROR("Failed to allocate incoming GEM handles\n");
@@ -699,7 +700,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
        spin_unlock(&file_priv->table_lock);
 
 fail:
-       drm_free_large(handles);
+       kvfree(handles);
        return ret;
 }
 
@@ -737,7 +738,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
         * read the contents back for validation, and I think the
         * bo->vaddr is uncached access.
         */
-       temp = drm_malloc_ab(temp_size, 1);
+       temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
        if (!temp) {
                DRM_ERROR("Failed to allocate storage for copying "
                          "in bin/render CLs.\n");
@@ -812,7 +813,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
        ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
 
 fail:
-       drm_free_large(temp);
+       kvfree(temp);
        return ret;
 }
 
@@ -832,7 +833,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++)
                        drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
-               drm_free_large(exec->bo);
+               kvfree(exec->bo);
        }
 
        while (!list_empty(&exec->unref_list)) {
index 4b23ba0496322280c03f5697ae759c63e783e1ac..54ec94c5e9acfce3b1ffa0c3c5518b5ad08fce24 100644 (file)
@@ -51,7 +51,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
-       drm_free_large(vgem_obj->pages);
+       kvfree(vgem_obj->pages);
 
        if (obj->import_attach)
                drm_prime_gem_destroy(obj, vgem_obj->table);
@@ -328,7 +328,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
        npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
 
        obj->table = sg;
-       obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!obj->pages) {
                __vgem_gem_destroy(obj);
                return ERR_PTR(-ENOMEM);
index 06cb16d75f4b7a183eeff404a5a1582507e3b983..b94bd5440e5779ac96ebdfa10b9b48adef53e7db 100644 (file)
@@ -120,13 +120,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
        INIT_LIST_HEAD(&validate_list);
        if (exbuf->num_bo_handles) {
 
-               bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
-                                          sizeof(uint32_t));
-               buflist = drm_calloc_large(exbuf->num_bo_handles,
-                                          sizeof(struct ttm_validate_buffer));
+               bo_handles = kvmalloc_array(exbuf->num_bo_handles,
+                                          sizeof(uint32_t), GFP_KERNEL);
+               buflist = kvmalloc_array(exbuf->num_bo_handles,
+                                          sizeof(struct ttm_validate_buffer),
+                                          GFP_KERNEL | __GFP_ZERO);
                if (!bo_handles || !buflist) {
-                       drm_free_large(bo_handles);
-                       drm_free_large(buflist);
+                       kvfree(bo_handles);
+                       kvfree(buflist);
                        return -ENOMEM;
                }
 
@@ -134,16 +135,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        ret = -EFAULT;
-                       drm_free_large(bo_handles);
-                       drm_free_large(buflist);
+                       kvfree(bo_handles);
+                       kvfree(buflist);
                        return ret;
                }
 
                for (i = 0; i < exbuf->num_bo_handles; i++) {
                        gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
                        if (!gobj) {
-                               drm_free_large(bo_handles);
-                               drm_free_large(buflist);
+                               kvfree(bo_handles);
+                               kvfree(buflist);
                                return -ENOENT;
                        }
 
@@ -152,7 +153,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
                        list_add(&buflist[i].head, &validate_list);
                }
-               drm_free_large(bo_handles);
+               kvfree(bo_handles);
        }
 
        ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -172,7 +173,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
        /* fence the command bo */
        virtio_gpu_unref_list(&validate_list);
-       drm_free_large(buflist);
+       kvfree(buflist);
        dma_fence_put(&fence->f);
        return 0;
 
@@ -180,7 +181,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
        ttm_eu_backoff_reservation(&ticket, &validate_list);
 out_free:
        virtio_gpu_unref_list(&validate_list);
-       drm_free_large(buflist);
+       kvfree(buflist);
        return ret;
 }
 
index 52085832f7110479405bdc228427f39534abaf02..b9b5566acfe6035a7080c76adfdf1fc27eb4a9c4 100644 (file)
@@ -70,7 +70,6 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_global.h>
 #include <drm/drm_hashtab.h>
-#include <drm/drm_mem_util.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_os_linux.h>
 #include <drm/drm_sarea.h>