1 // SPDX-License-Identifier: GPL-2.0-or-later
3 #include <drm/drm_debugfs.h>
4 #include <drm/drm_device.h>
5 #include <drm/drm_file.h>
6 #include <drm/drm_gem_ttm_helper.h>
7 #include <drm/drm_gem_vram_helper.h>
8 #include <drm/drm_mode.h>
9 #include <drm/drm_prime.h>
10 #include <drm/ttm/ttm_page_alloc.h>
/* Forward declaration; the callback table itself is defined near the
 * end of this file and installed in drm_gem_vram_init(). */
12 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
17 * This library provides a GEM buffer object that is backed by video RAM
18 * (VRAM). It can be used for framebuffer devices with dedicated memory.
20 * The data structure &struct drm_vram_mm and its helpers implement a memory
21 * manager for simple framebuffer devices with dedicated video memory. Buffer
22 * objects are either placed in video RAM or evicted to system memory. The
22 * respective buffer object is provided by &struct drm_gem_vram_object.
23 * buffer object is provided by &struct drm_gem_vram_object.
27 * Buffer-objects helpers
/* Releases the GEM side of @gbo. Must only run once TTM has finished
 * with the embedded TTM BO; a still-active kmap at this point is a
 * refcounting bug, hence the WARNs. */
30 static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
32 /* We got here via ttm_bo_put(), which means that the
33 * TTM buffer object in 'bo' has already been cleaned
34 * up; only release the GEM object.
37 WARN_ON(gbo->kmap_use_count);
38 WARN_ON(gbo->kmap.virtual);
40 drm_gem_object_release(&gbo->bo.base);
/* Final teardown of @gbo. NOTE(review): the matching kfree() of the
 * object is not visible in this listing — confirm against upstream. */
43 static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
45 drm_gem_vram_cleanup(gbo);
/* TTM destroy callback passed to ttm_bo_init(). Its address also serves
 * as the type marker tested by drm_is_gem_vram(). */
49 static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
51 struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
53 drm_gem_vram_destroy(gbo);
/* Builds @gbo's TTM placement list from the TTM_PL_FLAG_ mask in
 * @pl_flag: VRAM entries request write-combined/uncached mappings,
 * system-memory entries allow any caching. fpfn/lpfn of 0 mean no
 * page-range restriction. */
56 static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
57 unsigned long pl_flag)
62 gbo->placement.placement = gbo->placements;
63 gbo->placement.busy_placement = gbo->placements;
65 if (pl_flag & TTM_PL_FLAG_VRAM)
66 gbo->placements[c++].flags = TTM_PL_FLAG_WC |
67 TTM_PL_FLAG_UNCACHED |
70 if (pl_flag & TTM_PL_FLAG_SYSTEM)
71 gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
/* presumably the !pl_flag fallback entry (pin at current location) —
 * the guarding condition is not visible here; confirm upstream */
75 gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
78 gbo->placement.num_placement = c;
79 gbo->placement.num_busy_placement = c;
81 for (i = 0; i < c; ++i) {
82 gbo->placements[i].fpfn = 0;
83 gbo->placements[i].lpfn = 0;
/* Initializes a caller-allocated @gbo: installs the default GEM funcs
 * (unless a driver pre-set its own), initializes the GEM base object,
 * and hands the BO to TTM with a VRAM|SYSTEM placement. Uses the usual
 * goto-unwind on ttm_bo_init() failure. */
87 static int drm_gem_vram_init(struct drm_device *dev,
88 struct ttm_bo_device *bdev,
89 struct drm_gem_vram_object *gbo,
90 size_t size, unsigned long pg_align,
96 if (!gbo->bo.base.funcs)
97 gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
99 ret = drm_gem_object_init(dev, &gbo->bo.base, size);
103 acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
106 drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
108 ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
109 &gbo->placement, pg_align, interruptible, acc_size,
110 NULL, NULL, ttm_buffer_object_destroy);
112 goto err_drm_gem_object_release;
116 err_drm_gem_object_release:
117 drm_gem_object_release(&gbo->bo.base);
122 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
123 * @dev: the DRM device
124 * @bdev: the TTM BO device backing the object
125 * @size: the buffer size in bytes
126 * @pg_align: the buffer's alignment in multiples of the page size
127 * @interruptible: sleep interruptible if waiting for memory
130 * A new instance of &struct drm_gem_vram_object on success, or
131 * an ERR_PTR()-encoded error code otherwise.
/* Allocates a drm_gem_vram_object and initializes it via
 * drm_gem_vram_init(); returns ERR_PTR(-ENOMEM) if the allocation
 * fails. Ownership of the object passes to the caller. */
133 struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
134 struct ttm_bo_device *bdev,
136 unsigned long pg_align,
139 struct drm_gem_vram_object *gbo;
142 gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
144 return ERR_PTR(-ENOMEM);
146 ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
156 EXPORT_SYMBOL(drm_gem_vram_create);
159 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
160 * @gbo: the GEM VRAM object
162 * See ttm_bo_put() for more information.
/* Drops a reference; destruction happens through
 * ttm_buffer_object_destroy() once the last reference is gone. */
164 void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
166 ttm_bo_put(&gbo->bo);
168 EXPORT_SYMBOL(drm_gem_vram_put);
171 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
172 * @gbo: the GEM VRAM object
174 * See drm_vma_node_offset_addr() for more information.
177 * The buffer object's offset for userspace mappings on success, or
178 * 0 if no offset is allocated.
/* Thin wrapper around drm_vma_node_offset_addr() on the GEM base's
 * VMA node; 0 means no mmap offset was allocated. */
180 u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
182 return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
184 EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
187 * drm_gem_vram_offset() - \
188 Returns a GEM VRAM object's offset in video memory
189 * @gbo: the GEM VRAM object
191 * This function returns the buffer object's offset in the device's video
192 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
195 * The buffer object's offset in video memory on success, or
196 * a negative errno code otherwise.
/* Returns the BO's offset within video memory. Only meaningful while
 * pinned; warns (once) if called on an unpinned BO. */
198 s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
200 if (WARN_ON_ONCE(!gbo->pin_count))
202 return gbo->bo.offset;
/* Pin with the BO already reserved: rebuild the placement from
 * @pl_flag, mark every entry NO_EVICT, and validate. The { false,
 * false } ctx means non-interruptible, no-wait-GPU validation. */
206 static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
207 unsigned long pl_flag)
210 struct ttm_operation_ctx ctx = { false, false };
216 drm_gem_vram_placement(gbo, pl_flag);
218 for (i = 0; i < gbo->placement.num_placement; ++i)
219 gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
221 ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
232 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
233 * @gbo: the GEM VRAM object
234 * @pl_flag: a bitmask of possible memory regions
236 * Pinning a buffer object ensures that it is not evicted from
237 * a memory region. A pinned buffer object has to be unpinned before
238 * it can be pinned to another region. If the pl_flag argument is 0,
239 * the buffer is pinned at its current location (video RAM or system
244 * a negative error code otherwise.
/* Public pin entry point: reserve (interruptibly), delegate to
 * drm_gem_vram_pin_locked(), unreserve. */
246 int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
250 ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
253 ret = drm_gem_vram_pin_locked(gbo, pl_flag);
254 ttm_bo_unreserve(&gbo->bo);
258 EXPORT_SYMBOL(drm_gem_vram_pin);
/* Unpin with the BO already reserved: warn on unbalanced unpin, clear
 * NO_EVICT from every placement entry, and re-validate so TTM may
 * evict the BO again. */
260 static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
263 struct ttm_operation_ctx ctx = { false, false };
265 if (WARN_ON_ONCE(!gbo->pin_count))
272 for (i = 0; i < gbo->placement.num_placement ; ++i)
273 gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
275 ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
283 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
284 * @gbo: the GEM VRAM object
288 * a negative error code otherwise.
/* Public unpin entry point: reserve (interruptibly), delegate to
 * drm_gem_vram_unpin_locked(), unreserve. */
290 int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
294 ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
297 ret = drm_gem_vram_unpin_locked(gbo);
298 ttm_bo_unreserve(&gbo->bo);
302 EXPORT_SYMBOL(drm_gem_vram_unpin);
/* Kernel-map with the BO reserved. Reuses an existing mapping when the
 * use count is non-zero or a stale mapping survives from a previous
 * map (see drm_gem_vram_kunmap_locked(), which defers the unmap);
 * @map == false only queries. NOTE(review): lines 328 and 329 look
 * like returns of two different paths — the intervening control flow
 * is not visible in this listing; confirm upstream. */
304 static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
305 bool map, bool *is_iomem)
308 struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
310 if (gbo->kmap_use_count > 0)
313 if (kmap->virtual || !map)
316 ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
321 if (!kmap->virtual) {
324 return NULL; /* not mapped; don't increment ref */
326 ++gbo->kmap_use_count;
328 return ttm_kmap_obj_virtual(kmap, is_iomem);
329 return kmap->virtual;
333 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
334 * @gbo: the GEM VRAM object
335 * @map: establish a mapping if necessary
336 * @is_iomem: returns true if the mapped memory is I/O memory, or false \
337 otherwise; can be NULL
339 * This function maps the buffer object into the kernel's address space
340 * or returns the current mapping. If the parameter map is false, the
341 * function only queries the current mapping, but does not establish a
345 * The buffers virtual address if mapped, or
346 * NULL if not mapped, or
347 * an ERR_PTR()-encoded error code otherwise.
/* Public kmap entry point: reserve (interruptibly), delegate to
 * drm_gem_vram_kmap_locked(), unreserve. */
349 void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
355 ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
358 virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
359 ttm_bo_unreserve(&gbo->bo);
363 EXPORT_SYMBOL(drm_gem_vram_kmap);
/* Drops one kmap reference; warns on unbalanced kunmap. Intentionally
 * does NOT call ttm_bo_kunmap() here — see the comment below: the real
 * unmap is deferred until eviction. */
365 static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
367 if (WARN_ON_ONCE(!gbo->kmap_use_count))
369 if (--gbo->kmap_use_count > 0)
373 * Permanently mapping and unmapping buffers adds overhead from
374 * updating the page tables and creates debugging output. Therefore,
375 * we delay the actual unmap operation until the BO gets evicted
376 * from memory. See drm_gem_vram_bo_driver_move_notify().
381 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
382 * @gbo: the GEM VRAM object
/* Public kunmap entry point. Reserves non-interruptibly (false) since
 * a failed kunmap cannot be retried meaningfully; warns if the reserve
 * fails. */
384 void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
388 ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
389 if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
391 drm_gem_vram_kunmap_locked(gbo);
392 ttm_bo_unreserve(&gbo->bo);
394 EXPORT_SYMBOL(drm_gem_vram_kunmap);
397 * drm_gem_vram_fill_create_dumb() - \
398 Helper for implementing &struct drm_driver.dumb_create
399 * @file: the DRM file
400 * @dev: the DRM device
401 * @bdev: the TTM BO device managing the buffer object
402 * @pg_align: the buffer's alignment in multiples of the page size
403 * @interruptible: sleep interruptible if waiting for memory
404 * @args: the arguments as provided to \
405 &struct drm_driver.dumb_create
407 * This helper function fills &struct drm_mode_create_dumb, which is used
408 * by &struct drm_driver.dumb_create. Implementations of this interface
409 * should forward their arguments to this helper, plus the driver-specific
414 * a negative error code otherwise.
/* Dumb-buffer creation helper: computes pitch from width and a
 * byte-rounded bpp, rounds the total size up to PAGE_SIZE, creates the
 * VRAM BO, and publishes a GEM handle into @args. The BO reference is
 * always dropped here — the handle keeps the object alive. */
416 int drm_gem_vram_fill_create_dumb(struct drm_file *file,
417 struct drm_device *dev,
418 struct ttm_bo_device *bdev,
419 unsigned long pg_align,
421 struct drm_mode_create_dumb *args)
424 struct drm_gem_vram_object *gbo;
/* (bpp + 7) / 8 rounds bits-per-pixel up to whole bytes */
428 pitch = args->width * ((args->bpp + 7) / 8);
429 size = pitch * args->height;
431 size = roundup(size, PAGE_SIZE);
435 gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
439 ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
441 goto err_drm_gem_object_put_unlocked;
443 drm_gem_object_put_unlocked(&gbo->bo.base);
447 args->handle = handle;
451 err_drm_gem_object_put_unlocked:
452 drm_gem_object_put_unlocked(&gbo->bo.base);
455 EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
458 * Helpers for struct ttm_bo_driver
/* True iff @bo was created by this helper library, identified by its
 * destroy callback. TTM may hand us foreign BOs. */
461 static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
463 return (bo->destroy == ttm_buffer_object_destroy);
/* On eviction, retarget the BO to system memory and report that
 * placement back to TTM via *pl. */
466 static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
467 struct ttm_placement *pl)
469 drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
470 *pl = gbo->placement;
/* mmap access check: defer to the DRM VMA manager's per-file
 * permission test on the GEM base's VMA node. */
473 static int drm_gem_vram_bo_driver_verify_access(struct drm_gem_vram_object *gbo,
476 return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
/* Move notification: this is where the kmap deferred by
 * drm_gem_vram_kunmap_locked() is finally torn down. Moving a BO that
 * still has kmap users is a bug, hence the WARN. */
480 static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
482 struct ttm_mem_reg *new_mem)
484 struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
486 if (WARN_ON_ONCE(gbo->kmap_use_count))
492 kmap->virtual = NULL;
496 * Helpers for struct drm_gem_object_funcs
500 * drm_gem_vram_object_free() - \
501 Implements &struct drm_gem_object_funcs.free
502 * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
/* &drm_gem_object_funcs.free: drop the last GEM reference through the
 * TTM refcount. */
504 static void drm_gem_vram_object_free(struct drm_gem_object *gem)
506 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
508 drm_gem_vram_put(gbo);
512 * Helpers for dumb buffers
516 * drm_gem_vram_driver_dumb_create() - \
517 Implements &struct drm_driver.dumb_create
518 * @file: the DRM file
519 * @dev: the DRM device
520 * @args: the arguments as provided to \
521 &struct drm_driver.dumb_create
523 * This function requires the driver to use @drm_device.vram_mm for its
524 * instance of VRAM MM.
528 * a negative error code otherwise.
/* &drm_driver.dumb_create using the device-global VRAM MM; pg_align 0
 * leaves the alignment choice to the helper. Warns (once) and bails if
 * the VRAM MM was never initialized. */
530 int drm_gem_vram_driver_dumb_create(struct drm_file *file,
531 struct drm_device *dev,
532 struct drm_mode_create_dumb *args)
534 if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
537 return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
540 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
543 * drm_gem_vram_driver_dumb_mmap_offset() - \
544 Implements &struct drm_driver.dumb_mmap_offset
545 * @file: DRM file pointer.
547 * @handle: GEM handle
548 * @offset: Returns the mapping's memory offset on success
552 * a negative errno code otherwise.
/* &drm_driver.dumb_mmap_offset: look up the GEM object by @handle,
 * report its mmap offset, then drop the lookup reference. */
554 int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
555 struct drm_device *dev,
556 uint32_t handle, uint64_t *offset)
558 struct drm_gem_object *gem;
559 struct drm_gem_vram_object *gbo;
561 gem = drm_gem_object_lookup(file, handle);
565 gbo = drm_gem_vram_of_gem(gem);
566 *offset = drm_gem_vram_mmap_offset(gbo);
568 drm_gem_object_put_unlocked(gem);
572 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
579 * drm_gem_vram_object_pin() - \
580 Implements &struct drm_gem_object_funcs.pin
581 * @gem: The GEM object to pin
585 * a negative errno code otherwise.
/* &drm_gem_object_funcs.pin: pin at the current location (pl_flag 0)
 * rather than forcing VRAM — see the rationale below. */
587 static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
589 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
591 /* Fbdev console emulation is the use case of these PRIME
592 * helpers. This may involve updating a hardware buffer from
593 * a shadow FB. We pin the buffer to it's current location
594 * (either video RAM or system memory) to prevent it from
595 * being relocated during the update operation. If you require
596 * the buffer to be pinned to VRAM, implement a callback that
597 * sets the flags accordingly.
599 return drm_gem_vram_pin(gbo, 0);
603 * drm_gem_vram_object_unpin() - \
604 Implements &struct drm_gem_object_funcs.unpin
605 * @gem: The GEM object to unpin
/* &drm_gem_object_funcs.unpin: counterpart of drm_gem_vram_object_pin().
 * The unpin return value has no way to propagate here. */
607 static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
609 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
611 drm_gem_vram_unpin(gbo);
615 * drm_gem_vram_object_vmap() - \
616 Implements &struct drm_gem_object_funcs.vmap
617 * @gem: The GEM object to map
620 * The buffers virtual address on success, or
/* &drm_gem_object_funcs.vmap: under a single reservation, pin the BO
 * in place and kernel-map it, so the mapping cannot move while in use.
 * Unwinds pin then reservation on failure (goto cleanup). */
623 static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
625 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
629 ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
633 ret = drm_gem_vram_pin_locked(gbo, 0);
635 goto err_ttm_bo_unreserve;
636 base = drm_gem_vram_kmap_locked(gbo, true, NULL);
639 goto err_drm_gem_vram_unpin_locked;
642 ttm_bo_unreserve(&gbo->bo);
646 err_drm_gem_vram_unpin_locked:
647 drm_gem_vram_unpin_locked(gbo);
648 err_ttm_bo_unreserve:
649 ttm_bo_unreserve(&gbo->bo);
654 * drm_gem_vram_object_vunmap() - \
655 Implements &struct drm_gem_object_funcs.vunmap
656 * @gem: The GEM object to unmap
657 * @vaddr: The mapping's base address
/* &drm_gem_object_funcs.vunmap: undo drm_gem_vram_object_vmap() —
 * kunmap and unpin under one (non-interruptible) reservation; warns if
 * the reservation fails since vunmap cannot report errors. */
659 static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
662 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
665 ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
666 if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
669 drm_gem_vram_kunmap_locked(gbo);
670 drm_gem_vram_unpin_locked(gbo);
672 ttm_bo_unreserve(&gbo->bo);
/* GEM callbacks for VRAM BOs; print_info is shared with the generic
 * GEM-TTM helper. */
679 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
680 .free = drm_gem_vram_object_free,
681 .pin = drm_gem_vram_object_pin,
682 .unpin = drm_gem_vram_object_unpin,
683 .vmap = drm_gem_vram_object_vmap,
684 .vunmap = drm_gem_vram_object_vunmap,
685 .print_info = drm_gem_ttm_print_info,
689 * VRAM memory manager
/* TTM backend destroy hook for the TTs created below. NOTE(review):
 * the body (freeing @tt) is not visible in this listing. */
696 static void backend_func_destroy(struct ttm_tt *tt)
/* Minimal TTM backend: only destruction is needed for VRAM helpers. */
702 static struct ttm_backend_func backend_func = {
703 .destroy = backend_func_destroy
/* &ttm_bo_driver.ttm_tt_create: allocate a TT, wire up backend_func,
 * and initialize it; unwinds via goto on ttm_tt_init() failure. */
710 static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
716 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
720 tt->func = &backend_func;
722 ret = ttm_tt_init(tt, bo, page_flags);
724 goto err_ttm_tt_init;
/* &ttm_bo_driver.init_mem_type: system memory is mappable with full
 * caching choice; VRAM is a fixed, mappable region managed by the
 * range manager, defaulting to write-combined mappings. */
733 static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
734 struct ttm_mem_type_manager *man)
738 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
739 man->available_caching = TTM_PL_MASK_CACHING;
740 man->default_caching = TTM_PL_FLAG_CACHED;
743 man->func = &ttm_bo_manager_func;
744 man->flags = TTM_MEMTYPE_FLAG_FIXED |
745 TTM_MEMTYPE_FLAG_MAPPABLE;
746 man->available_caching = TTM_PL_FLAG_UNCACHED |
748 man->default_caching = TTM_PL_FLAG_WC;
/* &ttm_bo_driver.evict_flags: filter out foreign BOs, then forward to
 * the GEM-VRAM eviction-placement helper. */
756 static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
757 struct ttm_placement *placement)
759 struct drm_gem_vram_object *gbo;
761 /* TTM may pass BOs that are not GEM VRAM BOs. */
762 if (!drm_is_gem_vram(bo))
765 gbo = drm_gem_vram_of_bo(bo);
767 drm_gem_vram_bo_driver_evict_flags(gbo, placement);
/* &ttm_bo_driver.verify_access: filter out foreign BOs, then forward
 * to the GEM-VRAM access-verification helper. */
770 static int bo_driver_verify_access(struct ttm_buffer_object *bo,
773 struct drm_gem_vram_object *gbo;
775 /* TTM may pass BOs that are not GEM VRAM BOs. */
776 if (!drm_is_gem_vram(bo))
779 gbo = drm_gem_vram_of_bo(bo);
781 return drm_gem_vram_bo_driver_verify_access(gbo, filp);
/* &ttm_bo_driver.move_notify: filter out foreign BOs, then forward to
 * the GEM-VRAM move-notify helper (which drops deferred kmaps). */
784 static void bo_driver_move_notify(struct ttm_buffer_object *bo,
786 struct ttm_mem_reg *new_mem)
788 struct drm_gem_vram_object *gbo;
790 /* TTM may pass BOs that are not GEM VRAM BOs. */
791 if (!drm_is_gem_vram(bo))
794 gbo = drm_gem_vram_of_bo(bo);
796 drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
/* &ttm_bo_driver.io_mem_reserve: describe the bus address space for a
 * memory region. System memory needs no I/O aperture; VRAM reports an
 * offset relative to the VRAM base and is flagged as I/O memory. */
799 static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
800 struct ttm_mem_reg *mem)
802 struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
803 struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
805 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
808 mem->bus.addr = NULL;
809 mem->bus.size = mem->num_pages << PAGE_SHIFT;
811 switch (mem->mem_type) {
812 case TTM_PL_SYSTEM: /* nothing to do */
815 mem->bus.is_iomem = false;
818 mem->bus.offset = mem->start << PAGE_SHIFT;
819 mem->bus.base = vmm->vram_base;
820 mem->bus.is_iomem = true;
/* &ttm_bo_driver.io_mem_free: intentionally empty — nothing was
 * allocated in bo_driver_io_mem_reserve(). */
829 static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
830 struct ttm_mem_reg *mem)
/* TTM driver callbacks for the VRAM helper; TT population and eviction
 * heuristics use the stock TTM implementations. */
833 static struct ttm_bo_driver bo_driver = {
834 .ttm_tt_create = bo_driver_ttm_tt_create,
835 .ttm_tt_populate = ttm_pool_populate,
836 .ttm_tt_unpopulate = ttm_pool_unpopulate,
837 .init_mem_type = bo_driver_init_mem_type,
838 .eviction_valuable = ttm_bo_eviction_valuable,
839 .evict_flags = bo_driver_evict_flags,
840 .verify_access = bo_driver_verify_access,
841 .move_notify = bo_driver_move_notify,
842 .io_mem_reserve = bo_driver_io_mem_reserve,
843 .io_mem_free = bo_driver_io_mem_free,
850 #if defined(CONFIG_DEBUG_FS)
/* debugfs "vram-mm" show function: dump the VRAM range manager's
 * drm_mm state under the global LRU lock. */
851 static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
853 struct drm_info_node *node = (struct drm_info_node *) m->private;
854 struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
855 struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
856 struct ttm_bo_global *glob = vmm->bdev.glob;
857 struct drm_printer p = drm_seq_file_printer(m);
/* lru_lock also protects the range manager's drm_mm here */
859 spin_lock(&glob->lru_lock);
860 drm_mm_print(mm, &p);
861 spin_unlock(&glob->lru_lock);
865 static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
866 { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
871 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
873 * @minor: drm minor device.
877 * a negative error code otherwise.
/* Registers the "vram-mm" debugfs file for @minor; compiles to a no-op
 * (success) when CONFIG_DEBUG_FS is disabled. */
879 int drm_vram_mm_debugfs_init(struct drm_minor *minor)
883 #if defined(CONFIG_DEBUG_FS)
884 ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
885 ARRAY_SIZE(drm_vram_mm_debugfs_list),
886 minor->debugfs_root, minor);
890 EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
893 * drm_vram_mm_init() - Initialize an instance of VRAM MM.
894 * @vmm: the VRAM MM instance to initialize
895 * @dev: the DRM device
896 * @vram_base: the base address of the video memory
897 * @vram_size: the size of the video memory in bytes
901 * a negative error code otherwise.
/* Initializes @vmm: records the VRAM aperture, brings up the TTM BO
 * device on the DRM device's anon inode / VMA offset manager, and
 * creates the VRAM memory region sized in pages. */
903 int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
904 uint64_t vram_base, size_t vram_size)
908 vmm->vram_base = vram_base;
909 vmm->vram_size = vram_size;
911 ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
912 dev->anon_inode->i_mapping,
913 dev->vma_offset_manager,
918 ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
924 EXPORT_SYMBOL(drm_vram_mm_init);
927 * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM.
928 * @vmm: the VRAM MM instance to clean up
/* Tears down the TTM BO device; inverse of drm_vram_mm_init(). */
930 void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
932 ttm_bo_device_release(&vmm->bdev);
934 EXPORT_SYMBOL(drm_vram_mm_cleanup);
937 * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap()
938 * @filp: the mapping's file structure
939 * @vma: the mapping's memory area
940 * @vmm: the VRAM MM instance
944 * a negative error code otherwise.
/* Thin wrapper forwarding the mmap request to TTM's BO mmap handler
 * for @vmm's BO device. */
946 int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
947 struct drm_vram_mm *vmm)
949 return ttm_bo_mmap(filp, vma, &vmm->bdev);
951 EXPORT_SYMBOL(drm_vram_mm_mmap);
954 * Helpers for integration with struct drm_device
958 * drm_vram_helper_alloc_mm - Allocates a device's instance of \
960 * @dev: the DRM device
961 * @vram_base: the base address of the video memory
962 * @vram_size: the size of the video memory in bytes
965 * The new instance of &struct drm_vram_mm on success, or
966 * an ERR_PTR()-encoded errno code otherwise.
/* Allocates and initializes dev->vram_mm. Warns if an instance already
 * exists; returns ERR_PTR(-ENOMEM) when the allocation fails. */
968 struct drm_vram_mm *drm_vram_helper_alloc_mm(
969 struct drm_device *dev, uint64_t vram_base, size_t vram_size)
973 if (WARN_ON(dev->vram_mm))
976 dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
978 return ERR_PTR(-ENOMEM);
980 ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
991 EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
994 * drm_vram_helper_release_mm - Releases a device's instance of \
996 * @dev: the DRM device
/* Inverse of drm_vram_helper_alloc_mm(): clean up, free, and NULL the
 * pointer so a stale reference cannot be reused. */
998 void drm_vram_helper_release_mm(struct drm_device *dev)
1003 drm_vram_mm_cleanup(dev->vram_mm);
1004 kfree(dev->vram_mm);
1005 dev->vram_mm = NULL;
1007 EXPORT_SYMBOL(drm_vram_helper_release_mm);
1010 * Helpers for &struct file_operations
1014 * drm_vram_mm_file_operations_mmap() - \
1015 Implements &struct file_operations.mmap()
1016 * @filp: the mapping's file structure
1017 * @vma: the mapping's memory area
1021 * a negative error code otherwise.
/* file_operations.mmap implementation: resolve the DRM device from the
 * file's private data and forward to drm_vram_mm_mmap(); warns (once)
 * if the VRAM MM was never set up. */
1023 int drm_vram_mm_file_operations_mmap(
1024 struct file *filp, struct vm_area_struct *vma)
1026 struct drm_file *file_priv = filp->private_data;
1027 struct drm_device *dev = file_priv->minor->dev;
1029 if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
1032 return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
1034 EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);