// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put_unlocked(&obj->gem);
}
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If the buffer was mapped through the IOMMU explicitly, the caller
	 * can use the IOVA of that mapping directly.
	 */
	if (phys && obj->mm) {
		*phys = obj->iova;
		return NULL;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/* IOMMU path: build an SG table from the backing pages */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/* contiguous path: let the DMA API derive the SG table */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}
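/*
 * Illustration only, not driver code: a sketch of how a host1x client might
 * consume the pin contract above, assuming the v5.x-era host1x_bo_pin() and
 * host1x_bo_unpin() wrappers; the variable names here are hypothetical.
 *
 *	dma_addr_t phys;
 *	struct sg_table *sgt = host1x_bo_pin(dev, bo, &phys);
 *
 *	if (!sgt)
 *		;	// BO is IOMMU-mapped already, use "phys" directly
 *	else if (!IS_ERR(sgt))
 *		;	// map "sgt" via the DMA API, host1x_bo_unpin() later
 */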
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr + page * PAGE_SIZE;
	else if (obj->gem.import_attach)
		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
	else
		return vmap(obj->pages + page, 1, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
	else
		vunmap(addr);
}
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
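/*
 * For orientation (hedged): tegra_bo_alloc_object() below attaches this ops
 * table to every buffer via host1x_bo_init(&bo->base, &tegra_bo_ops), so the
 * host1x core reaches the helpers above, e.g. for refcounting and pinning
 * around job submission.
 */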
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}
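/*
 * Worked example with made-up numbers: for a 64 KiB BO,
 * drm_mm_insert_node_generic() above reserves a PAGE_SIZE-aligned 64 KiB
 * range of IOVA space, say [0x80000000, 0x80010000). bo->iova then becomes
 * 0x80000000, iommu_map_sg() points that range at the BO's scattered
 * physical pages, and bo->size records the number of bytes actually mapped.
 */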
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
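/*
 * Usage sketch (illustrative, not from this file): callers elsewhere in the
 * driver allocate buffers roughly like this; the flag choice is an example.
 *
 *	struct tegra_bo *bo;
 *
 *	bo = tegra_bo_create(drm, size, DRM_TEGRA_GEM_CREATE_BOTTOM_UP);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */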
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size, unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	/* the handle now holds the only reference we need */
	drm_gem_object_put_unlocked(&bo->gem);

	return bo;
}
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		/* without an IOMMU the buffer must be contiguous */
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->iova = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
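/*
 * Worked example: for a 1024x768 dumb buffer at 32 bpp, min_pitch is
 * DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes. Assuming a hypothetical
 * pitch_align of 64, args->pitch stays 4096 and args->size becomes
 * 4096 * 768 = 3145728 bytes, i.e. 768 pages.
 */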
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
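/*
 * Note (descriptive): the two branches above mirror the two allocation
 * paths. Contiguous BOs are mapped in one go with dma_mmap_wc(), so no
 * fault handler ever runs for them; IOMMU-backed BOs keep their struct
 * pages and are populated one page at a time by tegra_bo_fault().
 */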
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}
static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.map = tegra_gem_prime_kmap,
	.unmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
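/*
 * Usage sketch (illustrative, assumed userspace): the export path is
 * normally reached through the generic PRIME ioctl rather than by calling
 * this function directly.
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,	// GEM handle from buffer creation
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd is now a dma-buf backed by tegra_gem_prime_dmabuf_ops
 */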
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}
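/*
 * Note (descriptive): the buf->ops comparison above is the usual self-import
 * check. If the dma-buf was exported by this same DRM device, the existing
 * GEM object is reused with an extra reference instead of wrapping a second
 * BO around the same memory; only foreign buffers take the attach-and-map
 * path through tegra_bo_import().
 */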