/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
                        priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        return !msm_obj->vram_node;
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        dma_addr_t paddr;
        struct page **p;
        int ret, i;

        p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        spin_lock(&priv->vram.lock);
        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
        spin_unlock(&priv->vram.lock);
        if (ret) {
                kvfree(p);
                return ERR_PTR(ret);
        }

        paddr = physaddr(obj);
        for (i = 0; i < npages; i++) {
                p[i] = phys_to_page(paddr);
                paddr += PAGE_SIZE;
        }

        return p;
}
static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p;
                int npages = obj->size >> PAGE_SHIFT;

                if (use_pages(obj))
                        p = drm_gem_get_pages(obj);
                else
                        p = get_pages_vram(obj, npages);

                if (IS_ERR(p)) {
                        DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
                        return p;
                }

                msm_obj->pages = p;

                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        void *ptr = ERR_CAST(msm_obj->sgt);

                        DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
                        msm_obj->sgt = NULL;
                        return ptr;
                }

                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
}
static void put_pages_vram(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;

        spin_lock(&priv->vram.lock);
        drm_mm_remove_node(msm_obj->vram_node);
        spin_unlock(&priv->vram.lock);

        kvfree(msm_obj->pages);
}
static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                if (msm_obj->sgt) {
                        /* For non-cached buffers, ensure the new
                         * pages are clean because display controller,
                         * GPU, etc. are not coherent:
                         */
                        if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                                dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                                msm_obj->sgt->nents,
                                                DMA_BIDIRECTIONAL);

                        sg_free_table(msm_obj->sgt);
                        kfree(msm_obj->sgt);
                }

                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else
                        put_pages_vram(obj);

                msm_obj->pages = NULL;
        }
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct page **p;

        mutex_lock(&msm_obj->lock);

        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
                mutex_unlock(&msm_obj->lock);
                return ERR_PTR(-EBUSY);
        }

        p = get_pages(obj);
        mutex_unlock(&msm_obj->lock);

        return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
        /* when we start tracking the pin count, then do something here */
}
int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(obj->filp);

                vma->vm_pgoff = 0;
                vma->vm_file  = obj->filp;

                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        return 0;
}
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int err;
        vm_fault_t ret;

        /*
         * vm_ops.open/drm_gem_mmap_obj and close get and put
         * a reference on obj. So, we dont need to hold one here.
         */
        err = mutex_lock_interruptible(&msm_obj->lock);
        if (err) {
                ret = VM_FAULT_NOPAGE;
                goto out;
        }

        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
                mutex_unlock(&msm_obj->lock);
                return VM_FAULT_SIGBUS;
        }

        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
                ret = vmf_error(PTR_ERR(pages));
                goto out_unlock;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);

        ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
        mutex_unlock(&msm_obj->lock);
out:
        return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret) {
                DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }

        return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
        uint64_t offset;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock(&msm_obj->lock);
        offset = mmap_offset(obj);
        mutex_unlock(&msm_obj->lock);

        return offset;
}
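/*
 * Note: the offset returned here is the "fake" mmap offset that userspace
 * passes to mmap() on the DRM fd.  An illustrative (not msm specific) dumb
 * buffer flow, assuming the standard DRM ioctls, is roughly:
 *
 *   DRM_IOCTL_MODE_CREATE_DUMB                  -> handle
 *   DRM_IOCTL_MODE_MAP_DUMB (dumb_map_offset)   -> offset
 *   mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, drm_fd, offset)
 *
 * msm_gem_fault() above then fills in the mapping one page at a time.
 */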
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
        if (!vma)
                return ERR_PTR(-ENOMEM);

        vma->aspace = aspace;

        list_add_tail(&vma->list, &msm_obj->vmas);

        return vma;
}
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        list_for_each_entry(vma, &msm_obj->vmas, list) {
                if (vma->aspace == aspace)
                        return vma;
        }

        return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
        if (!vma)
                return;

        list_del(&vma->list);
        kfree(vma);
}
/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma, *tmp;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
                msm_gem_purge_vma(vma->aspace, vma);
                msm_gem_close_vma(vma->aspace, vma);
                del_vma(vma);
        }
}
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
        int ret = 0;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        vma = lookup_vma(obj, aspace);

        if (!vma) {
                vma = add_vma(obj, aspace);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);

                ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
                if (ret) {
                        del_vma(vma);
                        return ret;
                }
        }

        *iova = vma->iova;
        return 0;
}
static int msm_gem_pin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
        struct page **pages;
        int prot = IOMMU_READ;

        if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
                prot |= IOMMU_WRITE;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
                return -EBUSY;

        vma = lookup_vma(obj, aspace);
        if (WARN_ON(!vma))
                return -EINVAL;

        pages = get_pages(obj);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        return msm_gem_map_vma(aspace, vma, prot,
                        msm_obj->sgt, obj->size >> PAGE_SHIFT);
}
/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        u64 local;
        int ret;

        mutex_lock(&msm_obj->lock);

        ret = msm_gem_get_iova_locked(obj, aspace, &local);

        if (!ret)
                ret = msm_gem_pin_iova(obj, aspace);

        if (!ret)
                *iova = local;

        mutex_unlock(&msm_obj->lock);

        return ret;
}
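/*
 * Illustrative pairing, using only helpers from this file: a caller that
 * needs a GPU-visible address typically does
 *
 *   ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *   ... submit work that references 'iova' ...
 *   msm_gem_unpin_iova(obj, aspace);
 *
 * so that the pin taken here is balanced again.
 */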
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret;

        mutex_lock(&msm_obj->lock);
        ret = msm_gem_get_iova_locked(obj, aspace, iova);
        mutex_unlock(&msm_obj->lock);

        return ret;
}
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        mutex_lock(&msm_obj->lock);
        vma = lookup_vma(obj, aspace);
        mutex_unlock(&msm_obj->lock);
        WARN_ON(!vma);

        return vma ? vma->iova : 0;
}
/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * that the memory isn't needed anymore.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        mutex_lock(&msm_obj->lock);
        vma = lookup_vma(obj, aspace);

        if (!WARN_ON(!vma))
                msm_gem_unmap_vma(aspace, vma);

        mutex_unlock(&msm_obj->lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
        args->pitch = align_pitch(args->width, args->bpp);
        args->size  = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
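/*
 * Worked example of the arithmetic above (illustrative values): a 1024x768
 * dumb buffer at 32 bpp gets pitch = align_pitch(1024, 32), which is at
 * least 1024 * 4 = 4096 bytes, and size = PAGE_ALIGN(pitch * 768).
 */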
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        /* GEM does all our handle to object mapping */
        obj = drm_gem_object_lookup(file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto fail;
        }

        *offset = msm_gem_mmap_offset(obj);

        drm_gem_object_put_unlocked(obj);

fail:
        return ret;
}
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        mutex_lock(&msm_obj->lock);

        if (WARN_ON(msm_obj->madv > madv)) {
                DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
                                msm_obj->madv, madv);
                mutex_unlock(&msm_obj->lock);
                return ERR_PTR(-EBUSY);
        }

        /* increment vmap_count *before* vmap() call, so shrinker can
         * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
         * This guarantees that we won't try to msm_gem_vunmap() this
         * same object from within the vmap() call (while we already
         * hold msm_obj->lock)
         */
        msm_obj->vmap_count++;

        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);

                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto fail;
                }

                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
                if (msm_obj->vaddr == NULL) {
                        ret = -ENOMEM;
                        goto fail;
                }
        }

        mutex_unlock(&msm_obj->lock);
        return msm_obj->vaddr;

fail:
        msm_obj->vmap_count--;
        mutex_unlock(&msm_obj->lock);
        return ERR_PTR(ret);
}
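/*
 * Every successful get_vaddr() bumps vmap_count and must be balanced by a
 * msm_gem_put_vaddr() call below, which drops vmap_count again so that the
 * shrinker is allowed to vunmap the object.
 */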
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
        return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * ring.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
        return get_vaddr(obj, __MSM_MADV_PURGED);
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock(&msm_obj->lock);
        WARN_ON(msm_obj->vmap_count < 1);
        msm_obj->vmap_count--;
        mutex_unlock(&msm_obj->lock);
}
/* Update madvise status, returns true if not purged, else
 * false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock(&msm_obj->lock);

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        if (msm_obj->madv != __MSM_MADV_PURGED)
                msm_obj->madv = madv;

        madv = msm_obj->madv;

        mutex_unlock(&msm_obj->lock);

        return (madv != __MSM_MADV_PURGED);
}
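/*
 * Illustrative summary (assuming the MSM_GEM_MADVISE ioctl as the caller):
 * userspace marks idle buffers MSM_MADV_DONTNEED so the shrinker may purge
 * them, and marks them MSM_MADV_WILLNEED again before the next use; a return
 * of false tells userspace the backing pages were already purged.
 */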
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        WARN_ON(!is_purgeable(msm_obj));
        WARN_ON(obj->import_attach);

        mutex_lock_nested(&msm_obj->lock, subclass);

        put_iova(obj);

        msm_gem_vunmap_locked(obj);

        put_pages(obj);

        msm_obj->madv = __MSM_MADV_PURGED;

        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
        drm_gem_free_mmap_offset(obj);

        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
                        0, (loff_t)-1);

        mutex_unlock(&msm_obj->lock);
}
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
                return;

        vunmap(msm_obj->vaddr);
        msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock_nested(&msm_obj->lock, subclass);
        msm_gem_vunmap_locked(obj);
        mutex_unlock(&msm_obj->lock);
}
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        int i, ret;

        fobj = reservation_object_get_list(msm_obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
                fence = reservation_object_get_excl(msm_obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
                if (fence && (fence->context != fctx->context)) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        if (!exclusive || !fobj)
                return 0;

        for (i = 0; i < fobj->shared_count; i++) {
                fence = rcu_dereference_protected(fobj->shared[i],
                                reservation_object_held(msm_obj->resv));
                if (fence->context != fctx->context) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
        msm_obj->gpu = gpu;
        if (exclusive)
                reservation_object_add_excl_fence(msm_obj->resv, fence);
        else
                reservation_object_add_shared_fence(msm_obj->resv, fence);
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        msm_obj->gpu = NULL;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        bool write = !!(op & MSM_PREP_WRITE);
        unsigned long remain =
                op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
        long ret;

        ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
                        true, remain);
        if (ret == 0)
                return remain == 0 ? -EBUSY : -ETIMEDOUT;
        else if (ret < 0)
                return ret;

        /* TODO cache maintenance */

        return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
        /* TODO cache maintenance */
        return 0;
}
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
                struct seq_file *m)
{
        if (!dma_fence_is_signaled(fence))
                seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
                                fence->ops->get_driver_name(fence),
                                fence->ops->get_timeline_name(fence),
                                fence->seqno);
}
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object *robj = msm_obj->resv;
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        struct msm_gem_vma *vma;
        uint64_t off = drm_vma_node_start(&obj->vma_node);
        const char *madv;

        mutex_lock(&msm_obj->lock);

        switch (msm_obj->madv) {
        case __MSM_MADV_PURGED:
                madv = " purged";
                break;
        case MSM_MADV_DONTNEED:
                madv = " purgeable";
                break;
        case MSM_MADV_WILLNEED:
        default:
                madv = "";
                break;
        }

        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, msm_obj->vaddr);

        seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

        if (!list_empty(&msm_obj->vmas)) {
                seq_puts(m, "      vmas:");

                list_for_each_entry(vma, &msm_obj->vmas, list)
                        seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
                                vma->iova, vma->mapped ? "mapped" : "unmapped",
                                vma->inuse);

                seq_puts(m, "\n");
        }

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();

        mutex_unlock(&msm_obj->lock);
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
        struct msm_gem_object *msm_obj;
        int count = 0;
        size_t size = 0;

        seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_puts(m, "   ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
        }

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        mutex_lock(&msm_obj->lock);

        put_iova(obj);

        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
                        kvfree(msm_obj->pages);

                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
                msm_gem_vunmap_locked(obj);
                put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        mutex_unlock(&msm_obj->lock);
        kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                uint32_t size, uint32_t flags, uint32_t *handle,
                char *name)
{
        struct drm_gem_object *obj;
        int ret;

        obj = msm_gem_new(dev, size, flags);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (name)
                msm_gem_object_set_name(obj, "%s", name);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(obj);

        return ret;
}
static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct reservation_object *resv,
                struct drm_gem_object **obj,
                bool struct_mutex_locked)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;

        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
        default:
                DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }

        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
        if (!msm_obj)
                return -ENOMEM;

        mutex_init(&msm_obj->lock);

        msm_obj->flags = flags;
        msm_obj->madv = MSM_MADV_WILLNEED;

        if (resv) {
                msm_obj->resv = resv;
        } else {
                msm_obj->resv = &msm_obj->_resv;
                reservation_object_init(msm_obj->resv);
        }

        INIT_LIST_HEAD(&msm_obj->submit_entry);
        INIT_LIST_HEAD(&msm_obj->vmas);

        if (struct_mutex_locked) {
                WARN_ON(!mutex_is_locked(&dev->struct_mutex));
                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
        } else {
                mutex_lock(&dev->struct_mutex);
                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
                mutex_unlock(&dev->struct_mutex);
        }

        *obj = &msm_obj->base;

        return 0;
}
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_gem_object *obj = NULL;
        bool use_vram = false;
        int ret;

        size = PAGE_ALIGN(size);

        if (!msm_use_mmu(dev))
                use_vram = true;
        else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
                use_vram = true;

        if (WARN_ON(use_vram && !priv->vram.size))
                return ERR_PTR(-EINVAL);

        /* Disallow zero sized objects as they make the underlying
         * infrastructure grumpy
         */
        if (size == 0)
                return ERR_PTR(-EINVAL);

        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
        if (ret)
                goto fail;

        if (use_vram) {
                struct msm_gem_vma *vma;
                struct page **pages;
                struct msm_gem_object *msm_obj = to_msm_bo(obj);

                mutex_lock(&msm_obj->lock);
                vma = add_vma(obj, NULL);
                mutex_unlock(&msm_obj->lock);
                if (IS_ERR(vma)) {
                        ret = PTR_ERR(vma);
                        goto fail;
                }

                to_msm_bo(obj)->vram_node = &vma->node;

                drm_gem_private_object_init(dev, obj, size);

                pages = get_pages(obj);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto fail;
                }

                vma->iova = physaddr(obj);
        } else {
                ret = drm_gem_object_init(dev, obj, size);
                if (ret)
                        goto fail;
        }

        return obj;

fail:
        drm_gem_object_put_unlocked(obj);
        return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        return _msm_gem_new(dev, size, flags, false);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt)
{
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj;
        uint32_t size;
        int ret, npages;

        /* if we don't have IOMMU, don't bother pretending we can import: */
        if (!msm_use_mmu(dev)) {
                DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
                return ERR_PTR(-EINVAL);
        }

        size = PAGE_ALIGN(dmabuf->size);

        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
        if (ret)
                goto fail;

        drm_gem_private_object_init(dev, obj, size);

        npages = size / PAGE_SIZE;

        msm_obj = to_msm_bo(obj);
        mutex_lock(&msm_obj->lock);
        msm_obj->sgt = sgt;
        msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!msm_obj->pages) {
                mutex_unlock(&msm_obj->lock);
                ret = -ENOMEM;
                goto fail;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
        if (ret) {
                mutex_unlock(&msm_obj->lock);
                goto fail;
        }

        mutex_unlock(&msm_obj->lock);
        return obj;

fail:
        drm_gem_object_put_unlocked(obj);
        return ERR_PTR(ret);
}
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
                uint32_t flags, struct msm_gem_address_space *aspace,
                struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
        void *vaddr;
        struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
        int ret;

        if (IS_ERR(obj))
                return ERR_CAST(obj);

        if (iova) {
                ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
                if (ret)
                        goto err;
        }

        vaddr = msm_gem_get_vaddr(obj);
        if (IS_ERR(vaddr)) {
                msm_gem_unpin_iova(obj, aspace);
                ret = PTR_ERR(vaddr);
                goto err;
        }

        if (bo)
                *bo = obj;

        return vaddr;
err:
        if (locked)
                drm_gem_object_put(obj);
        else
                drm_gem_object_put_unlocked(obj);

        return ERR_PTR(ret);
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
                uint32_t flags, struct msm_gem_address_space *aspace,
                struct drm_gem_object **bo, uint64_t *iova)
{
        return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
                uint32_t flags, struct msm_gem_address_space *aspace,
                struct drm_gem_object **bo, uint64_t *iova)
{
        return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
void msm_gem_kernel_put(struct drm_gem_object *bo,
                struct msm_gem_address_space *aspace, bool locked)
{
        if (IS_ERR_OR_NULL(bo))
                return;

        msm_gem_put_vaddr(bo);
        msm_gem_unpin_iova(bo, aspace);

        if (locked)
                drm_gem_object_put(bo);
        else
                drm_gem_object_put_unlocked(bo);
}
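/*
 * Illustrative in-kernel usage of the two helpers above, using only names
 * from this file:
 *
 *   ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *   if (IS_ERR(ptr))
 *           return PTR_ERR(ptr);
 *   ... fill *ptr, hand 'iova' to the GPU ...
 *   msm_gem_kernel_put(bo, aspace, false);
 */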
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
        struct msm_gem_object *msm_obj = to_msm_bo(bo);
        va_list ap;

        if (!fmt)
                return;

        va_start(ap, fmt);
        vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
        va_end(ap);
}