/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "omap_dmm_tiler.h"
/*
 * GEM buffer object implementation.
 */
/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_MEM_DMA_API	0x01000000 /* memory allocated with the dma_alloc_* API */
#define OMAP_BO_MEM_SHMEM	0x02000000 /* memory allocated through shmem backing */
#define OMAP_BO_MEM_DMABUF	0x08000000 /* memory imported from a dmabuf */
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	/** width/height for tiled formats (rounded up to slot boundaries) */

	/** roll applied when mapping to DMM */

	/** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
	 *   which case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through
	 * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
	 * to ensure that the mapping won't disappear unexpectedly. References
	 * must be released with omap_gem_unpin().
	 */

	/**
	 * # of users of dma_addr
	 */

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */

#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
/* ----------------------------------------------------------------------------- */

/** get mmap offset */
u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
	struct drm_device *dev = obj->dev;

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
		dev_err(dev->dev, "could not allocate mmap offset\n");

	return drm_vma_node_offset_addr(&obj->vma_node);

static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)

	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
/* ----------------------------------------------------------------------------- */

static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

		/* if stride > PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
			off += PAGE_SIZE * m;
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,

/* Evict a buffer from usergart, if it is mapped there */
static void omap_gem_evict(struct drm_gem_object *obj)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
					&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				omap_gem_evict_entry(obj, fmt, entry);
/* ----------------------------------------------------------------------------- */

/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int npages = obj->size >> PAGE_SHIFT;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)

	pages = drm_gem_get_pages(obj);
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
					"%s: failed to map page\n", __func__);

				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);

		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	drm_gem_put_pages(obj, pages, true, false);
/* Release backing pages. Must be called with the omap_obj.lock held. */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned int npages = obj->size >> PAGE_SHIFT;

	lockdep_assert_held(&omap_obj->lock);

	for (i = 0; i < npages; i++) {
		if (omap_obj->dma_addrs[i])
			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
					PAGE_SIZE, DMA_TO_DEVICE);

	kfree(omap_obj->dma_addrs);
	omap_obj->dma_addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
/* get buffer flags */
u32 omap_gem_flags(struct drm_gem_object *obj)
	return to_omap_bo(obj)->flags;

size_t omap_gem_mmap_size(struct drm_gem_object *obj)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!). But we don't back the entire buffer with
		 * pages, only the valid picture part.. so we need to adjust
		 * for this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
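/*
 * Illustrative example (not part of the original driver): per the comment in
 * omap_gem_mmap_size() above, the mmap size of a tiled buffer is based on the
 * virtual stride rounded up to PAGE_SIZE. Assuming 4 KiB pages and an 8-bit
 * tiled buffer 1280 bytes wide by 720 rows tall, each row occupies one 4 KiB
 * page of virtual space, so the mmap size would be 720 * 4096 = 2949120 bytes
 * even though only 1280 bytes per row hold valid pixels.
 */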
/* ----------------------------------------------------------------------------- */

/* Normal handling for the case of faulting in non-tiled buffers */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
/* Special handling for the case of faulting in 2d tiled buffers */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	pgoff_t pgoff, base_pgoff;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb-wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be
	 * taken into account in some of the math, so figure out the virtual
	 * stride width in pages.
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
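	/*
	 * Worked example (illustrative, values assumed): with 4 KiB pages and
	 * an 8-bit format (fmt shift of 0), a buffer 2048 bytes wide gives
	 * m = DIV_ROUND_UP(2048, 4096) = 1, while a 6000-byte-wide buffer
	 * gives m = DIV_ROUND_UP(6000, 4096) = 2, i.e. each slot row spans
	 * two pages of virtual stride.
	 */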
	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
		entry->obj_pgoff += off;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
				vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				% NUM_USERGART_ENTRIES;
/**
 * omap_gem_fault - pagefault handler for GEM objects
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
vm_fault_t omap_gem_fault(struct vm_fault *vmf)
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
		ret = vmf_error(err);

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = omap_gem_fault_2d(obj, vma, vmf);
		ret = omap_gem_fault_1d(obj, vma, vmf);

	mutex_unlock(&omap_obj->lock);
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
	ret = drm_gem_mmap(filp, vma);
		DBG("mmap failed: %d", ret);

	return omap_gem_mmap_obj(vma->vm_private_data, vma);

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));

		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 */
		if (WARN_ON(!obj->filp))

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
/* ----------------------------------------------------------------------------- */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @drm_file: our client file
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
	union omap_gem_size gsize;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
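/*
 * Worked example (illustrative, not from the original source): for a
 * 1920x1080 dumb buffer at 32 bpp the code above computes
 * pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (already a multiple of the
 * 4 KiB page size), and then allocates a write-combined scanout buffer of
 * that size.
 */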
/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		u32 handle, u64 *offset)
	struct drm_gem_object *obj;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);
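/*
 * Usage sketch (illustrative, not part of this file): userspace consumes the
 * fake offset returned here by passing it to mmap() on the DRM device fd,
 * typically via the generic dumb-buffer ioctls:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */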
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position. This allows us to implement fast scrolling
 * for the console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;

		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);

	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		ret = omap_gem_attach_pages(obj);

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);

	mutex_unlock(&omap_obj->lock);
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/*
 * shmem buffers that are mapped cached are not coherent.
 *
 * We keep track of dirty pages using page faulting to perform cache management.
 * When a page is mapped to the CPU in read/write mode the device can't access
 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
 * unmapped from the CPU.
 */
static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
		 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
/* Sync the buffer for CPU access.. note pages should already be
 * attached, i.e. omap_gem_get_pages()
 */
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_gem_is_cached_coherent(obj))

	if (omap_obj->dma_addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
				PAGE_SIZE, DMA_TO_DEVICE);
		omap_obj->dma_addrs[pgoff] = 0;
/* sync the buffer for DMA access */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;

	if (omap_gem_is_cached_coherent(obj))

	for (i = 0; i < npages; i++) {
		if (!omap_obj->dma_addrs[i]) {
			addr = dma_map_page(dev->dev, pages[i], 0,

			if (dma_mapping_error(dev->dev, addr)) {
				dev_warn(dev->dev, "%s: failed to map page\n",

			omap_obj->dma_addrs[i] = addr;

		unmap_mapping_range(obj->filp->f_mapping, 0,
				omap_gem_mmap_size(obj), 1);
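/*
 * Usage sketch (illustrative, based on the two helpers above): a caller that
 * touches a cached shmem buffer with the CPU and then hands it to a DMA
 * master pairs the two sync calls, e.g.
 *
 *	omap_gem_cpu_sync_page(obj, pgoff);   // before the CPU touches a page
 *	// ... CPU reads/writes omap_obj->pages[pgoff] ...
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);  // before device access
 */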
/**
 * omap_gem_pin() - Pin a GEM object in memory
 * @obj: the GEM object
 * @dma_addr: the DMA address
 *
 * Pin the given GEM object in memory and fill the dma_addr pointer with the
 * object's DMA address. If the buffer is not physically contiguous it will be
 * remapped through the TILER to provide a contiguous view.
 *
 * Pins are reference-counted, calling this function multiple times is allowed
 * as long as the corresponding omap_gem_unpin() calls are balanced.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
		if (omap_obj->dma_addr_cnt == 0) {
			u32 npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = omap_gem_attach_pages(obj);

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->height, 0);
				block = tiler_reserve_1d(obj->size);

				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, omap_obj->pages, npages,
					omap_obj->roll, true);
				tiler_release(block);
				dev_err(obj->dev->dev,
					"could not pin: %d\n", ret);

			omap_obj->dma_addr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got dma address: %pad", &omap_obj->dma_addr);

		omap_obj->dma_addr_cnt++;

		*dma_addr = omap_obj->dma_addr;
	} else if (omap_gem_is_contiguous(omap_obj)) {
		*dma_addr = omap_obj->dma_addr;

	mutex_unlock(&omap_obj->lock);
/**
 * omap_gem_unpin() - Unpin a GEM object from memory
 * @obj: the GEM object
 *
 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
 * reference-counted, the actual unpin will only be performed when the number
 * of calls to this function matches the number of calls to omap_gem_pin().
 */
void omap_gem_unpin(struct drm_gem_object *obj)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	if (omap_obj->dma_addr_cnt > 0) {
		omap_obj->dma_addr_cnt--;
		if (omap_obj->dma_addr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			ret = tiler_release(omap_obj->block);
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;

	mutex_unlock(&omap_obj->lock);
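/*
 * Usage sketch (illustrative, not from the original file): pin and unpin
 * calls are balanced, so a typical scanout setup looks like
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	// ... program the display hardware with dma_addr ...
 *	omap_gem_unpin(obj);	// once the hardware stops scanning out
 */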
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);

	mutex_unlock(&omap_obj->lock);

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out). And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_unpin() should be changed to work in the
 * same way. If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	ret = omap_gem_attach_pages(obj);

	if (!omap_obj->pages) {

	*pages = omap_obj->pages;

	mutex_unlock(&omap_obj->lock);
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
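/*
 * Usage sketch (illustrative; the elided third parameter of
 * omap_gem_get_pages() is assumed to be the 'remap' flag discussed in the
 * comment above): callers that need the backing page array pair the two
 * helpers, e.g.
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	// ... operate on pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *	omap_gem_put_pages(obj);
 */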
#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
 * Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
			vaddr = ERR_PTR(ret);

		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));

	vaddr = omap_obj->vaddr;

	mutex_unlock(&omap_obj->lock);
/* ----------------------------------------------------------------------------- */

/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct drm_device *dev)
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
				dev_err(dev->dev, "could not repin: %d\n", ret);

	mutex_unlock(&priv->list_lock);
/* ----------------------------------------------------------------------------- */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	off = drm_vma_node_start(&obj->vma_node);

	mutex_lock(&omap_obj->lock);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		seq_printf(m, " %zu", obj->size);

	mutex_unlock(&omap_obj->lock);

	seq_printf(m, "\n");

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
	struct omap_gem_object *omap_obj;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;

		omap_gem_describe(obj, m);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

void omap_gem_free_object(struct drm_gem_object *obj)
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	omap_gem_evict(obj);

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/*
	 * We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take the omap_obj.lock to call
	 * omap_gem_detach_pages(). This should hardly make any difference as
	 * there can't be any lock contention.
	 */
	mutex_lock(&omap_obj->lock);

	/* The object should not be pinned. */
	WARN_ON(omap_obj->dma_addr_cnt > 0);

	if (omap_obj->pages) {
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
			omap_gem_detach_pages(obj);

	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			    omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);

	mutex_unlock(&omap_obj->lock);

	drm_gem_object_release(obj);

	mutex_destroy(&omap_obj->lock);
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");

		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags &= ~OMAP_BO_SCANOUT;
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
		 * tiled. However, to lower the pressure on memory allocation,
		 * use contiguous memory only if no TILER is available.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;

	/* Allocate and initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);

	obj = &omap_obj->base;
	omap_obj->flags = flags;
	mutex_init(&omap_obj->lock);

	if (flags & OMAP_BO_TILED) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
		size = PAGE_ALIGN(gsize.bytes);

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		drm_gem_private_object_init(dev, obj, size);
		ret = drm_gem_object_init(dev, obj, size);

		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
		if (!omap_obj->vaddr)

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	drm_gem_object_release(obj);
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
		return ERR_PTR(-ENOMEM);

	omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
		/* Create pages list from sgt */
		struct sg_page_iter iter;
		struct page **pages;
		unsigned int npages;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);

		omap_obj->pages = pages;

		for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
			pages[i++] = sg_page_iter_page(&iter);

		if (WARN_ON(i != npages)) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);

	mutex_unlock(&omap_obj->lock);
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
	struct drm_gem_object *obj;

	obj = omap_gem_new(dev, gsize, flags);

	ret = drm_gem_handle_create(file, obj, handle);
		omap_gem_free_object(obj);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);
/* ----------------------------------------------------------------------------- */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		u16 h = 1, w = PAGE_SIZE >> i;

		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
					"reserve failed: %d, %d, %ld\n",
					i, j, PTR_ERR(block));

			entry->dma_addr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
					usergart[i].stride_pfn << PAGE_SHIFT);

	priv->usergart = usergart;
	priv->has_dmm = true;

void omap_gem_deinit(struct drm_device *dev)
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);