2 * Copyright © 2010 Daniel Vetter
3 * Copyright © 2011-2014 Intel Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 #include <linux/slab.h> /* fault-inject.h is not standalone! */
28 #include <linux/fault-inject.h>
29 #include <linux/log2.h>
30 #include <linux/random.h>
31 #include <linux/seq_file.h>
32 #include <linux/stop_machine.h>
34 #include <asm/set_memory.h>
36 #include <drm/i915_drm.h>
39 #include "i915_scatterlist.h"
40 #include "i915_trace.h"
41 #include "i915_vgpu.h"
42 #include "intel_drv.h"
43 #include "intel_frontbuffer.h"
45 #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
48 * DOC: Global GTT views
50 * Background and previous state
52 * Historically objects could exist (be bound) in global GTT space only as
53 * singular instances with a view representing all of the object's backing pages
54 * in a linear fashion. This view will be called a normal view.
56 * To support multiple views of the same object, where the number of mapped
57 * pages is not equal to the backing store, or where the layout of the pages
58 * is not linear, the concept of a GGTT view was added.
60 * One example of an alternative view is a stereo display driven by a single
61 * image. In this case we would have a framebuffer looking like this
62 * (2x2 pages):
63 *
64 *    12
65 *    34
66 *
67 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
68 * rendering. In contrast, fed to the display engine would be an alternative
69 * view which could look something like this:
70 *
71 *   1212
72 *   3434
73 *
74 * In this example both the size and layout of pages in the alternative view are
75 * different from the normal view.
77 * Implementation and usage
79 * GGTT views are implemented using VMAs and are distinguished via enum
80 * i915_ggtt_view_type and struct i915_ggtt_view.
82 * A new flavour of core GEM functions which work with GGTT bound objects was
83 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
84 * renaming in large amounts of code. They take the struct i915_ggtt_view
85 * parameter encapsulating all metadata required to implement a view.
87 * As a helper for callers which are only interested in the normal view,
88 * a globally const i915_ggtt_view_normal singleton instance exists. All old
89 * core GEM API functions (the ones not taking the view parameter) operate
90 * on, or with, the normal GGTT view.
92 * Code wanting to add or use a new GGTT view needs to:
94 * 1. Add a new enum with a suitable name.
95 * 2. Extend the metadata in the i915_ggtt_view structure if required.
96 * 3. Add support to i915_get_vma_pages().
98 * New views are required to build a scatter-gather table from within the
99 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
100 * exists for the lifetime of a VMA.
102 * The core API is designed to have copy semantics, which means that a passed-in
103 * struct i915_ggtt_view does not need to be persistent (left around after
104 * calling the core API functions).
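 *
 * Illustrative sketch only (not part of the driver): because of the copy
 * semantics above, a caller can build the view on the stack and let it go
 * out of scope once the object is pinned. The pin helper named below and
 * the omitted per-type metadata are assumptions for this example.
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_ROTATED,
 *		.rotated = { ... metadata required by the chosen type ... },
 *	};
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 */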
109 i915_get_ggtt_vma_pages(struct i915_vma *vma);
111 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
114 * Note that as an uncached mmio write, this will flush the
115 * WCB of the writes into the GGTT before it triggers the invalidate.
117 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
120 static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
122 gen6_ggtt_invalidate(dev_priv);
123 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
126 static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
128 intel_gtt_chipset_flush();
131 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
133 i915->ggtt.invalidate(i915);
136 static int ppgtt_bind_vma(struct i915_vma *vma,
137 enum i915_cache_level cache_level,
143 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
144 err = vma->vm->allocate_va_range(vma->vm,
145 vma->node.start, vma->size);
150 /* Applicable to VLV, and gen8+ */
152 if (i915_gem_object_is_readonly(vma->obj))
153 pte_flags |= PTE_READ_ONLY;
155 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
160 static void ppgtt_unbind_vma(struct i915_vma *vma)
162 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
165 static int ppgtt_set_pages(struct i915_vma *vma)
167 GEM_BUG_ON(vma->pages);
169 vma->pages = vma->obj->mm.pages;
171 vma->page_sizes = vma->obj->mm.page_sizes;
176 static void clear_pages(struct i915_vma *vma)
178 GEM_BUG_ON(!vma->pages);
180 if (vma->pages != vma->obj->mm.pages) {
181 sg_free_table(vma->pages);
186 memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
189 static u64 gen8_pte_encode(dma_addr_t addr,
190 enum i915_cache_level level,
193 gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
195 if (unlikely(flags & PTE_READ_ONLY))
199 case I915_CACHE_NONE:
200 pte |= PPAT_UNCACHED;
203 pte |= PPAT_DISPLAY_ELLC;
213 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
214 const enum i915_cache_level level)
216 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
218 if (level != I915_CACHE_NONE)
219 pde |= PPAT_CACHED_PDE;
221 pde |= PPAT_UNCACHED;
225 #define gen8_pdpe_encode gen8_pde_encode
226 #define gen8_pml4e_encode gen8_pde_encode
228 static u64 snb_pte_encode(dma_addr_t addr,
229 enum i915_cache_level level,
232 gen6_pte_t pte = GEN6_PTE_VALID;
233 pte |= GEN6_PTE_ADDR_ENCODE(addr);
236 case I915_CACHE_L3_LLC:
238 pte |= GEN6_PTE_CACHE_LLC;
240 case I915_CACHE_NONE:
241 pte |= GEN6_PTE_UNCACHED;
250 static u64 ivb_pte_encode(dma_addr_t addr,
251 enum i915_cache_level level,
254 gen6_pte_t pte = GEN6_PTE_VALID;
255 pte |= GEN6_PTE_ADDR_ENCODE(addr);
258 case I915_CACHE_L3_LLC:
259 pte |= GEN7_PTE_CACHE_L3_LLC;
262 pte |= GEN6_PTE_CACHE_LLC;
264 case I915_CACHE_NONE:
265 pte |= GEN6_PTE_UNCACHED;
274 static u64 byt_pte_encode(dma_addr_t addr,
275 enum i915_cache_level level,
278 gen6_pte_t pte = GEN6_PTE_VALID;
279 pte |= GEN6_PTE_ADDR_ENCODE(addr);
281 if (!(flags & PTE_READ_ONLY))
282 pte |= BYT_PTE_WRITEABLE;
284 if (level != I915_CACHE_NONE)
285 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
290 static u64 hsw_pte_encode(dma_addr_t addr,
291 enum i915_cache_level level,
294 gen6_pte_t pte = GEN6_PTE_VALID;
295 pte |= HSW_PTE_ADDR_ENCODE(addr);
297 if (level != I915_CACHE_NONE)
298 pte |= HSW_WB_LLC_AGE3;
303 static u64 iris_pte_encode(dma_addr_t addr,
304 enum i915_cache_level level,
307 gen6_pte_t pte = GEN6_PTE_VALID;
308 pte |= HSW_PTE_ADDR_ENCODE(addr);
311 case I915_CACHE_NONE:
314 pte |= HSW_WT_ELLC_LLC_AGE3;
317 pte |= HSW_WB_ELLC_LLC_AGE3;
324 static void stash_init(struct pagestash *stash)
326 pagevec_init(&stash->pvec);
327 spin_lock_init(&stash->lock);
330 static struct page *stash_pop_page(struct pagestash *stash)
332 struct page *page = NULL;
334 spin_lock(&stash->lock);
335 if (likely(stash->pvec.nr))
336 page = stash->pvec.pages[--stash->pvec.nr];
337 spin_unlock(&stash->lock);
342 static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
346 spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
348 nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
349 memcpy(stash->pvec.pages + stash->pvec.nr,
350 pvec->pages + pvec->nr - nr,
351 sizeof(pvec->pages[0]) * nr);
352 stash->pvec.nr += nr;
354 spin_unlock(&stash->lock);
359 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
361 struct pagevec stack;
364 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
365 i915_gem_shrink_all(vm->i915);
367 page = stash_pop_page(&vm->free_pages);
372 return alloc_page(gfp);
374 /* Look in our global stash of WC pages... */
375 page = stash_pop_page(&vm->i915->mm.wc_stash);
380 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
382 * We have to be careful as page allocation may trigger the shrinker
383 * (via direct reclaim) which will fill up the WC stash underneath us.
384 * So we add our WB pages into a temporary pvec on the stack and merge
385 * them into the WC stash after all the allocations are complete.
387 pagevec_init(&stack);
391 page = alloc_page(gfp);
395 stack.pages[stack.nr++] = page;
396 } while (pagevec_space(&stack));
398 if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
399 page = stack.pages[--stack.nr];
401 /* Merge spare WC pages to the global stash */
403 stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
405 /* Push any surplus WC pages onto the local VM stash */
407 stash_push_pagevec(&vm->free_pages, &stack);
410 /* Return unwanted leftovers */
411 if (unlikely(stack.nr)) {
412 WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
413 __pagevec_release(&stack);
419 static void vm_free_pages_release(struct i915_address_space *vm,
422 struct pagevec *pvec = &vm->free_pages.pvec;
423 struct pagevec stack;
425 lockdep_assert_held(&vm->free_pages.lock);
426 GEM_BUG_ON(!pagevec_count(pvec));
428 if (vm->pt_kmap_wc) {
430 * When we use WC, first fill up the global stash and then
431 * only if full immediately free the overflow.
433 stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
436 * As we have made some room in the VM's free_pages,
437 * we can wait for it to fill again. Unless we are
438 * inside i915_address_space_fini() and must
439 * immediately release the pages!
441 if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
445 * We have to drop the lock to allow ourselves to sleep,
446 * so take a copy of the pvec and clear the stash for
447 * others to use it as we sleep.
450 pagevec_reinit(pvec);
451 spin_unlock(&vm->free_pages.lock);
454 set_pages_array_wb(pvec->pages, pvec->nr);
456 spin_lock(&vm->free_pages.lock);
459 __pagevec_release(pvec);
462 static void vm_free_page(struct i915_address_space *vm, struct page *page)
465 * On !llc, we need to change the pages back to WB. We only do so
466 * in bulk, so we rarely need to change the page attributes here,
467 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
468 * To make detection of the possible sleep more likely, use an
469 * unconditional might_sleep() for everybody.
472 spin_lock(&vm->free_pages.lock);
473 while (!pagevec_space(&vm->free_pages.pvec))
474 vm_free_pages_release(vm, false);
475 GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
476 pagevec_add(&vm->free_pages.pvec, page);
477 spin_unlock(&vm->free_pages.lock);
480 static void i915_address_space_init(struct i915_address_space *vm, int subclass)
483 * The vm->mutex must be reclaim safe (for use in the shrinker).
484 * Do a dummy acquire now under fs_reclaim so that any allocation
485 * attempt holding the lock is immediately reported by lockdep.
487 mutex_init(&vm->mutex);
488 lockdep_set_subclass(&vm->mutex, subclass);
489 i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
491 GEM_BUG_ON(!vm->total);
492 drm_mm_init(&vm->mm, 0, vm->total);
493 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
495 stash_init(&vm->free_pages);
497 INIT_LIST_HEAD(&vm->unbound_list);
498 INIT_LIST_HEAD(&vm->bound_list);
501 static void i915_address_space_fini(struct i915_address_space *vm)
503 spin_lock(&vm->free_pages.lock);
504 if (pagevec_count(&vm->free_pages.pvec))
505 vm_free_pages_release(vm, true);
506 GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
507 spin_unlock(&vm->free_pages.lock);
509 drm_mm_takedown(&vm->mm);
511 mutex_destroy(&vm->mutex);
514 static int __setup_page_dma(struct i915_address_space *vm,
515 struct i915_page_dma *p,
518 p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
519 if (unlikely(!p->page))
522 p->daddr = dma_map_page_attrs(vm->dma,
523 p->page, 0, PAGE_SIZE,
524 PCI_DMA_BIDIRECTIONAL,
525 DMA_ATTR_SKIP_CPU_SYNC |
527 if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
528 vm_free_page(vm, p->page);
535 static int setup_page_dma(struct i915_address_space *vm,
536 struct i915_page_dma *p)
538 return __setup_page_dma(vm, p, __GFP_HIGHMEM);
541 static void cleanup_page_dma(struct i915_address_space *vm,
542 struct i915_page_dma *p)
544 dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
545 vm_free_page(vm, p->page);
548 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
550 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
551 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
552 #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
553 #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
555 static void fill_page_dma(struct i915_address_space *vm,
556 struct i915_page_dma *p,
559 u64 * const vaddr = kmap_atomic(p->page);
561 memset64(vaddr, val, PAGE_SIZE / sizeof(val));
563 kunmap_atomic(vaddr);
566 static void fill_page_dma_32(struct i915_address_space *vm,
567 struct i915_page_dma *p,
570 fill_page_dma(vm, p, (u64)v << 32 | v);
574 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
579 * In order to utilize 64K pages for an object with a size < 2M, we will
580 * need to support a 64K scratch page, given that every 16th entry for a
581 * page-table operating in 64K mode must point to a properly aligned 64K
582 * region, including any PTEs which happen to point to scratch.
584 * This is only relevant for the 48b PPGTT where we support
585 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
586 * scratch (read-only) between all vm, we create one 64k scratch page
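 *
 * Back-of-the-envelope check (not part of the original comment): a page
 * table spans 2M as 512 x 4K slots, and in 64K mode the hardware only
 * reads every 16th slot, so 512 / 16 = 32 entries of 64K cover the same
 * 2M, each of which must point at a 64K-aligned region such as this
 * scratch page.
 */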
589 size = I915_GTT_PAGE_SIZE_4K;
590 if (i915_vm_is_4lvl(vm) &&
591 HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
592 size = I915_GTT_PAGE_SIZE_64K;
595 gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
598 int order = get_order(size);
602 page = alloc_pages(gfp, order);
606 addr = dma_map_page_attrs(vm->dma,
608 PCI_DMA_BIDIRECTIONAL,
609 DMA_ATTR_SKIP_CPU_SYNC |
611 if (unlikely(dma_mapping_error(vm->dma, addr)))
614 if (unlikely(!IS_ALIGNED(addr, size)))
617 vm->scratch_page.page = page;
618 vm->scratch_page.daddr = addr;
619 vm->scratch_order = order;
623 dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
625 __free_pages(page, order);
627 if (size == I915_GTT_PAGE_SIZE_4K)
630 size = I915_GTT_PAGE_SIZE_4K;
631 gfp &= ~__GFP_NOWARN;
635 static void cleanup_scratch_page(struct i915_address_space *vm)
637 struct i915_page_dma *p = &vm->scratch_page;
638 int order = vm->scratch_order;
640 dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
641 PCI_DMA_BIDIRECTIONAL);
642 __free_pages(p->page, order);
645 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
647 struct i915_page_table *pt;
649 pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
651 return ERR_PTR(-ENOMEM);
653 if (unlikely(setup_px(vm, pt))) {
655 return ERR_PTR(-ENOMEM);
662 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
668 static void gen8_initialize_pt(struct i915_address_space *vm,
669 struct i915_page_table *pt)
671 fill_px(vm, pt, vm->scratch_pte);
674 static void gen6_initialize_pt(struct i915_address_space *vm,
675 struct i915_page_table *pt)
677 fill32_px(vm, pt, vm->scratch_pte);
680 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
682 struct i915_page_directory *pd;
684 pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
686 return ERR_PTR(-ENOMEM);
688 if (unlikely(setup_px(vm, pd))) {
690 return ERR_PTR(-ENOMEM);
697 static void free_pd(struct i915_address_space *vm,
698 struct i915_page_directory *pd)
704 static void gen8_initialize_pd(struct i915_address_space *vm,
705 struct i915_page_directory *pd)
708 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
709 memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
712 static int __pdp_init(struct i915_address_space *vm,
713 struct i915_page_directory_pointer *pdp)
715 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
717 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
718 I915_GFP_ALLOW_FAIL);
719 if (unlikely(!pdp->page_directory))
722 memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
727 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
729 kfree(pdp->page_directory);
730 pdp->page_directory = NULL;
733 static struct i915_page_directory_pointer *
734 alloc_pdp(struct i915_address_space *vm)
736 struct i915_page_directory_pointer *pdp;
739 GEM_BUG_ON(!i915_vm_is_4lvl(vm));
741 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
743 return ERR_PTR(-ENOMEM);
745 ret = __pdp_init(vm, pdp);
749 ret = setup_px(vm, pdp);
763 static void free_pdp(struct i915_address_space *vm,
764 struct i915_page_directory_pointer *pdp)
768 if (!i915_vm_is_4lvl(vm))
775 static void gen8_initialize_pdp(struct i915_address_space *vm,
776 struct i915_page_directory_pointer *pdp)
778 gen8_ppgtt_pdpe_t scratch_pdpe;
780 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
782 fill_px(vm, pdp, scratch_pdpe);
785 static void gen8_initialize_pml4(struct i915_address_space *vm,
786 struct i915_pml4 *pml4)
789 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
790 memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
794 * PDE TLBs are a pain to invalidate on GEN8+. When we modify
795 * the page table structures, we mark them dirty so that
796 * context switching/execlist queuing code takes extra steps
797 * to ensure that tlbs are flushed.
799 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
801 ppgtt->pd_dirty_engines = ALL_ENGINES;
804 /* Removes entries from a single page table, releasing it if it's empty.
805 * Caller can use the return value to update higher-level entries.
807 static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
808 struct i915_page_table *pt,
809 u64 start, u64 length)
811 unsigned int num_entries = gen8_pte_count(start, length);
814 GEM_BUG_ON(num_entries > pt->used_ptes);
816 pt->used_ptes -= num_entries;
820 vaddr = kmap_atomic_px(pt);
821 memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
822 kunmap_atomic(vaddr);
827 static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
828 struct i915_page_directory *pd,
829 struct i915_page_table *pt,
834 pd->page_table[pde] = pt;
836 vaddr = kmap_atomic_px(pd);
837 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
838 kunmap_atomic(vaddr);
841 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
842 struct i915_page_directory *pd,
843 u64 start, u64 length)
845 struct i915_page_table *pt;
848 gen8_for_each_pde(pt, pd, start, length, pde) {
849 GEM_BUG_ON(pt == vm->scratch_pt);
851 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
854 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
855 GEM_BUG_ON(!pd->used_pdes);
861 return !pd->used_pdes;
864 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
865 struct i915_page_directory_pointer *pdp,
866 struct i915_page_directory *pd,
869 gen8_ppgtt_pdpe_t *vaddr;
871 pdp->page_directory[pdpe] = pd;
872 if (!i915_vm_is_4lvl(vm))
875 vaddr = kmap_atomic_px(pdp);
876 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
877 kunmap_atomic(vaddr);
880 /* Removes entries from a single page dir pointer, releasing it if it's empty.
881 * Caller can use the return value to update higher-level entries
883 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
884 struct i915_page_directory_pointer *pdp,
885 u64 start, u64 length)
887 struct i915_page_directory *pd;
890 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
891 GEM_BUG_ON(pd == vm->scratch_pd);
893 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
896 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
897 GEM_BUG_ON(!pdp->used_pdpes);
903 return !pdp->used_pdpes;
906 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
907 u64 start, u64 length)
909 gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
912 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
913 struct i915_page_directory_pointer *pdp,
916 gen8_ppgtt_pml4e_t *vaddr;
918 pml4->pdps[pml4e] = pdp;
920 vaddr = kmap_atomic_px(pml4);
921 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
922 kunmap_atomic(vaddr);
925 /* Removes entries from a single pml4.
926 * This is the top-level structure in 4-level page tables used on gen8+.
927 * Empty entries are always scratch pml4e.
929 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
930 u64 start, u64 length)
932 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
933 struct i915_pml4 *pml4 = &ppgtt->pml4;
934 struct i915_page_directory_pointer *pdp;
937 GEM_BUG_ON(!i915_vm_is_4lvl(vm));
939 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
940 GEM_BUG_ON(pdp == vm->scratch_pdp);
942 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
945 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
951 static inline struct sgt_dma {
952 struct scatterlist *sg;
954 } sgt_dma(struct i915_vma *vma) {
955 struct scatterlist *sg = vma->pages->sgl;
956 dma_addr_t addr = sg_dma_address(sg);
957 return (struct sgt_dma) { sg, addr, addr + sg->length };
960 struct gen8_insert_pte {
967 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
969 return (struct gen8_insert_pte) {
970 gen8_pml4e_index(start),
971 gen8_pdpe_index(start),
972 gen8_pde_index(start),
973 gen8_pte_index(start),
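/*
 * Worked example (illustrative, not from the original source): assuming the
 * standard gen8 4-level layout of four 9-bit indices above a 4K page, for
 * start = 0x200000 (2M) gen8_pte_index() is (0x200000 >> 12) & 0x1ff = 0,
 * gen8_pde_index() is 1, and both gen8_pdpe_index() and gen8_pml4e_index()
 * are 0 - i.e. the second page table of the first page directory.
 */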
977 static __always_inline bool
978 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
979 struct i915_page_directory_pointer *pdp,
980 struct sgt_dma *iter,
981 struct gen8_insert_pte *idx,
982 enum i915_cache_level cache_level,
985 struct i915_page_directory *pd;
986 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
990 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
991 pd = pdp->page_directory[idx->pdpe];
992 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
994 vaddr[idx->pte] = pte_encode | iter->dma;
996 iter->dma += I915_GTT_PAGE_SIZE;
997 if (iter->dma >= iter->max) {
998 iter->sg = __sg_next(iter->sg);
1004 iter->dma = sg_dma_address(iter->sg);
1005 iter->max = iter->dma + iter->sg->length;
1008 if (++idx->pte == GEN8_PTES) {
1011 if (++idx->pde == I915_PDES) {
1014 /* Limited by sg length for 3lvl */
1015 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
1021 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
1022 pd = pdp->page_directory[idx->pdpe];
1025 kunmap_atomic(vaddr);
1026 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1029 kunmap_atomic(vaddr);
1034 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
1035 struct i915_vma *vma,
1036 enum i915_cache_level cache_level,
1039 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1040 struct sgt_dma iter = sgt_dma(vma);
1041 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1043 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
1044 cache_level, flags);
1046 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1049 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1050 struct i915_page_directory_pointer **pdps,
1051 struct sgt_dma *iter,
1052 enum i915_cache_level cache_level,
1055 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1056 u64 start = vma->node.start;
1057 dma_addr_t rem = iter->sg->length;
1060 struct gen8_insert_pte idx = gen8_insert_pte(start);
1061 struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
1062 struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
1063 unsigned int page_size;
1064 bool maybe_64K = false;
1065 gen8_pte_t encode = pte_encode;
1069 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1070 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1071 rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
1074 page_size = I915_GTT_PAGE_SIZE_2M;
1076 encode |= GEN8_PDE_PS_2M;
1078 vaddr = kmap_atomic_px(pd);
1080 struct i915_page_table *pt = pd->page_table[idx.pde];
1084 page_size = I915_GTT_PAGE_SIZE;
1087 vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1088 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1089 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1090 rem >= (max - index) * I915_GTT_PAGE_SIZE))
1093 vaddr = kmap_atomic_px(pt);
1097 GEM_BUG_ON(iter->sg->length < page_size);
1098 vaddr[index++] = encode | iter->dma;
1101 iter->dma += page_size;
1103 if (iter->dma >= iter->max) {
1104 iter->sg = __sg_next(iter->sg);
1108 rem = iter->sg->length;
1109 iter->dma = sg_dma_address(iter->sg);
1110 iter->max = iter->dma + rem;
1112 if (maybe_64K && index < max &&
1113 !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1114 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1115 rem >= (max - index) * I915_GTT_PAGE_SIZE)))
1118 if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1121 } while (rem >= page_size && index < max);
1123 kunmap_atomic(vaddr);
1126 * Is it safe to mark the 2M block as 64K? -- Either we have
1127 * filled whole page-table with 64K entries, or filled part of
1128 * it and have reached the end of the sg table and we have
1133 (i915_vm_has_scratch_64K(vma->vm) &&
1134 !iter->sg && IS_ALIGNED(vma->node.start +
1136 I915_GTT_PAGE_SIZE_2M)))) {
1137 vaddr = kmap_atomic_px(pd);
1138 vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
1139 kunmap_atomic(vaddr);
1140 page_size = I915_GTT_PAGE_SIZE_64K;
1143 * We write all 4K page entries, even when using 64K
1144 * pages. In order to verify that the HW isn't cheating
1145 * by using the 4K PTE instead of the 64K PTE, we want
1146 * to remove all the surplus entries. If the HW skipped
1147 * the 64K PTE, it will read/write into the scratch page
1148 * instead - which we detect as missing results during
1151 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1154 encode = vma->vm->scratch_pte;
1155 vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
1157 for (i = 1; i < index; i += 16)
1158 memset64(vaddr + i, encode, 15);
1160 kunmap_atomic(vaddr);
1164 vma->page_sizes.gtt |= page_size;
1168 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
1169 struct i915_vma *vma,
1170 enum i915_cache_level cache_level,
1173 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1174 struct sgt_dma iter = sgt_dma(vma);
1175 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
1177 if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1178 gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
1181 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1183 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
1184 &iter, &idx, cache_level,
1186 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
1188 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1192 static void gen8_free_page_tables(struct i915_address_space *vm,
1193 struct i915_page_directory *pd)
1197 for (i = 0; i < I915_PDES; i++) {
1198 if (pd->page_table[i] != vm->scratch_pt)
1199 free_pt(vm, pd->page_table[i]);
1203 static int gen8_init_scratch(struct i915_address_space *vm)
1208 * If everybody agrees not to write into the scratch page,
1209 * we can reuse it for all vm, keeping contexts and processes separate.
1211 if (vm->has_read_only &&
1212 vm->i915->kernel_context &&
1213 vm->i915->kernel_context->ppgtt) {
1214 struct i915_address_space *clone =
1215 &vm->i915->kernel_context->ppgtt->vm;
1217 GEM_BUG_ON(!clone->has_read_only);
1219 vm->scratch_order = clone->scratch_order;
1220 vm->scratch_pte = clone->scratch_pte;
1221 vm->scratch_pt = clone->scratch_pt;
1222 vm->scratch_pd = clone->scratch_pd;
1223 vm->scratch_pdp = clone->scratch_pdp;
1227 ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1232 gen8_pte_encode(vm->scratch_page.daddr,
1236 vm->scratch_pt = alloc_pt(vm);
1237 if (IS_ERR(vm->scratch_pt)) {
1238 ret = PTR_ERR(vm->scratch_pt);
1239 goto free_scratch_page;
1242 vm->scratch_pd = alloc_pd(vm);
1243 if (IS_ERR(vm->scratch_pd)) {
1244 ret = PTR_ERR(vm->scratch_pd);
1248 if (i915_vm_is_4lvl(vm)) {
1249 vm->scratch_pdp = alloc_pdp(vm);
1250 if (IS_ERR(vm->scratch_pdp)) {
1251 ret = PTR_ERR(vm->scratch_pdp);
1256 gen8_initialize_pt(vm, vm->scratch_pt);
1257 gen8_initialize_pd(vm, vm->scratch_pd);
1258 if (i915_vm_is_4lvl(vm))
1259 gen8_initialize_pdp(vm, vm->scratch_pdp);
1264 free_pd(vm, vm->scratch_pd);
1266 free_pt(vm, vm->scratch_pt);
1268 cleanup_scratch_page(vm);
1273 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1275 struct i915_address_space *vm = &ppgtt->vm;
1276 struct drm_i915_private *dev_priv = vm->i915;
1277 enum vgt_g2v_type msg;
1280 if (i915_vm_is_4lvl(vm)) {
1281 const u64 daddr = px_dma(&ppgtt->pml4);
1283 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1284 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1286 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1287 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1289 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1290 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1292 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1293 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1296 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1297 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1300 I915_WRITE(vgtif_reg(g2v_notify), msg);
1305 static void gen8_free_scratch(struct i915_address_space *vm)
1307 if (!vm->scratch_page.daddr)
1310 if (i915_vm_is_4lvl(vm))
1311 free_pdp(vm, vm->scratch_pdp);
1312 free_pd(vm, vm->scratch_pd);
1313 free_pt(vm, vm->scratch_pt);
1314 cleanup_scratch_page(vm);
1317 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1318 struct i915_page_directory_pointer *pdp)
1320 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1323 for (i = 0; i < pdpes; i++) {
1324 if (pdp->page_directory[i] == vm->scratch_pd)
1327 gen8_free_page_tables(vm, pdp->page_directory[i]);
1328 free_pd(vm, pdp->page_directory[i]);
1334 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1338 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1339 if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
1342 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
1345 cleanup_px(&ppgtt->vm, &ppgtt->pml4);
1348 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1350 struct drm_i915_private *dev_priv = vm->i915;
1351 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1353 if (intel_vgpu_active(dev_priv))
1354 gen8_ppgtt_notify_vgt(ppgtt, false);
1356 if (i915_vm_is_4lvl(vm))
1357 gen8_ppgtt_cleanup_4lvl(ppgtt);
1359 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
1361 gen8_free_scratch(vm);
1364 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1365 struct i915_page_directory *pd,
1366 u64 start, u64 length)
1368 struct i915_page_table *pt;
1372 gen8_for_each_pde(pt, pd, start, length, pde) {
1373 int count = gen8_pte_count(start, length);
1375 if (pt == vm->scratch_pt) {
1384 if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1385 gen8_initialize_pt(vm, pt);
1387 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1388 GEM_BUG_ON(pd->used_pdes > I915_PDES);
1391 pt->used_ptes += count;
1396 gen8_ppgtt_clear_pd(vm, pd, from, start - from);
1400 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1401 struct i915_page_directory_pointer *pdp,
1402 u64 start, u64 length)
1404 struct i915_page_directory *pd;
1409 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1410 if (pd == vm->scratch_pd) {
1419 gen8_initialize_pd(vm, pd);
1420 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1421 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1424 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1432 if (!pd->used_pdes) {
1433 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1434 GEM_BUG_ON(!pdp->used_pdpes);
1439 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1443 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1444 u64 start, u64 length)
1446 return gen8_ppgtt_alloc_pdp(vm,
1447 &i915_vm_to_ppgtt(vm)->pdp, start, length);
1450 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1451 u64 start, u64 length)
1453 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1454 struct i915_pml4 *pml4 = &ppgtt->pml4;
1455 struct i915_page_directory_pointer *pdp;
1460 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1461 if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1462 pdp = alloc_pdp(vm);
1466 gen8_initialize_pdp(vm, pdp);
1467 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1470 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1478 if (!pdp->used_pdpes) {
1479 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1483 gen8_ppgtt_clear_4lvl(vm, from, start - from);
1487 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1489 struct i915_address_space *vm = &ppgtt->vm;
1490 struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1491 struct i915_page_directory *pd;
1492 u64 start = 0, length = ppgtt->vm.total;
1496 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1501 gen8_initialize_pd(vm, pd);
1502 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1506 pdp->used_pdpes++; /* never remove */
1511 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1512 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1515 pdp->used_pdpes = 0;
1519 static void ppgtt_init(struct drm_i915_private *i915,
1520 struct i915_hw_ppgtt *ppgtt)
1522 kref_init(&ppgtt->ref);
1524 ppgtt->vm.i915 = i915;
1525 ppgtt->vm.dma = &i915->drm.pdev->dev;
1526 ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
1528 i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
1530 ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
1531 ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
1532 ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
1533 ppgtt->vm.vma_ops.clear_pages = clear_pages;
1537 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP registers
1538 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1539 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
1543 static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1545 struct i915_hw_ppgtt *ppgtt;
1548 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1550 return ERR_PTR(-ENOMEM);
1552 ppgtt_init(i915, ppgtt);
1555 * From bdw, there is hw support for read-only pages in the PPGTT.
1557 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
1560 ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
1562 /* There are only a few exceptions for gen >= 6: chv and bxt.
1563 * And we are not sure about the latter, so play safe for now.
1565 if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1566 ppgtt->vm.pt_kmap_wc = true;
1568 err = gen8_init_scratch(&ppgtt->vm);
1572 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1573 err = setup_px(&ppgtt->vm, &ppgtt->pml4);
1577 gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
1579 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1580 ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
1581 ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
1583 err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
1587 if (intel_vgpu_active(i915)) {
1588 err = gen8_preallocate_top_level_pdp(ppgtt);
1590 __pdp_fini(&ppgtt->pdp);
1595 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1596 ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
1597 ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
1600 if (intel_vgpu_active(i915))
1601 gen8_ppgtt_notify_vgt(ppgtt, true);
1603 ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1608 gen8_free_scratch(&ppgtt->vm);
1611 return ERR_PTR(err);
1614 /* Write pde (index) from the page directory @pd to the page table @pt */
1615 static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
1616 const unsigned int pde,
1617 const struct i915_page_table *pt)
1619 /* Caller needs to make sure the write completes if necessary */
1620 iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1621 ppgtt->pd_addr + pde);
1624 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1626 struct intel_engine_cs *engine;
1627 u32 ecochk, ecobits;
1628 enum intel_engine_id id;
1630 ecobits = I915_READ(GAC_ECO_BITS);
1631 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1633 ecochk = I915_READ(GAM_ECOCHK);
1634 if (IS_HASWELL(dev_priv)) {
1635 ecochk |= ECOCHK_PPGTT_WB_HSW;
1637 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1638 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1640 I915_WRITE(GAM_ECOCHK, ecochk);
1642 for_each_engine(engine, dev_priv, id) {
1643 /* GFX_MODE is per-ring on gen7+ */
1644 I915_WRITE(RING_MODE_GEN7(engine),
1645 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1649 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1651 u32 ecochk, gab_ctl, ecobits;
1653 ecobits = I915_READ(GAC_ECO_BITS);
1654 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1655 ECOBITS_PPGTT_CACHE64B);
1657 gab_ctl = I915_READ(GAB_CTL);
1658 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1660 ecochk = I915_READ(GAM_ECOCHK);
1661 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1663 if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
1664 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1667 /* PPGTT support for Sandybridge/Gen6 and later */
1668 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1669 u64 start, u64 length)
1671 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1672 unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1673 unsigned int pde = first_entry / GEN6_PTES;
1674 unsigned int pte = first_entry % GEN6_PTES;
1675 unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1676 const gen6_pte_t scratch_pte = vm->scratch_pte;
1678 while (num_entries) {
1679 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
1680 const unsigned int count = min(num_entries, GEN6_PTES - pte);
1683 GEM_BUG_ON(pt == vm->scratch_pt);
1685 num_entries -= count;
1687 GEM_BUG_ON(count > pt->used_ptes);
1688 pt->used_ptes -= count;
1690 ppgtt->scan_for_unused_pt = true;
1693 * Note that the hw doesn't support removing PDEs on the fly
1694 * (they are cached inside the context with no means to
1695 * invalidate the cache), so we can only reset the PTE
1696 * entries back to scratch.
1699 vaddr = kmap_atomic_px(pt);
1700 memset32(vaddr + pte, scratch_pte, count);
1701 kunmap_atomic(vaddr);
1707 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1708 struct i915_vma *vma,
1709 enum i915_cache_level cache_level,
1712 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1713 unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
1714 unsigned act_pt = first_entry / GEN6_PTES;
1715 unsigned act_pte = first_entry % GEN6_PTES;
1716 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1717 struct sgt_dma iter = sgt_dma(vma);
1720 GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
1722 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1724 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1726 iter.dma += I915_GTT_PAGE_SIZE;
1727 if (iter.dma == iter.max) {
1728 iter.sg = __sg_next(iter.sg);
1732 iter.dma = sg_dma_address(iter.sg);
1733 iter.max = iter.dma + iter.sg->length;
1736 if (++act_pte == GEN6_PTES) {
1737 kunmap_atomic(vaddr);
1738 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1742 kunmap_atomic(vaddr);
1744 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1747 static int gen6_alloc_va_range(struct i915_address_space *vm,
1748 u64 start, u64 length)
1750 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1751 struct i915_page_table *pt;
1752 intel_wakeref_t wakeref;
1757 wakeref = intel_runtime_pm_get(vm->i915);
1759 gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
1760 const unsigned int count = gen6_pte_count(start, length);
1762 if (pt == vm->scratch_pt) {
1767 gen6_initialize_pt(vm, pt);
1768 ppgtt->base.pd.page_table[pde] = pt;
1770 if (i915_vma_is_bound(ppgtt->vma,
1771 I915_VMA_GLOBAL_BIND)) {
1772 gen6_write_pde(ppgtt, pde, pt);
1776 GEM_BUG_ON(pt->used_ptes);
1779 pt->used_ptes += count;
1783 mark_tlbs_dirty(&ppgtt->base);
1784 gen6_ggtt_invalidate(vm->i915);
1787 intel_runtime_pm_put(vm->i915, wakeref);
1792 intel_runtime_pm_put(vm->i915, wakeref);
1793 gen6_ppgtt_clear_range(vm, from, start - from);
1797 static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1799 struct i915_address_space * const vm = &ppgtt->base.vm;
1800 struct i915_page_table *unused;
1804 ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1808 vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1812 vm->scratch_pt = alloc_pt(vm);
1813 if (IS_ERR(vm->scratch_pt)) {
1814 cleanup_scratch_page(vm);
1815 return PTR_ERR(vm->scratch_pt);
1818 gen6_initialize_pt(vm, vm->scratch_pt);
1819 gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
1820 ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
1825 static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
1827 free_pt(vm, vm->scratch_pt);
1828 cleanup_scratch_page(vm);
1831 static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
1833 struct i915_page_table *pt;
1836 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1837 if (pt != ppgtt->base.vm.scratch_pt)
1838 free_pt(&ppgtt->base.vm, pt);
1841 struct gen6_ppgtt_cleanup_work {
1842 struct work_struct base;
1843 struct i915_vma *vma;
1846 static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
1848 struct gen6_ppgtt_cleanup_work *work =
1849 container_of(wrk, typeof(*work), base);
1850 /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
1851 struct drm_i915_private *i915 = work->vma->vm->i915;
1853 mutex_lock(&i915->drm.struct_mutex);
1854 i915_vma_destroy(work->vma);
1855 mutex_unlock(&i915->drm.struct_mutex);
1860 static int nop_set_pages(struct i915_vma *vma)
1865 static void nop_clear_pages(struct i915_vma *vma)
1869 static int nop_bind(struct i915_vma *vma,
1870 enum i915_cache_level cache_level,
1876 static void nop_unbind(struct i915_vma *vma)
1880 static const struct i915_vma_ops nop_vma_ops = {
1881 .set_pages = nop_set_pages,
1882 .clear_pages = nop_clear_pages,
1883 .bind_vma = nop_bind,
1884 .unbind_vma = nop_unbind,
1887 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1889 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1890 struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
1892 /* FIXME remove the struct_mutex to bring the locking under control */
1893 INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
1894 work->vma = ppgtt->vma;
1895 work->vma->ops = &nop_vma_ops;
1896 schedule_work(&work->base);
1898 gen6_ppgtt_free_pd(ppgtt);
1899 gen6_ppgtt_free_scratch(vm);
1902 static int pd_vma_set_pages(struct i915_vma *vma)
1904 vma->pages = ERR_PTR(-ENODEV);
1908 static void pd_vma_clear_pages(struct i915_vma *vma)
1910 GEM_BUG_ON(!vma->pages);
1915 static int pd_vma_bind(struct i915_vma *vma,
1916 enum i915_cache_level cache_level,
1919 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1920 struct gen6_hw_ppgtt *ppgtt = vma->private;
1921 u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1922 struct i915_page_table *pt;
1925 ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
1926 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
1928 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1929 gen6_write_pde(ppgtt, pde, pt);
1931 mark_tlbs_dirty(&ppgtt->base);
1932 gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1937 static void pd_vma_unbind(struct i915_vma *vma)
1939 struct gen6_hw_ppgtt *ppgtt = vma->private;
1940 struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
1941 struct i915_page_table *pt;
1944 if (!ppgtt->scan_for_unused_pt)
1947 /* Free all no longer used page tables */
1948 gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
1949 if (pt->used_ptes || pt == scratch_pt)
1952 free_pt(&ppgtt->base.vm, pt);
1953 ppgtt->base.pd.page_table[pde] = scratch_pt;
1956 ppgtt->scan_for_unused_pt = false;
1959 static const struct i915_vma_ops pd_vma_ops = {
1960 .set_pages = pd_vma_set_pages,
1961 .clear_pages = pd_vma_clear_pages,
1962 .bind_vma = pd_vma_bind,
1963 .unbind_vma = pd_vma_unbind,
1966 static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
1968 struct drm_i915_private *i915 = ppgtt->base.vm.i915;
1969 struct i915_ggtt *ggtt = &i915->ggtt;
1970 struct i915_vma *vma;
1972 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
1973 GEM_BUG_ON(size > ggtt->vm.total);
1975 vma = i915_vma_alloc();
1977 return ERR_PTR(-ENOMEM);
1979 i915_active_init(i915, &vma->active, NULL);
1980 INIT_ACTIVE_REQUEST(&vma->last_fence);
1982 vma->vm = &ggtt->vm;
1983 vma->ops = &pd_vma_ops;
1984 vma->private = ppgtt;
1987 vma->fence_size = size;
1988 vma->flags = I915_VMA_GGTT;
1989 vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
1991 INIT_LIST_HEAD(&vma->obj_link);
1993 mutex_lock(&vma->vm->mutex);
1994 list_add(&vma->vm_link, &vma->vm->unbound_list);
1995 mutex_unlock(&vma->vm->mutex);
2000 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2002 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2005 GEM_BUG_ON(ppgtt->base.vm.closed);
2008 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
2009 * which will be pinned into every active context.
2010 * (When vma->pin_count becomes atomic, I expect we will naturally
2011 * need a larger, unpacked, type and kill this redundancy.)
2013 if (ppgtt->pin_count++)
2017 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2018 * allocator works in address space sizes, so it's multiplied by page
2019 * size. We allocate at the top of the GTT to avoid fragmentation.
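 *
 * (Back of the envelope, not from the original comment: assuming
 * GEN6_PD_SIZE is I915_PDES * PAGE_SIZE, that is 512 * 4096 = 2M of GGTT
 * address space reserved for the 512 PDEs.)
 */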
2021 err = i915_vma_pin(ppgtt->vma,
2023 PIN_GLOBAL | PIN_HIGH);
2030 ppgtt->pin_count = 0;
2034 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
2036 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2038 GEM_BUG_ON(!ppgtt->pin_count);
2039 if (--ppgtt->pin_count)
2042 i915_vma_unpin(ppgtt->vma);
2045 void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
2047 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2049 if (!ppgtt->pin_count)
2052 ppgtt->pin_count = 0;
2053 i915_vma_unpin(ppgtt->vma);
2056 static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
2058 struct i915_ggtt * const ggtt = &i915->ggtt;
2059 struct gen6_hw_ppgtt *ppgtt;
2062 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2064 return ERR_PTR(-ENOMEM);
2066 ppgtt_init(i915, &ppgtt->base);
2068 ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
2069 ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
2070 ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
2071 ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
2073 ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
2075 ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
2081 err = gen6_ppgtt_init_scratch(ppgtt);
2085 ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
2086 if (IS_ERR(ppgtt->vma)) {
2087 err = PTR_ERR(ppgtt->vma);
2091 return &ppgtt->base;
2094 gen6_ppgtt_free_scratch(&ppgtt->base.vm);
2099 return ERR_PTR(err);
2102 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
2104 /* This function is for gtt related workarounds. It is called on driver
2105 * load and after a GPU reset, so you can place workarounds here even if
2106 * they get overwritten by a GPU reset.
2108 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2109 if (IS_BROADWELL(dev_priv))
2110 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2111 else if (IS_CHERRYVIEW(dev_priv))
2112 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2113 else if (IS_GEN9_LP(dev_priv))
2114 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2115 else if (INTEL_GEN(dev_priv) >= 9)
2116 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2119 * To support 64K PTEs we need to first enable the use of the
2120 * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
2121 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2122 * shouldn't be needed after GEN10.
2124 * 64K pages were first introduced from BDW+, although technically they
2125 * only *work* from gen9+. For pre-BDW we instead have the option for
2126 * 32K pages, but we don't currently have any support for it in our
2129 if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2130 INTEL_GEN(dev_priv) <= 10)
2131 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2132 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2133 GAMW_ECO_ENABLE_64K_IPS_FIELD);
2136 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2138 gtt_write_workarounds(dev_priv);
2140 if (IS_GEN(dev_priv, 6))
2141 gen6_ppgtt_enable(dev_priv);
2142 else if (IS_GEN(dev_priv, 7))
2143 gen7_ppgtt_enable(dev_priv);
2148 static struct i915_hw_ppgtt *
2149 __hw_ppgtt_create(struct drm_i915_private *i915)
2151 if (INTEL_GEN(i915) < 8)
2152 return gen6_ppgtt_create(i915);
2154 return gen8_ppgtt_create(i915);
2157 struct i915_hw_ppgtt *
2158 i915_ppgtt_create(struct drm_i915_private *i915)
2160 struct i915_hw_ppgtt *ppgtt;
2162 ppgtt = __hw_ppgtt_create(i915);
2166 trace_i915_ppgtt_create(&ppgtt->vm);
2171 static void ppgtt_destroy_vma(struct i915_address_space *vm)
2173 struct list_head *phases[] = {
2180 for (phase = phases; *phase; phase++) {
2181 struct i915_vma *vma, *vn;
2183 list_for_each_entry_safe(vma, vn, *phase, vm_link)
2184 i915_vma_destroy(vma);
2188 void i915_ppgtt_release(struct kref *kref)
2190 struct i915_hw_ppgtt *ppgtt =
2191 container_of(kref, struct i915_hw_ppgtt, ref);
2193 trace_i915_ppgtt_release(&ppgtt->vm);
2195 ppgtt_destroy_vma(&ppgtt->vm);
2197 GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
2198 GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
2200 ppgtt->vm.cleanup(&ppgtt->vm);
2201 i915_address_space_fini(&ppgtt->vm);
2205 /* Certain Gen5 chipsets require idling the GPU before
2206 * unmapping anything from the GTT when VT-d is enabled.
2208 static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2210 /* Query intel_iommu to see if we need the workaround. Presumably that
2213 return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
2216 static void gen6_check_faults(struct drm_i915_private *dev_priv)
2218 struct intel_engine_cs *engine;
2219 enum intel_engine_id id;
2222 for_each_engine(engine, dev_priv, id) {
2223 fault = I915_READ(RING_FAULT_REG(engine));
2224 if (fault & RING_FAULT_VALID) {
2225 DRM_DEBUG_DRIVER("Unexpected fault\n"
2227 "\tAddress space: %s\n"
2231 fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2232 RING_FAULT_SRCID(fault),
2233 RING_FAULT_FAULT_TYPE(fault));
2238 static void gen8_check_faults(struct drm_i915_private *dev_priv)
2240 u32 fault = I915_READ(GEN8_RING_FAULT_REG);
2242 if (fault & RING_FAULT_VALID) {
2243 u32 fault_data0, fault_data1;
2246 fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
2247 fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
2248 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
2249 ((u64)fault_data0 << 12);
2251 DRM_DEBUG_DRIVER("Unexpected fault\n"
2252 "\tAddr: 0x%08x_%08x\n"
2253 "\tAddress space: %s\n"
2257 upper_32_bits(fault_addr),
2258 lower_32_bits(fault_addr),
2259 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
2260 GEN8_RING_FAULT_ENGINE_ID(fault),
2261 RING_FAULT_SRCID(fault),
2262 RING_FAULT_FAULT_TYPE(fault));
2266 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2268 /* From GEN8 onwards we only have one 'All Engine Fault Register' */
2269 if (INTEL_GEN(dev_priv) >= 8)
2270 gen8_check_faults(dev_priv);
2271 else if (INTEL_GEN(dev_priv) >= 6)
2272 gen6_check_faults(dev_priv);
2276 i915_clear_error_registers(dev_priv);
2279 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2281 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2283 /* Don't bother messing with faults pre GEN6 as we have little
2284 * documentation supporting that it's a good idea.
2286 if (INTEL_GEN(dev_priv) < 6)
2289 i915_check_and_clear_faults(dev_priv);
2291 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2293 i915_ggtt_invalidate(dev_priv);
2296 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2297 struct sg_table *pages)
2300 if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2301 pages->sgl, pages->nents,
2302 PCI_DMA_BIDIRECTIONAL,
2307 * If the DMA remap fails, one cause can be that we have
2308 * too many objects pinned in a small remapping table,
2309 * such as swiotlb. Incrementally purge all other objects and
2310 * try again - if there are no more pages to remove from
2311 * the DMA remapper, i915_gem_shrink will return 0.
2313 GEM_BUG_ON(obj->mm.pages == pages);
2314 } while (i915_gem_shrink(to_i915(obj->base.dev),
2315 obj->base.size >> PAGE_SHIFT, NULL,
2317 I915_SHRINK_UNBOUND));
2322 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2327 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2330 enum i915_cache_level level,
2333 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2334 gen8_pte_t __iomem *pte =
2335 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2337 gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
2339 ggtt->invalidate(vm->i915);
2342 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2343 struct i915_vma *vma,
2344 enum i915_cache_level level,
2347 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2348 struct sgt_iter sgt_iter;
2349 gen8_pte_t __iomem *gtt_entries;
2350 const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
2354 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
2355 * not to allow the user to override access to a read only page.
2358 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2359 gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
2360 for_each_sgt_dma(addr, sgt_iter, vma->pages)
2361 gen8_set_pte(gtt_entries++, pte_encode | addr);
2364 * We want to flush the TLBs only after we're certain all the PTE
2365 * updates have finished.
2367 ggtt->invalidate(vm->i915);
2370 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2373 enum i915_cache_level level,
2376 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2377 gen6_pte_t __iomem *pte =
2378 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2380 iowrite32(vm->pte_encode(addr, level, flags), pte);
2382 ggtt->invalidate(vm->i915);
2386 * Binds an object into the global gtt with the specified cache level. The object
2387 * will be accessible to the GPU via commands whose operands reference offsets
2388 * within the global GTT as well as accessible by the GPU through the GMADR
2389 * mapped BAR (dev_priv->mm.gtt->gtt).
2391 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2392 struct i915_vma *vma,
2393 enum i915_cache_level level,
2396 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2397 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2398 unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
2399 struct sgt_iter iter;
2401 for_each_sgt_dma(addr, iter, vma->pages)
2402 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2405 * We want to flush the TLBs only after we're certain all the PTE
2406 * updates have finished.
2408 ggtt->invalidate(vm->i915);
2411 static void nop_clear_range(struct i915_address_space *vm,
2412 u64 start, u64 length)
2416 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2417 u64 start, u64 length)
2419 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2420 unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2421 unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2422 const gen8_pte_t scratch_pte = vm->scratch_pte;
2423 gen8_pte_t __iomem *gtt_base =
2424 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2425 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2428 if (WARN(num_entries > max_entries,
2429 "First entry = %d; Num entries = %d (max=%d)\n",
2430 first_entry, num_entries, max_entries))
2431 num_entries = max_entries;
2433 for (i = 0; i < num_entries; i++)
2434 gen8_set_pte(&gtt_base[i], scratch_pte);
2437 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2439 struct drm_i915_private *dev_priv = vm->i915;
2442 * Make sure the internal GAM fifo has been cleared of all GTT
2443 * writes before exiting stop_machine(). This guarantees that
2444 * any aperture accesses waiting to start in another process
2445 * cannot back up behind the GTT writes causing a hang.
2446 * The register can be any arbitrary GAM register.
2448 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2451 struct insert_page {
2452 struct i915_address_space *vm;
2455 enum i915_cache_level level;
2458 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2460 struct insert_page *arg = _arg;
2462 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2463 bxt_vtd_ggtt_wa(arg->vm);
2468 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2471 enum i915_cache_level level,
2474 struct insert_page arg = { vm, addr, offset, level };
2476 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2479 struct insert_entries {
2480 struct i915_address_space *vm;
2481 struct i915_vma *vma;
2482 enum i915_cache_level level;
2486 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2488 struct insert_entries *arg = _arg;
2490 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2491 bxt_vtd_ggtt_wa(arg->vm);
2496 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2497 struct i915_vma *vma,
2498 enum i915_cache_level level,
2501 struct insert_entries arg = { vm, vma, level, flags };
2503 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2506 struct clear_range {
2507 struct i915_address_space *vm;
2512 static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2514 struct clear_range *arg = _arg;
2516 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2517 bxt_vtd_ggtt_wa(arg->vm);
2522 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2526 struct clear_range arg = { vm, start, length };
2528 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
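/*
 * The three __BKL wrappers above share one pattern; a minimal sketch of it,
 * with hypothetical names used purely for illustration:
 *
 *	struct my_op_args { struct i915_address_space *vm; ... };
 *
 *	static int my_op__cb(void *_arg)
 *	{
 *		struct my_op_args *arg = _arg;
 *
 *		perform_the_ggtt_update(arg);	/. hypothetical helper ./
 *		bxt_vtd_ggtt_wa(arg->vm);	/. drain the GAM fifo ./
 *		return 0;
 *	}
 *
 *	stop_machine(my_op__cb, &arg, NULL);
 *
 * Running the update inside stop_machine() serialises it against any
 * concurrent aperture access, which is the VT-d workaround these wrappers
 * implement.
 */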
2531 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2532 u64 start, u64 length)
2534 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2535 unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2536 unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2537 gen6_pte_t scratch_pte, __iomem *gtt_base =
2538 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2539 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2542 if (WARN(num_entries > max_entries,
2543 "First entry = %d; Num entries = %d (max=%d)\n",
2544 first_entry, num_entries, max_entries))
2545 num_entries = max_entries;
2547 scratch_pte = vm->scratch_pte;
2549 for (i = 0; i < num_entries; i++)
2550 iowrite32(scratch_pte, &gtt_base[i]);
2553 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2556 enum i915_cache_level cache_level,
2559 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2560 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2562 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2565 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2566 struct i915_vma *vma,
2567 enum i915_cache_level cache_level,
2570 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2571 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2573 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2577 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2578 u64 start, u64 length)
2580 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2583 static int ggtt_bind_vma(struct i915_vma *vma,
2584 enum i915_cache_level cache_level,
2587 struct drm_i915_private *i915 = vma->vm->i915;
2588 struct drm_i915_gem_object *obj = vma->obj;
2589 intel_wakeref_t wakeref;
2592 /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2594 if (i915_gem_object_is_readonly(obj))
2595 pte_flags |= PTE_READ_ONLY;
2597 with_intel_runtime_pm(i915, wakeref)
2598 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2600 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2603 * Without aliasing PPGTT there's no difference between
2604 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2605 * upgrade to both bound if we bind either to avoid double-binding.
2607 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2612 static void ggtt_unbind_vma(struct i915_vma *vma)
2614 struct drm_i915_private *i915 = vma->vm->i915;
2615 intel_wakeref_t wakeref;
2617 with_intel_runtime_pm(i915, wakeref)
2618 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2621 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2622 enum i915_cache_level cache_level,
2625 struct drm_i915_private *i915 = vma->vm->i915;
2629 /* Currently applicable only to VLV */
2631 if (i915_gem_object_is_readonly(vma->obj))
2632 pte_flags |= PTE_READ_ONLY;
2634 if (flags & I915_VMA_LOCAL_BIND) {
2635 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2637 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2638 ret = appgtt->vm.allocate_va_range(&appgtt->vm,
2645 appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
2649 if (flags & I915_VMA_GLOBAL_BIND) {
2650 intel_wakeref_t wakeref;
2652 with_intel_runtime_pm(i915, wakeref) {
2653 vma->vm->insert_entries(vma->vm, vma,
2654 cache_level, pte_flags);
2661 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2663 struct drm_i915_private *i915 = vma->vm->i915;
2665 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2666 struct i915_address_space *vm = vma->vm;
2667 intel_wakeref_t wakeref;
2669 with_intel_runtime_pm(i915, wakeref)
2670 vm->clear_range(vm, vma->node.start, vma->size);
2673 if (vma->flags & I915_VMA_LOCAL_BIND) {
2674 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
2676 vm->clear_range(vm, vma->node.start, vma->size);
2680 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2681 struct sg_table *pages)
2683 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2684 struct device *kdev = &dev_priv->drm.pdev->dev;
2685 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2687 if (unlikely(ggtt->do_idle_maps)) {
2688 if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
2689 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2690 /* Wait a bit, in hopes it avoids the hang */
2695 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2698 static int ggtt_set_pages(struct i915_vma *vma)
2702 GEM_BUG_ON(vma->pages);
2704 ret = i915_get_ggtt_vma_pages(vma);
2708 vma->page_sizes = vma->obj->mm.page_sizes;
2713 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2714 unsigned long color,
2718 if (node->allocated && node->color != color)
2719 *start += I915_GTT_PAGE_SIZE;
2721 /* Also leave a space between the unallocated reserved node after the
2722 * GTT and any objects within the GTT, i.e. we use the color adjustment
2723 * to insert a guard page to prevent prefetches crossing over the
2726 node = list_next_entry(node, node_list);
2727 if (node->color != color)
2728 *end -= I915_GTT_PAGE_SIZE;
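/*
 * Worked example of the colour adjustment above (the offsets are assumptions
 * for illustration): given a hole spanning [0x10000, 0x40000) whose preceding
 * neighbour has a different colour from the object being placed, the usable
 * range shrinks to [0x11000, 0x40000); if the following node also differs in
 * colour, it shrinks further to [0x11000, 0x3f000), leaving one scratch page
 * on each side as a guard.
 */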
2731 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2733 struct i915_ggtt *ggtt = &i915->ggtt;
2734 struct i915_hw_ppgtt *ppgtt;
2737 ppgtt = i915_ppgtt_create(i915);
2739 return PTR_ERR(ppgtt);
2741 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2747 * Note we only pre-allocate as far as the end of the global
2748 * GTT. On 48b / 4-level page-tables, the difference is very,
2749 * very significant! We have to preallocate as GVT/vgpu does
2750 * not like the page directory disappearing.
2752 err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2756 i915->mm.aliasing_ppgtt = ppgtt;
2758 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2759 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2761 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2762 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2767 i915_ppgtt_put(ppgtt);
2771 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2773 struct i915_ggtt *ggtt = &i915->ggtt;
2774 struct i915_hw_ppgtt *ppgtt;
2776 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2780 i915_ppgtt_put(ppgtt);
2782 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
2783 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2786 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2788 /* Let GEM Manage all of the aperture.
2790 * However, leave one page at the end still bound to the scratch page.
2791 * There are a number of places where the hardware apparently prefetches
2792 * past the end of the object, and we've seen multiple hangs with the
2793 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2794 * aperture. One page should be enough to keep any prefetching inside
2797 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2798 unsigned long hole_start, hole_end;
2799 struct drm_mm_node *entry;
2803 * GuC requires all resources that we're sharing with it to be placed in
2804 * non-WOPCM memory. If GuC is not present or not in use we still need a
2805 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
2808 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
2809 intel_guc_reserved_gtt_size(&dev_priv->guc));
2811 ret = intel_vgt_balloon(dev_priv);
2815 /* Reserve a mappable slot for our lockless error capture */
2816 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2817 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2818 0, ggtt->mappable_end,
2823 if (USES_GUC(dev_priv)) {
2824 ret = intel_guc_reserve_ggtt_top(&dev_priv->guc);
2829 /* Clear any non-preallocated blocks */
2830 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2831 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2832 hole_start, hole_end);
2833 ggtt->vm.clear_range(&ggtt->vm, hole_start,
2834 hole_end - hole_start);
2837 /* And finally clear the reserved guard page */
2838 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2840 if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
2841 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2849 intel_guc_release_ggtt_top(&dev_priv->guc);
2851 drm_mm_remove_node(&ggtt->error_capture);
2856 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2857 * @dev_priv: i915 device
2859 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2861 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2862 struct i915_vma *vma, *vn;
2863 struct pagevec *pvec;
2865 ggtt->vm.closed = true;
2867 mutex_lock(&dev_priv->drm.struct_mutex);
2868 i915_gem_fini_aliasing_ppgtt(dev_priv);
2870 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
2871 WARN_ON(i915_vma_unbind(vma));
2873 if (drm_mm_node_allocated(&ggtt->error_capture))
2874 drm_mm_remove_node(&ggtt->error_capture);
2876 intel_guc_release_ggtt_top(&dev_priv->guc);
2878 if (drm_mm_initialized(&ggtt->vm.mm)) {
2879 intel_vgt_deballoon(dev_priv);
2880 i915_address_space_fini(&ggtt->vm);
2883 ggtt->vm.cleanup(&ggtt->vm);
2885 pvec = &dev_priv->mm.wc_stash.pvec;
2887 set_pages_array_wb(pvec->pages, pvec->nr);
2888 __pagevec_release(pvec);
2891 mutex_unlock(&dev_priv->drm.struct_mutex);
2893 arch_phys_wc_del(ggtt->mtrr);
2894 io_mapping_fini(&ggtt->iomap);
2896 i915_gem_cleanup_stolen(dev_priv);
2899 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2901 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2902 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2903 return snb_gmch_ctl << 20;
2906 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2908 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2909 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2911 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2913 #ifdef CONFIG_X86_32
2914 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
2915 if (bdw_gmch_ctl > 4)
2919 return bdw_gmch_ctl << 20;
2922 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2924 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2925 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2928 return 1 << (20 + gmch_ctrl);
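/*
 * Worked example of the size decode above, using an assumed register value:
 * a decoded GGMS field of 3 on gen8 yields 1 << 3 = 8 MiB of PTE space. With
 * 8-byte gen8 PTEs that is 8 MiB / 8 B = 1M entries, which gen8_gmch_probe()
 * below turns into 1M * I915_GTT_PAGE_SIZE = 4 GiB of GGTT address space.
 */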
2933 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2935 struct drm_i915_private *dev_priv = ggtt->vm.i915;
2936 struct pci_dev *pdev = dev_priv->drm.pdev;
2937 phys_addr_t phys_addr;
2940 /* For Modern GENs the PTEs and register space are split in the BAR */
2941 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
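/*
 * Worked example under an assumed BAR size (illustration only): if BAR0 is
 * 16 MiB, the register space occupies the first 8 MiB and the GSM (the GGTT
 * page table) is mapped starting at BAR0 + 8 MiB, which is where phys_addr
 * points.
 */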
2944 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2945 * will be dropped. For WC mappings in general we have 64 byte burst
2946 * writes when the WC buffer is flushed, so we can't use it, but have to
2947 * resort to an uncached mapping. The WC issue is easily caught by the
2948 * readback check when writing GTT PTE entries.
2950 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
2951 ggtt->gsm = ioremap_nocache(phys_addr, size);
2953 ggtt->gsm = ioremap_wc(phys_addr, size);
2955 DRM_ERROR("Failed to map the ggtt page table\n");
2959 ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
2961 DRM_ERROR("Scratch setup failed\n");
2962 /* iounmap will also get called at remove, but meh */
2967 ggtt->vm.scratch_pte =
2968 ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
2969 I915_CACHE_NONE, 0);
2974 static struct intel_ppat_entry *
2975 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
2977 struct intel_ppat_entry *entry = &ppat->entries[index];
2979 GEM_BUG_ON(index >= ppat->max_entries);
2980 GEM_BUG_ON(test_bit(index, ppat->used));
2983 entry->value = value;
2984 kref_init(&entry->ref);
2985 set_bit(index, ppat->used);
2986 set_bit(index, ppat->dirty);
2991 static void __free_ppat_entry(struct intel_ppat_entry *entry)
2993 struct intel_ppat *ppat = entry->ppat;
2994 unsigned int index = entry - ppat->entries;
2996 GEM_BUG_ON(index >= ppat->max_entries);
2997 GEM_BUG_ON(!test_bit(index, ppat->used));
2999 entry->value = ppat->clear_value;
3000 clear_bit(index, ppat->used);
3001 set_bit(index, ppat->dirty);
3005 * intel_ppat_get - get a usable PPAT entry
3006 * @i915: i915 device instance
3007 * @value: the PPAT value required by the caller
3009 * The function searches for an existing PPAT entry that matches the
3010 * required value. If a perfect match is found, that existing entry is
3011 * used. If the match is only partial, it checks whether a free PPAT
3012 * index is available. If so, it allocates a new PPAT index for the
3013 * required entry and updates the HW; otherwise the partially matching
3014 * entry is used.
3016 const struct intel_ppat_entry *
3017 intel_ppat_get(struct drm_i915_private *i915, u8 value)
3019 struct intel_ppat *ppat = &i915->ppat;
3020 struct intel_ppat_entry *entry = NULL;
3021 unsigned int scanned, best_score;
3024 GEM_BUG_ON(!ppat->max_entries);
3026 scanned = best_score = 0;
3027 for_each_set_bit(i, ppat->used, ppat->max_entries) {
3030 score = ppat->match(ppat->entries[i].value, value);
3031 if (score > best_score) {
3032 entry = &ppat->entries[i];
3033 if (score == INTEL_PPAT_PERFECT_MATCH) {
3034 kref_get(&entry->ref);
3042 if (scanned == ppat->max_entries) {
3044 return ERR_PTR(-ENOSPC);
3046 kref_get(&entry->ref);
3050 i = find_first_zero_bit(ppat->used, ppat->max_entries);
3051 entry = __alloc_ppat_entry(ppat, i, value);
3052 ppat->update_hw(i915);
3056 static void release_ppat(struct kref *kref)
3058 struct intel_ppat_entry *entry =
3059 container_of(kref, struct intel_ppat_entry, ref);
3060 struct drm_i915_private *i915 = entry->ppat->i915;
3062 __free_ppat_entry(entry);
3063 entry->ppat->update_hw(i915);
3067 * intel_ppat_put - put back the PPAT entry obtained from intel_ppat_get()
3068 * @entry: an intel PPAT entry
3070 * Put back the PPAT entry obtained from intel_ppat_get(). If the PPAT index
3071 * of the entry is dynamically allocated, its reference count is decreased.
3072 * Once the reference count drops to zero, the PPAT index becomes free again.
3074 void intel_ppat_put(const struct intel_ppat_entry *entry)
3076 struct intel_ppat *ppat = entry->ppat;
3077 unsigned int index = entry - ppat->entries;
3079 GEM_BUG_ON(!ppat->max_entries);
3081 kref_put(&ppat->entries[index].ref, release_ppat);
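/*
 * A minimal usage sketch for the get/put pair above; the value and the error
 * handling are assumptions for illustration, not taken from a real caller:
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *
 *	... program the entry's PAT index into the relevant PTE/register ...
 *
 *	intel_ppat_put(entry);
 */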
3084 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3086 struct intel_ppat *ppat = &dev_priv->ppat;
3089 for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3090 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3091 clear_bit(i, ppat->dirty);
3095 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3097 struct intel_ppat *ppat = &dev_priv->ppat;
3101 for (i = 0; i < ppat->max_entries; i++)
3102 pat |= GEN8_PPAT(i, ppat->entries[i].value);
3104 bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3106 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3107 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3110 static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3112 unsigned int score = 0;
3119 /* Cache attribute has to be matched. */
3120 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
3125 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3128 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3131 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3132 return INTEL_PPAT_PERFECT_MATCH;
3137 static unsigned int chv_private_pat_match(u8 src, u8 dst)
3139 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3140 INTEL_PPAT_PERFECT_MATCH : 0;
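/*
 * Worked example of the BDW scoring above, using assumed values for
 * illustration: matching src = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC |
 * GEN8_PPAT_AGE(3) against dst = GEN8_PPAT_WB | GEN8_PPAT_LLC gives a
 * cacheability match (CA_MATCH) but neither a target-cache nor an age match,
 * so the result is a partial score rather than INTEL_PPAT_PERFECT_MATCH;
 * intel_ppat_get() would keep looking for a better entry or allocate a fresh
 * index if one is free.
 */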
3143 static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3145 ppat->max_entries = 8;
3146 ppat->update_hw = cnl_private_pat_update_hw;
3147 ppat->match = bdw_private_pat_match;
3148 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3150 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3151 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3152 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3153 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3154 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3155 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3156 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3157 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3160 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3161 * bits. When using advanced contexts each context stores its own PAT, but
3162 * writing this data shouldn't be harmful even in those cases. */
3163 static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3165 ppat->max_entries = 8;
3166 ppat->update_hw = bdw_private_pat_update_hw;
3167 ppat->match = bdw_private_pat_match;
3168 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3170 if (!HAS_PPGTT(ppat->i915)) {
3171 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3172 * so RTL will always use the value corresponding to
3174 * So let's disable cache for GGTT to avoid screen corruptions.
3175 * MOCS still can be used though.
3176 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3177 * before this patch, i.e. the same uncached + snooping access
3178 * like on gen6/7 seems to be in effect.
3179 * - So this just fixes blitter/render access. Again it looks
3180 * like it's not just uncached access, but uncached + snooping.
3181 * So we can still hold onto all our assumptions wrt cpu
3182 * clflushing on LLC machines.
3184 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3188 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
3189 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
3190 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
3191 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
3192 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3193 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3194 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3195 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3198 static void chv_setup_private_ppat(struct intel_ppat *ppat)
3200 ppat->max_entries = 8;
3201 ppat->update_hw = bdw_private_pat_update_hw;
3202 ppat->match = chv_private_pat_match;
3203 ppat->clear_value = CHV_PPAT_SNOOP;
3206 * Map WB on BDW to snooped on CHV.
3208 * Only the snoop bit has meaning for CHV, the rest is
3211 * The hardware will never snoop for certain types of accesses:
3212 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3213 * - PPGTT page tables
3214 * - some other special cycles
3216 * As with BDW, we also need to consider the following for GT accesses:
3217 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3218 * so RTL will always use the value corresponding to
3220 * Which means we must set the snoop bit in PAT entry 0
3221 * in order to keep the global status page working.
3224 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3225 __alloc_ppat_entry(ppat, 1, 0);
3226 __alloc_ppat_entry(ppat, 2, 0);
3227 __alloc_ppat_entry(ppat, 3, 0);
3228 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3229 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3230 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3231 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3234 static void gen6_gmch_remove(struct i915_address_space *vm)
3236 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3239 cleanup_scratch_page(vm);
3242 static void setup_private_pat(struct drm_i915_private *dev_priv)
3244 struct intel_ppat *ppat = &dev_priv->ppat;
3247 ppat->i915 = dev_priv;
3249 if (INTEL_GEN(dev_priv) >= 10)
3250 cnl_setup_private_ppat(ppat);
3251 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3252 chv_setup_private_ppat(ppat);
3254 bdw_setup_private_ppat(ppat);
3256 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3258 for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3259 ppat->entries[i].value = ppat->clear_value;
3260 ppat->entries[i].ppat = ppat;
3261 set_bit(i, ppat->dirty);
3264 ppat->update_hw(dev_priv);
3267 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3269 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3270 struct pci_dev *pdev = dev_priv->drm.pdev;
3275 /* TODO: We're not aware of mappable constraints on gen8 yet */
3277 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3278 pci_resource_len(pdev, 2));
3279 ggtt->mappable_end = resource_size(&ggtt->gmadr);
3281 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3283 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3285 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3287 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3288 if (IS_CHERRYVIEW(dev_priv))
3289 size = chv_get_total_gtt_size(snb_gmch_ctl);
3291 size = gen8_get_total_gtt_size(snb_gmch_ctl);
3293 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
3294 ggtt->vm.cleanup = gen6_gmch_remove;
3295 ggtt->vm.insert_page = gen8_ggtt_insert_page;
3296 ggtt->vm.clear_range = nop_clear_range;
3297 if (intel_scanout_needs_vtd_wa(dev_priv))
3298 ggtt->vm.clear_range = gen8_ggtt_clear_range;
3300 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3302 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3303 if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
3304 IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
3305 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3306 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3307 if (ggtt->vm.clear_range != nop_clear_range)
3308 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3310 /* Prevent recursively calling stop_machine() and deadlocks. */
3311 dev_info(dev_priv->drm.dev,
3312 "Disabling error capture for VT-d workaround\n");
3313 i915_disable_error_state(dev_priv, -ENODEV);
3316 ggtt->invalidate = gen6_ggtt_invalidate;
3318 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3319 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3320 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3321 ggtt->vm.vma_ops.clear_pages = clear_pages;
3323 ggtt->vm.pte_encode = gen8_pte_encode;
3325 setup_private_pat(dev_priv);
3327 return ggtt_probe_common(ggtt, size);
3330 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3332 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3333 struct pci_dev *pdev = dev_priv->drm.pdev;
3339 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3340 pci_resource_len(pdev, 2));
3341 ggtt->mappable_end = resource_size(&ggtt->gmadr);
3343 /* 64/512MB is the current min/max we actually know of, but this is just
3344 * a coarse sanity check.
3346 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3347 DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3351 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3353 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3355 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3356 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3358 size = gen6_get_total_gtt_size(snb_gmch_ctl);
3359 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
3361 ggtt->vm.clear_range = nop_clear_range;
3362 if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3363 ggtt->vm.clear_range = gen6_ggtt_clear_range;
3364 ggtt->vm.insert_page = gen6_ggtt_insert_page;
3365 ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3366 ggtt->vm.cleanup = gen6_gmch_remove;
3368 ggtt->invalidate = gen6_ggtt_invalidate;
3370 if (HAS_EDRAM(dev_priv))
3371 ggtt->vm.pte_encode = iris_pte_encode;
3372 else if (IS_HASWELL(dev_priv))
3373 ggtt->vm.pte_encode = hsw_pte_encode;
3374 else if (IS_VALLEYVIEW(dev_priv))
3375 ggtt->vm.pte_encode = byt_pte_encode;
3376 else if (INTEL_GEN(dev_priv) >= 7)
3377 ggtt->vm.pte_encode = ivb_pte_encode;
3379 ggtt->vm.pte_encode = snb_pte_encode;
3381 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3382 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3383 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3384 ggtt->vm.vma_ops.clear_pages = clear_pages;
3386 return ggtt_probe_common(ggtt, size);
3389 static void i915_gmch_remove(struct i915_address_space *vm)
3391 intel_gmch_remove();
3394 static int i915_gmch_probe(struct i915_ggtt *ggtt)
3396 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3397 phys_addr_t gmadr_base;
3400 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3402 DRM_ERROR("failed to set up gmch\n");
3406 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3409 (struct resource) DEFINE_RES_MEM(gmadr_base,
3410 ggtt->mappable_end);
3412 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3413 ggtt->vm.insert_page = i915_ggtt_insert_page;
3414 ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3415 ggtt->vm.clear_range = i915_ggtt_clear_range;
3416 ggtt->vm.cleanup = i915_gmch_remove;
3418 ggtt->invalidate = gmch_ggtt_invalidate;
3420 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3421 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3422 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3423 ggtt->vm.vma_ops.clear_pages = clear_pages;
3425 if (unlikely(ggtt->do_idle_maps))
3426 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3432 * i915_ggtt_probe_hw - Probe GGTT hardware location
3433 * @dev_priv: i915 device
3435 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3437 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3440 ggtt->vm.i915 = dev_priv;
3441 ggtt->vm.dma = &dev_priv->drm.pdev->dev;
3443 if (INTEL_GEN(dev_priv) <= 5)
3444 ret = i915_gmch_probe(ggtt);
3445 else if (INTEL_GEN(dev_priv) < 8)
3446 ret = gen6_gmch_probe(ggtt);
3448 ret = gen8_gmch_probe(ggtt);
3452 if ((ggtt->vm.total - 1) >> 32) {
3453 DRM_ERROR("We never expected a Global GTT with more than 32bits"
3454 " of address space! Found %lldM!\n",
3455 ggtt->vm.total >> 20);
3456 ggtt->vm.total = 1ULL << 32;
3457 ggtt->mappable_end =
3458 min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3461 if (ggtt->mappable_end > ggtt->vm.total) {
3462 DRM_ERROR("mappable aperture extends past end of GGTT,"
3463 " aperture=%pa, total=%llx\n",
3464 &ggtt->mappable_end, ggtt->vm.total);
3465 ggtt->mappable_end = ggtt->vm.total;
3468 /* GMADR is the PCI mmio aperture into the global GTT. */
3469 DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3470 DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3471 DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3472 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3473 if (intel_vtd_active())
3474 DRM_INFO("VT-d active for gfx access\n");
3480 * i915_ggtt_init_hw - Initialize GGTT hardware
3481 * @dev_priv: i915 device
3483 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3485 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3488 stash_init(&dev_priv->mm.wc_stash);
3490 /* Note that we use page colouring to enforce a guard page at the
3491 * end of the address space. This is required as the CS may prefetch
3492 * beyond the end of the batch buffer, across the page boundary,
3493 * and beyond the end of the GTT if we do not provide a guard.
3495 mutex_lock(&dev_priv->drm.struct_mutex);
3496 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
3498 ggtt->vm.is_ggtt = true;
3500 /* Only VLV supports read-only GGTT mappings */
3501 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
3503 if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
3504 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3505 mutex_unlock(&dev_priv->drm.struct_mutex);
3507 if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
3508 dev_priv->ggtt.gmadr.start,
3509 dev_priv->ggtt.mappable_end)) {
3511 goto out_gtt_cleanup;
3514 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3517 * Initialise stolen early so that we may reserve preallocated
3518 * objects for the BIOS to KMS transition.
3520 ret = i915_gem_init_stolen(dev_priv);
3522 goto out_gtt_cleanup;
3527 ggtt->vm.cleanup(&ggtt->vm);
3531 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3533 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3539 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3541 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3543 i915->ggtt.invalidate = guc_ggtt_invalidate;
3545 i915_ggtt_invalidate(i915);
3548 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3550 /* XXX Temporary pardon for error unload */
3551 if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
3554 /* We should only be called after i915_ggtt_enable_guc() */
3555 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3557 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3559 i915_ggtt_invalidate(i915);
3562 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3564 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3565 struct i915_vma *vma, *vn;
3567 i915_check_and_clear_faults(dev_priv);
3569 mutex_lock(&ggtt->vm.mutex);
3571 /* First fill our portion of the GTT with scratch pages */
3572 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3573 ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3575 /* clflush objects bound into the GGTT and rebind them. */
3576 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
3577 struct drm_i915_gem_object *obj = vma->obj;
3579 if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3582 mutex_unlock(&ggtt->vm.mutex);
3584 if (!i915_vma_unbind(vma))
3587 WARN_ON(i915_vma_bind(vma,
3588 obj ? obj->cache_level : 0,
3591 i915_gem_object_lock(obj);
3592 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3593 i915_gem_object_unlock(obj);
3597 mutex_lock(&ggtt->vm.mutex);
3600 ggtt->vm.closed = false;
3601 i915_ggtt_invalidate(dev_priv);
3603 mutex_unlock(&ggtt->vm.mutex);
3605 if (INTEL_GEN(dev_priv) >= 8) {
3606 struct intel_ppat *ppat = &dev_priv->ppat;
3608 bitmap_set(ppat->dirty, 0, ppat->max_entries);
3609 dev_priv->ppat.update_hw(dev_priv);
3614 static struct scatterlist *
3615 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3616 unsigned int width, unsigned int height,
3617 unsigned int stride,
3618 struct sg_table *st, struct scatterlist *sg)
3620 unsigned int column, row;
3621 unsigned int src_idx;
3623 for (column = 0; column < width; column++) {
3624 src_idx = stride * (height - 1) + column + offset;
3625 for (row = 0; row < height; row++) {
3627 /* We don't need the pages, but need to initialize
3628 * the entries so the sg list can be happily traversed.
3629 * All we need are the DMA addresses.
3631 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3632 sg_dma_address(sg) =
3633 i915_gem_object_get_dma_address(obj, src_idx);
3634 sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3643 static noinline struct sg_table *
3644 intel_rotate_pages(struct intel_rotation_info *rot_info,
3645 struct drm_i915_gem_object *obj)
3647 unsigned int size = intel_rotation_info_size(rot_info);
3648 struct sg_table *st;
3649 struct scatterlist *sg;
3653 /* Allocate target SG list. */
3654 st = kmalloc(sizeof(*st), GFP_KERNEL);
3658 ret = sg_alloc_table(st, size, GFP_KERNEL);
3665 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3666 sg = rotate_pages(obj, rot_info->plane[i].offset,
3667 rot_info->plane[i].width, rot_info->plane[i].height,
3668 rot_info->plane[i].stride, st, sg);
3677 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3678 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3680 return ERR_PTR(ret);
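/*
 * Worked example of the rotation above, using an assumed tiny layout for
 * illustration: for a plane with width = 2, height = 2, stride = 2 and
 * offset = 0, column 0 emits the DMA addresses of pages 2 then 0, and
 * column 1 emits pages 3 then 1, i.e. consecutive GTT pages walk each source
 * column bottom-up, which is how the rotated scanout views are built.
 */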
3683 static struct scatterlist *
3684 remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3685 unsigned int width, unsigned int height,
3686 unsigned int stride,
3687 struct sg_table *st, struct scatterlist *sg)
3691 for (row = 0; row < height; row++) {
3692 unsigned int left = width * I915_GTT_PAGE_SIZE;
3696 unsigned int length;
3698 /* We don't need the pages, but need to initialize
3699 * the entries so the sg list can be happily traversed.
3700 * All we need are the DMA addresses.
3703 addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
3705 length = min(left, length);
3709 sg_set_page(sg, NULL, length, 0);
3710 sg_dma_address(sg) = addr;
3711 sg_dma_len(sg) = length;
3714 offset += length / I915_GTT_PAGE_SIZE;
3718 offset += stride - width;
3724 static noinline struct sg_table *
3725 intel_remap_pages(struct intel_remapped_info *rem_info,
3726 struct drm_i915_gem_object *obj)
3728 unsigned int size = intel_remapped_info_size(rem_info);
3729 struct sg_table *st;
3730 struct scatterlist *sg;
3734 /* Allocate target SG list. */
3735 st = kmalloc(sizeof(*st), GFP_KERNEL);
3739 ret = sg_alloc_table(st, size, GFP_KERNEL);
3746 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
3747 sg = remap_pages(obj, rem_info->plane[i].offset,
3748 rem_info->plane[i].width, rem_info->plane[i].height,
3749 rem_info->plane[i].stride, st, sg);
3760 DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3761 obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
3763 return ERR_PTR(ret);
3766 static noinline struct sg_table *
3767 intel_partial_pages(const struct i915_ggtt_view *view,
3768 struct drm_i915_gem_object *obj)
3770 struct sg_table *st;
3771 struct scatterlist *sg, *iter;
3772 unsigned int count = view->partial.size;
3773 unsigned int offset;
3776 st = kmalloc(sizeof(*st), GFP_KERNEL);
3780 ret = sg_alloc_table(st, count, GFP_KERNEL);
3784 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3792 len = min(iter->length - (offset << PAGE_SHIFT),
3793 count << PAGE_SHIFT);
3794 sg_set_page(sg, NULL, len, 0);
3795 sg_dma_address(sg) =
3796 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3797 sg_dma_len(sg) = len;
3800 count -= len >> PAGE_SHIFT;
3803 i915_sg_trim(st); /* Drop any unused tail entries. */
3809 iter = __sg_next(iter);
3816 return ERR_PTR(ret);
3820 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3824 /* The vma->pages are only valid within the lifespan of the borrowed
3825 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3826 * must be the vma->pages. A simple rule is that vma->pages must only
3827 * be accessed when the obj->mm.pages are pinned.
3829 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3831 switch (vma->ggtt_view.type) {
3833 GEM_BUG_ON(vma->ggtt_view.type);
3835 case I915_GGTT_VIEW_NORMAL:
3836 vma->pages = vma->obj->mm.pages;
3839 case I915_GGTT_VIEW_ROTATED:
3841 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3844 case I915_GGTT_VIEW_REMAPPED:
3846 intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
3849 case I915_GGTT_VIEW_PARTIAL:
3850 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3855 if (IS_ERR(vma->pages)) {
3856 ret = PTR_ERR(vma->pages);
3858 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3859 vma->ggtt_view.type, ret);
3865 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3866 * @vm: the &struct i915_address_space
3867 * @node: the &struct drm_mm_node (typically i915_vma.node)
3868 * @size: how much space to allocate inside the GTT,
3869 * must be #I915_GTT_PAGE_SIZE aligned
3870 * @offset: where to insert inside the GTT,
3871 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3872 * (@offset + @size) must fit within the address space
3873 * @color: color to apply to node, if this node is not from a VMA,
3874 * color must be #I915_COLOR_UNEVICTABLE
3875 * @flags: control search and eviction behaviour
3877 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3878 * the address space (using @size and @color). If the @node does not fit, it
3879 * tries to evict any overlapping nodes from the GTT, including any
3880 * neighbouring nodes if the colors do not match (to ensure guard pages between
3881 * differing domains). See i915_gem_evict_for_node() for the gory details
3882 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3883 * evicting active overlapping objects, and any overlapping node that is pinned
3884 * or marked as unevictable will also result in failure.
3886 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3887 * asked to wait for eviction and interrupted.
3889 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3890 struct drm_mm_node *node,
3891 u64 size, u64 offset, unsigned long color,
3897 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3898 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3899 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3900 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3901 GEM_BUG_ON(drm_mm_node_allocated(node));
3904 node->start = offset;
3905 node->color = color;
3907 err = drm_mm_reserve_node(&vm->mm, node);
3911 if (flags & PIN_NOEVICT)
3914 err = i915_gem_evict_for_node(vm, node, flags);
3916 err = drm_mm_reserve_node(&vm->mm, node);
3921 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3925 GEM_BUG_ON(range_overflows(start, len, end));
3926 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3928 range = round_down(end - len, align) - round_up(start, align);
3930 if (sizeof(unsigned long) == sizeof(u64)) {
3931 addr = get_random_long();
3933 addr = get_random_int();
3934 if (range > U32_MAX) {
3936 addr |= get_random_int();
3939 div64_u64_rem(addr, range, &addr);
3943 return round_up(start, align);
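/*
 * Worked example with assumed inputs (illustration only): for start = 0,
 * end = 1 MiB, len = 64 KiB and align = 4 KiB, the usable range is
 * round_down(1 MiB - 64 KiB, 4 KiB) - 0 = 960 KiB, so a random remainder in
 * [0, 960 KiB) is added to start before the final round_up(), yielding a
 * 4 KiB-aligned offset whose node still fits below end.
 */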
3947 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3948 * @vm: the &struct i915_address_space
3949 * @node: the &struct drm_mm_node (typically i915_vma.node)
3950 * @size: how much space to allocate inside the GTT,
3951 * must be #I915_GTT_PAGE_SIZE aligned
3952 * @alignment: required alignment of starting offset, may be 0 but
3953 * if specified, this must be a power-of-two and at least
3954 * #I915_GTT_MIN_ALIGNMENT
3955 * @color: color to apply to node
3956 * @start: start of any range restriction inside GTT (0 for all),
3957 * must be #I915_GTT_PAGE_SIZE aligned
3958 * @end: end of any range restriction inside GTT (U64_MAX for all),
3959 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3960 * @flags: control search and eviction behaviour
3962 * i915_gem_gtt_insert() first searches for an available hole into which
3963 * it can insert the node. The hole address is aligned to @alignment and
3964 * its @size must then fit entirely within the [@start, @end] bounds. The
3965 * nodes on either side of the hole must match @color, or else a guard page
3966 * will be inserted between the two nodes (or the node evicted). If no
3967 * suitable hole is found, first a victim is randomly selected and tested
3968 * for eviction; if that fails, the LRU list of objects within the GTT
3969 * is scanned to find the first set of replacement nodes to create the hole.
3970 * Those old overlapping nodes are evicted from the GTT (and so must be
3971 * rebound before any future use). Any node that is currently pinned cannot
3972 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3973 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3974 * searching for an eviction candidate. See i915_gem_evict_something() for
3975 * the gory details on the eviction algorithm.
3977 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3978 * asked to wait for eviction and interrupted.
3980 int i915_gem_gtt_insert(struct i915_address_space *vm,
3981 struct drm_mm_node *node,
3982 u64 size, u64 alignment, unsigned long color,
3983 u64 start, u64 end, unsigned int flags)
3985 enum drm_mm_insert_mode mode;
3989 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3991 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3992 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3993 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3994 GEM_BUG_ON(start >= end);
3995 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3996 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3997 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3998 GEM_BUG_ON(drm_mm_node_allocated(node));
4000 if (unlikely(range_overflows(start, size, end)))
4003 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
4006 mode = DRM_MM_INSERT_BEST;
4007 if (flags & PIN_HIGH)
4008 mode = DRM_MM_INSERT_HIGHEST;
4009 if (flags & PIN_MAPPABLE)
4010 mode = DRM_MM_INSERT_LOW;
4012 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
4013 * so we know that we always have a minimum alignment of 4096.
4014 * The drm_mm range manager is optimised to return results
4015 * with zero alignment, so where possible use the optimal
4018 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
4019 if (alignment <= I915_GTT_MIN_ALIGNMENT)
4022 err = drm_mm_insert_node_in_range(&vm->mm, node,
4023 size, alignment, color,
4028 if (mode & DRM_MM_INSERT_ONCE) {
4029 err = drm_mm_insert_node_in_range(&vm->mm, node,
4030 size, alignment, color,
4032 DRM_MM_INSERT_BEST);
4037 if (flags & PIN_NOEVICT)
4040 /* No free space, pick a slot at random.
4042 * There is a pathological case here using a GTT shared between
4043 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
4045 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
4046 * (64k objects) (448k objects)
4048 * Now imagine that the eviction LRU is ordered top-down (just because
4049 * pathology meets real life), and that we need to evict an object to
4050 * make room inside the aperture. The eviction scan then has to walk
4051 * the 448k list before it finds one within range. And now imagine that
4052 * it has to search for a new hole between every byte inside the memcpy,
4053 * for several simultaneous clients.
4055 * On a full-ppgtt system, if we have run out of available space, there
4056 * will be lots and lots of objects in the eviction list! Again,
4057 * searching that LRU list may be slow if we are also applying any
4058 * range restrictions (e.g. restriction to low 4GiB) and so, for
4059 * simplicity and similarity between different GTTs, try the single
4060 * random replacement first.
4062 offset = random_offset(start, end,
4063 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
4064 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
4068 /* Randomly selected placement is pinned, do a search */
4069 err = i915_gem_evict_something(vm, size, alignment, color,
4074 return drm_mm_insert_node_in_range(&vm->mm, node,
4075 size, alignment, color,
4076 start, end, DRM_MM_INSERT_EVICT);
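/*
 * A minimal usage sketch for the two helpers above; the size, alignment and
 * flags are assumptions for illustration rather than a real caller (in this
 * driver the main user is i915_vma_insert()):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(&ggtt->vm, &node,
 *				  SZ_64K, I915_GTT_MIN_ALIGNMENT,
 *				  I915_COLOR_UNEVICTABLE,
 *				  0, ggtt->vm.total, PIN_HIGH);
 *	if (!err)
 *		drm_mm_remove_node(&node);
 */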
4079 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4080 #include "selftests/mock_gtt.c"
4081 #include "selftests/i915_gem_gtt.c"