1 /*
2  * Copyright © 2010 Daniel Vetter
3  * Copyright © 2011-2014 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25
26 #include <linux/slab.h> /* fault-inject.h is not standalone! */
27
28 #include <linux/fault-inject.h>
29 #include <linux/log2.h>
30 #include <linux/random.h>
31 #include <linux/seq_file.h>
32 #include <linux/stop_machine.h>
33
34 #include <asm/set_memory.h>
35
36 #include <drm/i915_drm.h>
37
38 #include "i915_drv.h"
39 #include "i915_scatterlist.h"
40 #include "i915_trace.h"
41 #include "i915_vgpu.h"
42 #include "intel_drv.h"
43 #include "intel_frontbuffer.h"
44
45 #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
46
47 /**
48  * DOC: Global GTT views
49  *
50  * Background and previous state
51  *
52  * Historically objects could exist (be bound) in global GTT space only as
53  * singular instances, with a view representing all of the object's backing pages
54  * in a linear fashion. This view is called the normal view.
55  *
56  * To support multiple views of the same object, where the number of mapped
57  * pages is not equal to the backing store, or where the layout of the pages
58  * is not linear, the concept of a GGTT view was added.
59  *
60  * One example of an alternative view is a stereo display driven by a single
61  * image. In this case we would have a framebuffer looking like this
62  * (2x2 pages):
63  *
64  *    12
65  *    34
66  *
67  * The above represents the normal GGTT view, as mapped for GPU or CPU
68  * rendering. In contrast, the view fed to the display engine would be an
69  * alternative one, which could look something like this:
70  *
71  *   1212
72  *   3434
73  *
74  * In this example both the size and the layout of pages in the alternative view
75  * differ from the normal view.
76  *
77  * Implementation and usage
78  *
79  * GGTT views are implemented using VMAs and are distinguished via enum
80  * i915_ggtt_view_type and struct i915_ggtt_view.
81  *
82  * A new flavour of core GEM functions which work with GGTT bound objects was
83  * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
84  * renaming in large amounts of code. They take a struct i915_ggtt_view
85  * parameter encapsulating all metadata required to implement a view.
86  *
87  * As a helper for callers which are only interested in the normal view,
88  * a globally const i915_ggtt_view_normal singleton instance exists. All old core
89  * GEM API functions, the ones not taking the view parameter, operate on,
90  * or with, the normal GGTT view.
91  *
92  * Code wanting to add or use a new GGTT view needs to:
93  *
94  * 1. Add a new enum with a suitable name.
95  * 2. Extend the metadata in the i915_ggtt_view structure if required.
96  * 3. Add support to i915_get_ggtt_vma_pages().
97  *
98  * New views are required to build a scatter-gather table from within the
99  * i915_get_ggtt_vma_pages() function. This table is stored in the vma.ggtt_view
100  * and exists for the lifetime of a VMA.
101  *
102  * The core API is designed to have copy semantics, which means that a passed-in
103  * struct i915_ggtt_view does not need to be persistent (left around after
104  * calling the core API functions). An illustrative sketch of requesting a
105  * partial view follows this comment block.
106  */
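
/*
 * Illustrative sketch only (not used anywhere in the driver): one way a
 * caller might request a partial GGTT view of an object. It assumes the
 * i915_ggtt_view/intel_partial_info layout declared in i915_gem_gtt.h and
 * the i915_gem_object_ggtt_pin() helper from i915_drv.h; the offset and size
 * of a partial view are expressed in pages. Locking is elided; real callers
 * (e.g. the CPU fault handler) run under the usual GEM locking rules.
 */
static struct i915_vma * __maybe_unused
example_pin_partial_view(struct drm_i915_gem_object *obj,
                         pgoff_t first_page, unsigned int nr_pages)
{
        struct i915_ggtt_view view = {
                .type = I915_GGTT_VIEW_PARTIAL,
                .partial.offset = first_page,
                .partial.size = nr_pages,
        };

        /* Bind only the requested range of backing pages into the GGTT. */
        return i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
}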
107
108 static int
109 i915_get_ggtt_vma_pages(struct i915_vma *vma);
110
111 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
112 {
113         /*
114          * Note that as an uncached mmio write, this will flush the
115          * WCB of the writes into the GGTT before it triggers the invalidate.
116          */
117         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
118 }
119
120 static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
121 {
122         gen6_ggtt_invalidate(dev_priv);
123         I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
124 }
125
126 static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
127 {
128         intel_gtt_chipset_flush();
129 }
130
131 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
132 {
133         i915->ggtt.invalidate(i915);
134 }
135
136 static int ppgtt_bind_vma(struct i915_vma *vma,
137                           enum i915_cache_level cache_level,
138                           u32 unused)
139 {
140         u32 pte_flags;
141         int err;
142
143         if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
144                 err = vma->vm->allocate_va_range(vma->vm,
145                                                  vma->node.start, vma->size);
146                 if (err)
147                         return err;
148         }
149
150         /* Applicable to VLV, and gen8+ */
151         pte_flags = 0;
152         if (i915_gem_object_is_readonly(vma->obj))
153                 pte_flags |= PTE_READ_ONLY;
154
155         vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
156
157         return 0;
158 }
159
160 static void ppgtt_unbind_vma(struct i915_vma *vma)
161 {
162         vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
163 }
164
165 static int ppgtt_set_pages(struct i915_vma *vma)
166 {
167         GEM_BUG_ON(vma->pages);
168
169         vma->pages = vma->obj->mm.pages;
170
171         vma->page_sizes = vma->obj->mm.page_sizes;
172
173         return 0;
174 }
175
176 static void clear_pages(struct i915_vma *vma)
177 {
178         GEM_BUG_ON(!vma->pages);
179
180         if (vma->pages != vma->obj->mm.pages) {
181                 sg_free_table(vma->pages);
182                 kfree(vma->pages);
183         }
184         vma->pages = NULL;
185
186         memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
187 }
188
189 static u64 gen8_pte_encode(dma_addr_t addr,
190                            enum i915_cache_level level,
191                            u32 flags)
192 {
193         gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
194
195         if (unlikely(flags & PTE_READ_ONLY))
196                 pte &= ~_PAGE_RW;
197
198         switch (level) {
199         case I915_CACHE_NONE:
200                 pte |= PPAT_UNCACHED;
201                 break;
202         case I915_CACHE_WT:
203                 pte |= PPAT_DISPLAY_ELLC;
204                 break;
205         default:
206                 pte |= PPAT_CACHED;
207                 break;
208         }
209
210         return pte;
211 }
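
/*
 * Example of the encoding above: gen8_pte_encode(daddr, I915_CACHE_LLC, 0)
 * yields daddr | _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED, i.e. a present,
 * writable, LLC-cached PTE.
 */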
212
213 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
214                                   const enum i915_cache_level level)
215 {
216         gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
217         pde |= addr;
218         if (level != I915_CACHE_NONE)
219                 pde |= PPAT_CACHED_PDE;
220         else
221                 pde |= PPAT_UNCACHED;
222         return pde;
223 }
224
225 #define gen8_pdpe_encode gen8_pde_encode
226 #define gen8_pml4e_encode gen8_pde_encode
227
228 static u64 snb_pte_encode(dma_addr_t addr,
229                           enum i915_cache_level level,
230                           u32 flags)
231 {
232         gen6_pte_t pte = GEN6_PTE_VALID;
233         pte |= GEN6_PTE_ADDR_ENCODE(addr);
234
235         switch (level) {
236         case I915_CACHE_L3_LLC:
237         case I915_CACHE_LLC:
238                 pte |= GEN6_PTE_CACHE_LLC;
239                 break;
240         case I915_CACHE_NONE:
241                 pte |= GEN6_PTE_UNCACHED;
242                 break;
243         default:
244                 MISSING_CASE(level);
245         }
246
247         return pte;
248 }
249
250 static u64 ivb_pte_encode(dma_addr_t addr,
251                           enum i915_cache_level level,
252                           u32 flags)
253 {
254         gen6_pte_t pte = GEN6_PTE_VALID;
255         pte |= GEN6_PTE_ADDR_ENCODE(addr);
256
257         switch (level) {
258         case I915_CACHE_L3_LLC:
259                 pte |= GEN7_PTE_CACHE_L3_LLC;
260                 break;
261         case I915_CACHE_LLC:
262                 pte |= GEN6_PTE_CACHE_LLC;
263                 break;
264         case I915_CACHE_NONE:
265                 pte |= GEN6_PTE_UNCACHED;
266                 break;
267         default:
268                 MISSING_CASE(level);
269         }
270
271         return pte;
272 }
273
274 static u64 byt_pte_encode(dma_addr_t addr,
275                           enum i915_cache_level level,
276                           u32 flags)
277 {
278         gen6_pte_t pte = GEN6_PTE_VALID;
279         pte |= GEN6_PTE_ADDR_ENCODE(addr);
280
281         if (!(flags & PTE_READ_ONLY))
282                 pte |= BYT_PTE_WRITEABLE;
283
284         if (level != I915_CACHE_NONE)
285                 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
286
287         return pte;
288 }
289
290 static u64 hsw_pte_encode(dma_addr_t addr,
291                           enum i915_cache_level level,
292                           u32 flags)
293 {
294         gen6_pte_t pte = GEN6_PTE_VALID;
295         pte |= HSW_PTE_ADDR_ENCODE(addr);
296
297         if (level != I915_CACHE_NONE)
298                 pte |= HSW_WB_LLC_AGE3;
299
300         return pte;
301 }
302
303 static u64 iris_pte_encode(dma_addr_t addr,
304                            enum i915_cache_level level,
305                            u32 flags)
306 {
307         gen6_pte_t pte = GEN6_PTE_VALID;
308         pte |= HSW_PTE_ADDR_ENCODE(addr);
309
310         switch (level) {
311         case I915_CACHE_NONE:
312                 break;
313         case I915_CACHE_WT:
314                 pte |= HSW_WT_ELLC_LLC_AGE3;
315                 break;
316         default:
317                 pte |= HSW_WB_ELLC_LLC_AGE3;
318                 break;
319         }
320
321         return pte;
322 }
323
324 static void stash_init(struct pagestash *stash)
325 {
326         pagevec_init(&stash->pvec);
327         spin_lock_init(&stash->lock);
328 }
329
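/* Pop one page from the stash, or return NULL if the stash is empty. */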
330 static struct page *stash_pop_page(struct pagestash *stash)
331 {
332         struct page *page = NULL;
333
334         spin_lock(&stash->lock);
335         if (likely(stash->pvec.nr))
336                 page = stash->pvec.pages[--stash->pvec.nr];
337         spin_unlock(&stash->lock);
338
339         return page;
340 }
341
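/*
 * Transfer as many pages as will fit from the tail of @pvec into @stash;
 * anything that does not fit is left in @pvec for the caller to release.
 */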
342 static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
343 {
344         unsigned int nr;
345
346         spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
347
348         nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
349         memcpy(stash->pvec.pages + stash->pvec.nr,
350                pvec->pages + pvec->nr - nr,
351                sizeof(pvec->pages[0]) * nr);
352         stash->pvec.nr += nr;
353
354         spin_unlock(&stash->lock);
355
356         pvec->nr -= nr;
357 }
358
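/*
 * Allocate a single backing page for page-table storage. Prefer pages from
 * the local and global stashes; on platforms that need write-combined page
 * tables, batch fresh allocations so the cost of set_pages_array_wc() is
 * amortised across many pages.
 */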
359 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
360 {
361         struct pagevec stack;
362         struct page *page;
363
364         if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
365                 i915_gem_shrink_all(vm->i915);
366
367         page = stash_pop_page(&vm->free_pages);
368         if (page)
369                 return page;
370
371         if (!vm->pt_kmap_wc)
372                 return alloc_page(gfp);
373
374         /* Look in our global stash of WC pages... */
375         page = stash_pop_page(&vm->i915->mm.wc_stash);
376         if (page)
377                 return page;
378
379         /*
380          * Otherwise batch allocate pages to amortize cost of set_pages_wc.
381          *
382          * We have to be careful as page allocation may trigger the shrinker
383          * (via direct reclaim) which will fill up the WC stash underneath us.
384          * So we add our WB pages into a temporary pvec on the stack and merge
385          * them into the WC stash after all the allocations are complete.
386          */
387         pagevec_init(&stack);
388         do {
389                 struct page *page;
390
391                 page = alloc_page(gfp);
392                 if (unlikely(!page))
393                         break;
394
395                 stack.pages[stack.nr++] = page;
396         } while (pagevec_space(&stack));
397
398         if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
399                 page = stack.pages[--stack.nr];
400
401                 /* Merge spare WC pages to the global stash */
402                 if (stack.nr)
403                         stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
404
405                 /* Push any surplus WC pages onto the local VM stash */
406                 if (stack.nr)
407                         stash_push_pagevec(&vm->free_pages, &stack);
408         }
409
410         /* Return unwanted leftovers */
411         if (unlikely(stack.nr)) {
412                 WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
413                 __pagevec_release(&stack);
414         }
415
416         return page;
417 }
418
419 static void vm_free_pages_release(struct i915_address_space *vm,
420                                   bool immediate)
421 {
422         struct pagevec *pvec = &vm->free_pages.pvec;
423         struct pagevec stack;
424
425         lockdep_assert_held(&vm->free_pages.lock);
426         GEM_BUG_ON(!pagevec_count(pvec));
427
428         if (vm->pt_kmap_wc) {
429                 /*
430                  * When we use WC, first fill up the global stash and then,
431                  * only if that is full, immediately free the overflow.
432                  */
433                 stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
434
435                 /*
436                  * As we have made some room in the VM's free_pages,
437                  * we can wait for it to fill again. Unless we are
438                  * inside i915_address_space_fini() and must
439                  * immediately release the pages!
440                  */
441                 if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
442                         return;
443
444                 /*
445                  * We have to drop the lock to allow ourselves to sleep,
446                  * so take a copy of the pvec and clear the stash for
447                  * others to use it as we sleep.
448                  */
449                 stack = *pvec;
450                 pagevec_reinit(pvec);
451                 spin_unlock(&vm->free_pages.lock);
452
453                 pvec = &stack;
454                 set_pages_array_wb(pvec->pages, pvec->nr);
455
456                 spin_lock(&vm->free_pages.lock);
457         }
458
459         __pagevec_release(pvec);
460 }
461
462 static void vm_free_page(struct i915_address_space *vm, struct page *page)
463 {
464         /*
465          * On !llc, we need to change the pages back to WB. We only do so
466          * in bulk, so we rarely need to change the page attributes here,
467          * but doing so requires a stop_machine() from deep inside arch/x86/mm.
468          * To make detection of the possible sleep more likely, use an
469          * unconditional might_sleep() for everybody.
470          */
471         might_sleep();
472         spin_lock(&vm->free_pages.lock);
473         while (!pagevec_space(&vm->free_pages.pvec))
474                 vm_free_pages_release(vm, false);
475         GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
476         pagevec_add(&vm->free_pages.pvec, page);
477         spin_unlock(&vm->free_pages.lock);
478 }
479
480 static void i915_address_space_init(struct i915_address_space *vm, int subclass)
481 {
482         /*
483          * The vm->mutex must be reclaim safe (for use in the shrinker).
484          * Do a dummy acquire now under fs_reclaim so that any allocation
485          * attempt holding the lock is immediately reported by lockdep.
486          */
487         mutex_init(&vm->mutex);
488         lockdep_set_subclass(&vm->mutex, subclass);
489         i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
490
491         GEM_BUG_ON(!vm->total);
492         drm_mm_init(&vm->mm, 0, vm->total);
493         vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
494
495         stash_init(&vm->free_pages);
496
497         INIT_LIST_HEAD(&vm->unbound_list);
498         INIT_LIST_HEAD(&vm->bound_list);
499 }
500
501 static void i915_address_space_fini(struct i915_address_space *vm)
502 {
503         spin_lock(&vm->free_pages.lock);
504         if (pagevec_count(&vm->free_pages.pvec))
505                 vm_free_pages_release(vm, true);
506         GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
507         spin_unlock(&vm->free_pages.lock);
508
509         drm_mm_takedown(&vm->mm);
510
511         mutex_destroy(&vm->mutex);
512 }
513
514 static int __setup_page_dma(struct i915_address_space *vm,
515                             struct i915_page_dma *p,
516                             gfp_t gfp)
517 {
518         p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
519         if (unlikely(!p->page))
520                 return -ENOMEM;
521
522         p->daddr = dma_map_page_attrs(vm->dma,
523                                       p->page, 0, PAGE_SIZE,
524                                       PCI_DMA_BIDIRECTIONAL,
525                                       DMA_ATTR_SKIP_CPU_SYNC |
526                                       DMA_ATTR_NO_WARN);
527         if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
528                 vm_free_page(vm, p->page);
529                 return -ENOMEM;
530         }
531
532         return 0;
533 }
534
535 static int setup_page_dma(struct i915_address_space *vm,
536                           struct i915_page_dma *p)
537 {
538         return __setup_page_dma(vm, p, __GFP_HIGHMEM);
539 }
540
541 static void cleanup_page_dma(struct i915_address_space *vm,
542                              struct i915_page_dma *p)
543 {
544         dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
545         vm_free_page(vm, p->page);
546 }
547
548 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
549
550 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
551 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
552 #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
553 #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
554
555 static void fill_page_dma(struct i915_address_space *vm,
556                           struct i915_page_dma *p,
557                           const u64 val)
558 {
559         u64 * const vaddr = kmap_atomic(p->page);
560
561         memset64(vaddr, val, PAGE_SIZE / sizeof(val));
562
563         kunmap_atomic(vaddr);
564 }
565
566 static void fill_page_dma_32(struct i915_address_space *vm,
567                              struct i915_page_dma *p,
568                              const u32 v)
569 {
570         fill_page_dma(vm, p, (u64)v << 32 | v);
571 }
572
573 static int
574 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
575 {
576         unsigned long size;
577
578         /*
579          * In order to utilize 64K pages for an object with a size < 2M, we will
580          * need to support a 64K scratch page, given that every 16th entry for a
581          * page-table operating in 64K mode must point to a properly aligned 64K
582          * region, including any PTEs which happen to point to scratch.
583          *
584          * This is only relevant for the 48b PPGTT where we support
585          * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
586          * scratch (read-only) between all vm, we create one 64k scratch page
587          * for all.
588          */
589         size = I915_GTT_PAGE_SIZE_4K;
590         if (i915_vm_is_4lvl(vm) &&
591             HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
592                 size = I915_GTT_PAGE_SIZE_64K;
593                 gfp |= __GFP_NOWARN;
594         }
595         gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
596
597         do {
598                 int order = get_order(size);
599                 struct page *page;
600                 dma_addr_t addr;
601
602                 page = alloc_pages(gfp, order);
603                 if (unlikely(!page))
604                         goto skip;
605
606                 addr = dma_map_page_attrs(vm->dma,
607                                           page, 0, size,
608                                           PCI_DMA_BIDIRECTIONAL,
609                                           DMA_ATTR_SKIP_CPU_SYNC |
610                                           DMA_ATTR_NO_WARN);
611                 if (unlikely(dma_mapping_error(vm->dma, addr)))
612                         goto free_page;
613
614                 if (unlikely(!IS_ALIGNED(addr, size)))
615                         goto unmap_page;
616
617                 vm->scratch_page.page = page;
618                 vm->scratch_page.daddr = addr;
619                 vm->scratch_order = order;
620                 return 0;
621
622 unmap_page:
623                 dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
624 free_page:
625                 __free_pages(page, order);
626 skip:
627                 if (size == I915_GTT_PAGE_SIZE_4K)
628                         return -ENOMEM;
629
630                 size = I915_GTT_PAGE_SIZE_4K;
631                 gfp &= ~__GFP_NOWARN;
632         } while (1);
633 }
634
635 static void cleanup_scratch_page(struct i915_address_space *vm)
636 {
637         struct i915_page_dma *p = &vm->scratch_page;
638         int order = vm->scratch_order;
639
640         dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
641                        PCI_DMA_BIDIRECTIONAL);
642         __free_pages(p->page, order);
643 }
644
645 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
646 {
647         struct i915_page_table *pt;
648
649         pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
650         if (unlikely(!pt))
651                 return ERR_PTR(-ENOMEM);
652
653         if (unlikely(setup_px(vm, pt))) {
654                 kfree(pt);
655                 return ERR_PTR(-ENOMEM);
656         }
657
658         pt->used_ptes = 0;
659         return pt;
660 }
661
662 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
663 {
664         cleanup_px(vm, pt);
665         kfree(pt);
666 }
667
668 static void gen8_initialize_pt(struct i915_address_space *vm,
669                                struct i915_page_table *pt)
670 {
671         fill_px(vm, pt, vm->scratch_pte);
672 }
673
674 static void gen6_initialize_pt(struct i915_address_space *vm,
675                                struct i915_page_table *pt)
676 {
677         fill32_px(vm, pt, vm->scratch_pte);
678 }
679
680 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
681 {
682         struct i915_page_directory *pd;
683
684         pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
685         if (unlikely(!pd))
686                 return ERR_PTR(-ENOMEM);
687
688         if (unlikely(setup_px(vm, pd))) {
689                 kfree(pd);
690                 return ERR_PTR(-ENOMEM);
691         }
692
693         pd->used_pdes = 0;
694         return pd;
695 }
696
697 static void free_pd(struct i915_address_space *vm,
698                     struct i915_page_directory *pd)
699 {
700         cleanup_px(vm, pd);
701         kfree(pd);
702 }
703
704 static void gen8_initialize_pd(struct i915_address_space *vm,
705                                struct i915_page_directory *pd)
706 {
707         fill_px(vm, pd,
708                 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
709         memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
710 }
711
712 static int __pdp_init(struct i915_address_space *vm,
713                       struct i915_page_directory_pointer *pdp)
714 {
715         const unsigned int pdpes = i915_pdpes_per_pdp(vm);
716
717         pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
718                                             I915_GFP_ALLOW_FAIL);
719         if (unlikely(!pdp->page_directory))
720                 return -ENOMEM;
721
722         memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
723
724         return 0;
725 }
726
727 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
728 {
729         kfree(pdp->page_directory);
730         pdp->page_directory = NULL;
731 }
732
733 static struct i915_page_directory_pointer *
734 alloc_pdp(struct i915_address_space *vm)
735 {
736         struct i915_page_directory_pointer *pdp;
737         int ret = -ENOMEM;
738
739         GEM_BUG_ON(!i915_vm_is_4lvl(vm));
740
741         pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
742         if (!pdp)
743                 return ERR_PTR(-ENOMEM);
744
745         ret = __pdp_init(vm, pdp);
746         if (ret)
747                 goto fail_bitmap;
748
749         ret = setup_px(vm, pdp);
750         if (ret)
751                 goto fail_page_m;
752
753         return pdp;
754
755 fail_page_m:
756         __pdp_fini(pdp);
757 fail_bitmap:
758         kfree(pdp);
759
760         return ERR_PTR(ret);
761 }
762
763 static void free_pdp(struct i915_address_space *vm,
764                      struct i915_page_directory_pointer *pdp)
765 {
766         __pdp_fini(pdp);
767
768         if (!i915_vm_is_4lvl(vm))
769                 return;
770
771         cleanup_px(vm, pdp);
772         kfree(pdp);
773 }
774
775 static void gen8_initialize_pdp(struct i915_address_space *vm,
776                                 struct i915_page_directory_pointer *pdp)
777 {
778         gen8_ppgtt_pdpe_t scratch_pdpe;
779
780         scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
781
782         fill_px(vm, pdp, scratch_pdpe);
783 }
784
785 static void gen8_initialize_pml4(struct i915_address_space *vm,
786                                  struct i915_pml4 *pml4)
787 {
788         fill_px(vm, pml4,
789                 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
790         memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
791 }
792
793 /*
794  * PDE TLBs are a pain to invalidate on GEN8+. When we modify
795  * the page table structures, we mark them dirty so that
796  * context switching/execlist queuing code takes extra steps
797  * to ensure that the TLBs are flushed.
798  */
799 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
800 {
801         ppgtt->pd_dirty_engines = ALL_ENGINES;
802 }
803
804 /* Removes entries from a single page table, releasing it if it's empty.
805  * Caller can use the return value to update higher-level entries.
806  */
807 static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
808                                 struct i915_page_table *pt,
809                                 u64 start, u64 length)
810 {
811         unsigned int num_entries = gen8_pte_count(start, length);
812         gen8_pte_t *vaddr;
813
814         GEM_BUG_ON(num_entries > pt->used_ptes);
815
816         pt->used_ptes -= num_entries;
817         if (!pt->used_ptes)
818                 return true;
819
820         vaddr = kmap_atomic_px(pt);
821         memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
822         kunmap_atomic(vaddr);
823
824         return false;
825 }
826
827 static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
828                                struct i915_page_directory *pd,
829                                struct i915_page_table *pt,
830                                unsigned int pde)
831 {
832         gen8_pde_t *vaddr;
833
834         pd->page_table[pde] = pt;
835
836         vaddr = kmap_atomic_px(pd);
837         vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
838         kunmap_atomic(vaddr);
839 }
840
841 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
842                                 struct i915_page_directory *pd,
843                                 u64 start, u64 length)
844 {
845         struct i915_page_table *pt;
846         u32 pde;
847
848         gen8_for_each_pde(pt, pd, start, length, pde) {
849                 GEM_BUG_ON(pt == vm->scratch_pt);
850
851                 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
852                         continue;
853
854                 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
855                 GEM_BUG_ON(!pd->used_pdes);
856                 pd->used_pdes--;
857
858                 free_pt(vm, pt);
859         }
860
861         return !pd->used_pdes;
862 }
863
864 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
865                                 struct i915_page_directory_pointer *pdp,
866                                 struct i915_page_directory *pd,
867                                 unsigned int pdpe)
868 {
869         gen8_ppgtt_pdpe_t *vaddr;
870
871         pdp->page_directory[pdpe] = pd;
872         if (!i915_vm_is_4lvl(vm))
873                 return;
874
875         vaddr = kmap_atomic_px(pdp);
876         vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
877         kunmap_atomic(vaddr);
878 }
879
880 /* Removes entries from a single page dir pointer, releasing it if it's empty.
881  * Caller can use the return value to update higher-level entries.
882  */
883 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
884                                  struct i915_page_directory_pointer *pdp,
885                                  u64 start, u64 length)
886 {
887         struct i915_page_directory *pd;
888         unsigned int pdpe;
889
890         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
891                 GEM_BUG_ON(pd == vm->scratch_pd);
892
893                 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
894                         continue;
895
896                 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
897                 GEM_BUG_ON(!pdp->used_pdpes);
898                 pdp->used_pdpes--;
899
900                 free_pd(vm, pd);
901         }
902
903         return !pdp->used_pdpes;
904 }
905
906 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
907                                   u64 start, u64 length)
908 {
909         gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
910 }
911
912 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
913                                  struct i915_page_directory_pointer *pdp,
914                                  unsigned int pml4e)
915 {
916         gen8_ppgtt_pml4e_t *vaddr;
917
918         pml4->pdps[pml4e] = pdp;
919
920         vaddr = kmap_atomic_px(pml4);
921         vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
922         kunmap_atomic(vaddr);
923 }
924
925 /* Removes entries from a single pml4.
926  * This is the top-level structure in 4-level page tables used on gen8+.
927  * Empty entries are always scratch pml4e.
928  */
929 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
930                                   u64 start, u64 length)
931 {
932         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
933         struct i915_pml4 *pml4 = &ppgtt->pml4;
934         struct i915_page_directory_pointer *pdp;
935         unsigned int pml4e;
936
937         GEM_BUG_ON(!i915_vm_is_4lvl(vm));
938
939         gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
940                 GEM_BUG_ON(pdp == vm->scratch_pdp);
941
942                 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
943                         continue;
944
945                 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
946
947                 free_pdp(vm, pdp);
948         }
949 }
950
951 static inline struct sgt_dma {
952         struct scatterlist *sg;
953         dma_addr_t dma, max;
954 } sgt_dma(struct i915_vma *vma) {
955         struct scatterlist *sg = vma->pages->sgl;
956         dma_addr_t addr = sg_dma_address(sg);
957         return (struct sgt_dma) { sg, addr, addr + sg->length };
958 }
959
960 struct gen8_insert_pte {
961         u16 pml4e;
962         u16 pdpe;
963         u16 pde;
964         u16 pte;
965 };
966
967 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
968 {
969         return (struct gen8_insert_pte) {
970                  gen8_pml4e_index(start),
971                  gen8_pdpe_index(start),
972                  gen8_pde_index(start),
973                  gen8_pte_index(start),
974         };
975 }
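
/*
 * For reference, with 4K pages a 48b (4-level) GTT offset decomposes as
 *
 *   [47:39] pml4e | [38:30] pdpe | [29:21] pde | [20:12] pte | [11:0] offset
 *
 * which is exactly what the index helpers used above extract.
 */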
976
977 static __always_inline bool
978 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
979                               struct i915_page_directory_pointer *pdp,
980                               struct sgt_dma *iter,
981                               struct gen8_insert_pte *idx,
982                               enum i915_cache_level cache_level,
983                               u32 flags)
984 {
985         struct i915_page_directory *pd;
986         const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
987         gen8_pte_t *vaddr;
988         bool ret;
989
990         GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
991         pd = pdp->page_directory[idx->pdpe];
992         vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
993         do {
994                 vaddr[idx->pte] = pte_encode | iter->dma;
995
996                 iter->dma += I915_GTT_PAGE_SIZE;
997                 if (iter->dma >= iter->max) {
998                         iter->sg = __sg_next(iter->sg);
999                         if (!iter->sg) {
1000                                 ret = false;
1001                                 break;
1002                         }
1003
1004                         iter->dma = sg_dma_address(iter->sg);
1005                         iter->max = iter->dma + iter->sg->length;
1006                 }
1007
1008                 if (++idx->pte == GEN8_PTES) {
1009                         idx->pte = 0;
1010
1011                         if (++idx->pde == I915_PDES) {
1012                                 idx->pde = 0;
1013
1014                                 /* Limited by sg length for 3lvl */
1015                                 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
1016                                         idx->pdpe = 0;
1017                                         ret = true;
1018                                         break;
1019                                 }
1020
1021                                 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
1022                                 pd = pdp->page_directory[idx->pdpe];
1023                         }
1024
1025                         kunmap_atomic(vaddr);
1026                         vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1027                 }
1028         } while (1);
1029         kunmap_atomic(vaddr);
1030
1031         return ret;
1032 }
1033
1034 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
1035                                    struct i915_vma *vma,
1036                                    enum i915_cache_level cache_level,
1037                                    u32 flags)
1038 {
1039         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1040         struct sgt_dma iter = sgt_dma(vma);
1041         struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1042
1043         gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
1044                                       cache_level, flags);
1045
1046         vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1047 }
1048
1049 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1050                                            struct i915_page_directory_pointer **pdps,
1051                                            struct sgt_dma *iter,
1052                                            enum i915_cache_level cache_level,
1053                                            u32 flags)
1054 {
1055         const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1056         u64 start = vma->node.start;
1057         dma_addr_t rem = iter->sg->length;
1058
1059         do {
1060                 struct gen8_insert_pte idx = gen8_insert_pte(start);
1061                 struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
1062                 struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
1063                 unsigned int page_size;
1064                 bool maybe_64K = false;
1065                 gen8_pte_t encode = pte_encode;
1066                 gen8_pte_t *vaddr;
1067                 u16 index, max;
1068
1069                 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1070                     IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1071                     rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
1072                         index = idx.pde;
1073                         max = I915_PDES;
1074                         page_size = I915_GTT_PAGE_SIZE_2M;
1075
1076                         encode |= GEN8_PDE_PS_2M;
1077
1078                         vaddr = kmap_atomic_px(pd);
1079                 } else {
1080                         struct i915_page_table *pt = pd->page_table[idx.pde];
1081
1082                         index = idx.pte;
1083                         max = GEN8_PTES;
1084                         page_size = I915_GTT_PAGE_SIZE;
1085
1086                         if (!index &&
1087                             vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1088                             IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1089                             (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1090                              rem >= (max - index) * I915_GTT_PAGE_SIZE))
1091                                 maybe_64K = true;
1092
1093                         vaddr = kmap_atomic_px(pt);
1094                 }
1095
1096                 do {
1097                         GEM_BUG_ON(iter->sg->length < page_size);
1098                         vaddr[index++] = encode | iter->dma;
1099
1100                         start += page_size;
1101                         iter->dma += page_size;
1102                         rem -= page_size;
1103                         if (iter->dma >= iter->max) {
1104                                 iter->sg = __sg_next(iter->sg);
1105                                 if (!iter->sg)
1106                                         break;
1107
1108                                 rem = iter->sg->length;
1109                                 iter->dma = sg_dma_address(iter->sg);
1110                                 iter->max = iter->dma + rem;
1111
1112                                 if (maybe_64K && index < max &&
1113                                     !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1114                                       (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1115                                        rem >= (max - index) * I915_GTT_PAGE_SIZE)))
1116                                         maybe_64K = false;
1117
1118                                 if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1119                                         break;
1120                         }
1121                 } while (rem >= page_size && index < max);
1122
1123                 kunmap_atomic(vaddr);
1124
1125                 /*
1126                  * Is it safe to mark the 2M block as 64K? -- Either we have
1127                  * filled the whole page-table with 64K entries, or filled part of
1128                  * it and have reached the end of the sg table and we have
1129                  * enough padding.
1130                  */
1131                 if (maybe_64K &&
1132                     (index == max ||
1133                      (i915_vm_has_scratch_64K(vma->vm) &&
1134                       !iter->sg && IS_ALIGNED(vma->node.start +
1135                                               vma->node.size,
1136                                               I915_GTT_PAGE_SIZE_2M)))) {
1137                         vaddr = kmap_atomic_px(pd);
1138                         vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
1139                         kunmap_atomic(vaddr);
1140                         page_size = I915_GTT_PAGE_SIZE_64K;
1141
1142                         /*
1143                          * We write all 4K page entries, even when using 64K
1144                          * pages. In order to verify that the HW isn't cheating
1145                          * by using the 4K PTE instead of the 64K PTE, we want
1146                          * to remove all the surplus entries. If the HW skipped
1147                          * the 64K PTE, it will read/write into the scratch page
1148                          * instead - which we detect as missing results during
1149                          * selftests.
1150                          */
1151                         if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1152                                 u16 i;
1153
1154                                 encode = vma->vm->scratch_pte;
1155                                 vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
1156
1157                                 for (i = 1; i < index; i += 16)
1158                                         memset64(vaddr + i, encode, 15);
1159
1160                                 kunmap_atomic(vaddr);
1161                         }
1162                 }
1163
1164                 vma->page_sizes.gtt |= page_size;
1165         } while (iter->sg);
1166 }
1167
1168 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
1169                                    struct i915_vma *vma,
1170                                    enum i915_cache_level cache_level,
1171                                    u32 flags)
1172 {
1173         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1174         struct sgt_dma iter = sgt_dma(vma);
1175         struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
1176
1177         if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1178                 gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
1179                                                flags);
1180         } else {
1181                 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1182
1183                 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
1184                                                      &iter, &idx, cache_level,
1185                                                      flags))
1186                         GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
1187
1188                 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1189         }
1190 }
1191
1192 static void gen8_free_page_tables(struct i915_address_space *vm,
1193                                   struct i915_page_directory *pd)
1194 {
1195         int i;
1196
1197         for (i = 0; i < I915_PDES; i++) {
1198                 if (pd->page_table[i] != vm->scratch_pt)
1199                         free_pt(vm, pd->page_table[i]);
1200         }
1201 }
1202
1203 static int gen8_init_scratch(struct i915_address_space *vm)
1204 {
1205         int ret;
1206
1207         /*
1208          * If everybody agrees not to write into the scratch page,
1209          * we can reuse it for all VMs, keeping contexts and processes separate.
1210          */
1211         if (vm->has_read_only &&
1212             vm->i915->kernel_context &&
1213             vm->i915->kernel_context->ppgtt) {
1214                 struct i915_address_space *clone =
1215                         &vm->i915->kernel_context->ppgtt->vm;
1216
1217                 GEM_BUG_ON(!clone->has_read_only);
1218
1219                 vm->scratch_order = clone->scratch_order;
1220                 vm->scratch_pte = clone->scratch_pte;
1221                 vm->scratch_pt  = clone->scratch_pt;
1222                 vm->scratch_pd  = clone->scratch_pd;
1223                 vm->scratch_pdp = clone->scratch_pdp;
1224                 return 0;
1225         }
1226
1227         ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1228         if (ret)
1229                 return ret;
1230
1231         vm->scratch_pte =
1232                 gen8_pte_encode(vm->scratch_page.daddr,
1233                                 I915_CACHE_LLC,
1234                                 vm->has_read_only);
1235
1236         vm->scratch_pt = alloc_pt(vm);
1237         if (IS_ERR(vm->scratch_pt)) {
1238                 ret = PTR_ERR(vm->scratch_pt);
1239                 goto free_scratch_page;
1240         }
1241
1242         vm->scratch_pd = alloc_pd(vm);
1243         if (IS_ERR(vm->scratch_pd)) {
1244                 ret = PTR_ERR(vm->scratch_pd);
1245                 goto free_pt;
1246         }
1247
1248         if (i915_vm_is_4lvl(vm)) {
1249                 vm->scratch_pdp = alloc_pdp(vm);
1250                 if (IS_ERR(vm->scratch_pdp)) {
1251                         ret = PTR_ERR(vm->scratch_pdp);
1252                         goto free_pd;
1253                 }
1254         }
1255
1256         gen8_initialize_pt(vm, vm->scratch_pt);
1257         gen8_initialize_pd(vm, vm->scratch_pd);
1258         if (i915_vm_is_4lvl(vm))
1259                 gen8_initialize_pdp(vm, vm->scratch_pdp);
1260
1261         return 0;
1262
1263 free_pd:
1264         free_pd(vm, vm->scratch_pd);
1265 free_pt:
1266         free_pt(vm, vm->scratch_pt);
1267 free_scratch_page:
1268         cleanup_scratch_page(vm);
1269
1270         return ret;
1271 }
1272
1273 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1274 {
1275         struct i915_address_space *vm = &ppgtt->vm;
1276         struct drm_i915_private *dev_priv = vm->i915;
1277         enum vgt_g2v_type msg;
1278         int i;
1279
1280         if (i915_vm_is_4lvl(vm)) {
1281                 const u64 daddr = px_dma(&ppgtt->pml4);
1282
1283                 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1284                 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1285
1286                 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1287                                 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1288         } else {
1289                 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1290                         const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1291
1292                         I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1293                         I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1294                 }
1295
1296                 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1297                                 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1298         }
1299
1300         I915_WRITE(vgtif_reg(g2v_notify), msg);
1301
1302         return 0;
1303 }
1304
1305 static void gen8_free_scratch(struct i915_address_space *vm)
1306 {
1307         if (!vm->scratch_page.daddr)
1308                 return;
1309
1310         if (i915_vm_is_4lvl(vm))
1311                 free_pdp(vm, vm->scratch_pdp);
1312         free_pd(vm, vm->scratch_pd);
1313         free_pt(vm, vm->scratch_pt);
1314         cleanup_scratch_page(vm);
1315 }
1316
1317 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1318                                     struct i915_page_directory_pointer *pdp)
1319 {
1320         const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1321         int i;
1322
1323         for (i = 0; i < pdpes; i++) {
1324                 if (pdp->page_directory[i] == vm->scratch_pd)
1325                         continue;
1326
1327                 gen8_free_page_tables(vm, pdp->page_directory[i]);
1328                 free_pd(vm, pdp->page_directory[i]);
1329         }
1330
1331         free_pdp(vm, pdp);
1332 }
1333
1334 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1335 {
1336         int i;
1337
1338         for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1339                 if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
1340                         continue;
1341
1342                 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
1343         }
1344
1345         cleanup_px(&ppgtt->vm, &ppgtt->pml4);
1346 }
1347
1348 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1349 {
1350         struct drm_i915_private *dev_priv = vm->i915;
1351         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1352
1353         if (intel_vgpu_active(dev_priv))
1354                 gen8_ppgtt_notify_vgt(ppgtt, false);
1355
1356         if (i915_vm_is_4lvl(vm))
1357                 gen8_ppgtt_cleanup_4lvl(ppgtt);
1358         else
1359                 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
1360
1361         gen8_free_scratch(vm);
1362 }
1363
1364 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1365                                struct i915_page_directory *pd,
1366                                u64 start, u64 length)
1367 {
1368         struct i915_page_table *pt;
1369         u64 from = start;
1370         unsigned int pde;
1371
1372         gen8_for_each_pde(pt, pd, start, length, pde) {
1373                 int count = gen8_pte_count(start, length);
1374
1375                 if (pt == vm->scratch_pt) {
1376                         pd->used_pdes++;
1377
1378                         pt = alloc_pt(vm);
1379                         if (IS_ERR(pt)) {
1380                                 pd->used_pdes--;
1381                                 goto unwind;
1382                         }
1383
1384                         if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1385                                 gen8_initialize_pt(vm, pt);
1386
1387                         gen8_ppgtt_set_pde(vm, pd, pt, pde);
1388                         GEM_BUG_ON(pd->used_pdes > I915_PDES);
1389                 }
1390
1391                 pt->used_ptes += count;
1392         }
1393         return 0;
1394
1395 unwind:
1396         gen8_ppgtt_clear_pd(vm, pd, from, start - from);
1397         return -ENOMEM;
1398 }
1399
1400 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1401                                 struct i915_page_directory_pointer *pdp,
1402                                 u64 start, u64 length)
1403 {
1404         struct i915_page_directory *pd;
1405         u64 from = start;
1406         unsigned int pdpe;
1407         int ret;
1408
1409         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1410                 if (pd == vm->scratch_pd) {
1411                         pdp->used_pdpes++;
1412
1413                         pd = alloc_pd(vm);
1414                         if (IS_ERR(pd)) {
1415                                 pdp->used_pdpes--;
1416                                 goto unwind;
1417                         }
1418
1419                         gen8_initialize_pd(vm, pd);
1420                         gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1421                         GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1422                 }
1423
1424                 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1425                 if (unlikely(ret))
1426                         goto unwind_pd;
1427         }
1428
1429         return 0;
1430
1431 unwind_pd:
1432         if (!pd->used_pdes) {
1433                 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1434                 GEM_BUG_ON(!pdp->used_pdpes);
1435                 pdp->used_pdpes--;
1436                 free_pd(vm, pd);
1437         }
1438 unwind:
1439         gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1440         return -ENOMEM;
1441 }
1442
1443 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1444                                  u64 start, u64 length)
1445 {
1446         return gen8_ppgtt_alloc_pdp(vm,
1447                                     &i915_vm_to_ppgtt(vm)->pdp, start, length);
1448 }
1449
1450 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1451                                  u64 start, u64 length)
1452 {
1453         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1454         struct i915_pml4 *pml4 = &ppgtt->pml4;
1455         struct i915_page_directory_pointer *pdp;
1456         u64 from = start;
1457         u32 pml4e;
1458         int ret;
1459
1460         gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1461                 if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1462                         pdp = alloc_pdp(vm);
1463                         if (IS_ERR(pdp))
1464                                 goto unwind;
1465
1466                         gen8_initialize_pdp(vm, pdp);
1467                         gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1468                 }
1469
1470                 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1471                 if (unlikely(ret))
1472                         goto unwind_pdp;
1473         }
1474
1475         return 0;
1476
1477 unwind_pdp:
1478         if (!pdp->used_pdpes) {
1479                 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1480                 free_pdp(vm, pdp);
1481         }
1482 unwind:
1483         gen8_ppgtt_clear_4lvl(vm, from, start - from);
1484         return -ENOMEM;
1485 }
1486
1487 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1488 {
1489         struct i915_address_space *vm = &ppgtt->vm;
1490         struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1491         struct i915_page_directory *pd;
1492         u64 start = 0, length = ppgtt->vm.total;
1493         u64 from = start;
1494         unsigned int pdpe;
1495
1496         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1497                 pd = alloc_pd(vm);
1498                 if (IS_ERR(pd))
1499                         goto unwind;
1500
1501                 gen8_initialize_pd(vm, pd);
1502                 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1503                 pdp->used_pdpes++;
1504         }
1505
1506         pdp->used_pdpes++; /* never remove */
1507         return 0;
1508
1509 unwind:
1510         start -= from;
1511         gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1512                 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1513                 free_pd(vm, pd);
1514         }
1515         pdp->used_pdpes = 0;
1516         return -ENOMEM;
1517 }
1518
1519 static void ppgtt_init(struct drm_i915_private *i915,
1520                        struct i915_hw_ppgtt *ppgtt)
1521 {
1522         kref_init(&ppgtt->ref);
1523
1524         ppgtt->vm.i915 = i915;
1525         ppgtt->vm.dma = &i915->drm.pdev->dev;
1526         ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
1527
1528         i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
1529
1530         ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
1531         ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
1532         ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
1533         ppgtt->vm.vma_ops.clear_pages = clear_pages;
1534 }
1535
1536 /*
1537  * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP registers,
1538  * with a net effect resembling a 2-level page table in normal x86 terms. Each
1539  * PDP entry covers 1GB of memory; 4 PDPEs * 512 PDEs * 512 PTEs * 4096 bytes
1540  * equals the 4GB legacy 32b address space.
1541  *
1542  */
1543 static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1544 {
1545         struct i915_hw_ppgtt *ppgtt;
1546         int err;
1547
1548         ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1549         if (!ppgtt)
1550                 return ERR_PTR(-ENOMEM);
1551
1552         ppgtt_init(i915, ppgtt);
1553
1554         /*
1555          * From bdw, there is hw support for read-only pages in the PPGTT.
1556          *
1557          * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
1558          * for now.
1559          */
1560         ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
1561
1562         /* There are only a few exceptions for gen >= 6: chv and bxt.
1563          * And we are not sure about the latter, so play safe for now.
1564          */
1565         if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1566                 ppgtt->vm.pt_kmap_wc = true;
1567
1568         err = gen8_init_scratch(&ppgtt->vm);
1569         if (err)
1570                 goto err_free;
1571
1572         if (i915_vm_is_4lvl(&ppgtt->vm)) {
1573                 err = setup_px(&ppgtt->vm, &ppgtt->pml4);
1574                 if (err)
1575                         goto err_scratch;
1576
1577                 gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
1578
1579                 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1580                 ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
1581                 ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
1582         } else {
1583                 err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
1584                 if (err)
1585                         goto err_scratch;
1586
1587                 if (intel_vgpu_active(i915)) {
1588                         err = gen8_preallocate_top_level_pdp(ppgtt);
1589                         if (err) {
1590                                 __pdp_fini(&ppgtt->pdp);
1591                                 goto err_scratch;
1592                         }
1593                 }
1594
1595                 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1596                 ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
1597                 ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
1598         }
1599
1600         if (intel_vgpu_active(i915))
1601                 gen8_ppgtt_notify_vgt(ppgtt, true);
1602
1603         ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1604
1605         return ppgtt;
1606
1607 err_scratch:
1608         gen8_free_scratch(&ppgtt->vm);
1609 err_free:
1610         kfree(ppgtt);
1611         return ERR_PTR(err);
1612 }
1613
1614 /* Write the PDE at index @pde so that it points at the page table @pt */
1615 static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
1616                                   const unsigned int pde,
1617                                   const struct i915_page_table *pt)
1618 {
1619         /* Caller needs to make sure the write completes if necessary */
1620         iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1621                   ppgtt->pd_addr + pde);
1622 }
1623
1624 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1625 {
1626         struct intel_engine_cs *engine;
1627         u32 ecochk, ecobits;
1628         enum intel_engine_id id;
1629
1630         ecobits = I915_READ(GAC_ECO_BITS);
1631         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1632
1633         ecochk = I915_READ(GAM_ECOCHK);
1634         if (IS_HASWELL(dev_priv)) {
1635                 ecochk |= ECOCHK_PPGTT_WB_HSW;
1636         } else {
1637                 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1638                 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1639         }
1640         I915_WRITE(GAM_ECOCHK, ecochk);
1641
1642         for_each_engine(engine, dev_priv, id) {
1643                 /* GFX_MODE is per-ring on gen7+ */
1644                 I915_WRITE(RING_MODE_GEN7(engine),
1645                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1646         }
1647 }
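/*
 * A note on the masked writes above (an assumption about the register
 * convention, not spelled out in this file): GFX_MODE and the per-ring
 * RING_MODE registers treat the upper 16 bits as a write mask for the lower
 * 16 bits, so _MASKED_BIT_ENABLE(bit) is roughly (bit << 16) | bit. If
 * GFX_PPGTT_ENABLE is BIT(9), the value written would be 0x02000200,
 * flipping only that one mode bit and leaving the rest untouched.
 */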
1648
1649 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1650 {
1651         u32 ecochk, gab_ctl, ecobits;
1652
1653         ecobits = I915_READ(GAC_ECO_BITS);
1654         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1655                    ECOBITS_PPGTT_CACHE64B);
1656
1657         gab_ctl = I915_READ(GAB_CTL);
1658         I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1659
1660         ecochk = I915_READ(GAM_ECOCHK);
1661         I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1662
1663         if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
1664                 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1665 }
1666
1667 /* PPGTT support for Sandybridge/Gen6 and later */
1668 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1669                                    u64 start, u64 length)
1670 {
1671         struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1672         unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1673         unsigned int pde = first_entry / GEN6_PTES;
1674         unsigned int pte = first_entry % GEN6_PTES;
1675         unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1676         const gen6_pte_t scratch_pte = vm->scratch_pte;
1677
1678         while (num_entries) {
1679                 struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
1680                 const unsigned int count = min(num_entries, GEN6_PTES - pte);
1681                 gen6_pte_t *vaddr;
1682
1683                 GEM_BUG_ON(pt == vm->scratch_pt);
1684
1685                 num_entries -= count;
1686
1687                 GEM_BUG_ON(count > pt->used_ptes);
1688                 pt->used_ptes -= count;
1689                 if (!pt->used_ptes)
1690                         ppgtt->scan_for_unused_pt = true;
1691
1692                 /*
1693                  * Note that the hw doesn't support removing PDEs on the fly
1694                  * (they are cached inside the context with no means to
1695                  * invalidate the cache), so we can only reset the PTE
1696                  * entries back to scratch.
1697                  */
1698
1699                 vaddr = kmap_atomic_px(pt);
1700                 memset32(vaddr + pte, scratch_pte, count);
1701                 kunmap_atomic(vaddr);
1702
1703                 pte = 0;
1704         }
1705 }
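/*
 * Worked example for the index math above (illustrative only, assuming
 * GEN6_PTES is 1024, i.e. one 4KiB page of 4-byte PTEs): clearing from
 * start = 6MiB gives first_entry = 6MiB / 4KiB = 1536, so pde = 1536 / 1024
 * = 1 and pte = 1536 % 1024 = 512. The loop therefore starts half way into
 * the second page table and then continues table by table with pte reset
 * to 0.
 */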
1706
1707 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1708                                       struct i915_vma *vma,
1709                                       enum i915_cache_level cache_level,
1710                                       u32 flags)
1711 {
1712         struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1713         unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
1714         unsigned act_pt = first_entry / GEN6_PTES;
1715         unsigned act_pte = first_entry % GEN6_PTES;
1716         const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1717         struct sgt_dma iter = sgt_dma(vma);
1718         gen6_pte_t *vaddr;
1719
1720         GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
1721
1722         vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1723         do {
1724                 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1725
1726                 iter.dma += I915_GTT_PAGE_SIZE;
1727                 if (iter.dma == iter.max) {
1728                         iter.sg = __sg_next(iter.sg);
1729                         if (!iter.sg)
1730                                 break;
1731
1732                         iter.dma = sg_dma_address(iter.sg);
1733                         iter.max = iter.dma + iter.sg->length;
1734                 }
1735
1736                 if (++act_pte == GEN6_PTES) {
1737                         kunmap_atomic(vaddr);
1738                         vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1739                         act_pte = 0;
1740                 }
1741         } while (1);
1742         kunmap_atomic(vaddr);
1743
1744         vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1745 }
1746
1747 static int gen6_alloc_va_range(struct i915_address_space *vm,
1748                                u64 start, u64 length)
1749 {
1750         struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1751         struct i915_page_table *pt;
1752         intel_wakeref_t wakeref;
1753         u64 from = start;
1754         unsigned int pde;
1755         bool flush = false;
1756
1757         wakeref = intel_runtime_pm_get(vm->i915);
1758
1759         gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
1760                 const unsigned int count = gen6_pte_count(start, length);
1761
1762                 if (pt == vm->scratch_pt) {
1763                         pt = alloc_pt(vm);
1764                         if (IS_ERR(pt))
1765                                 goto unwind_out;
1766
1767                         gen6_initialize_pt(vm, pt);
1768                         ppgtt->base.pd.page_table[pde] = pt;
1769
1770                         if (i915_vma_is_bound(ppgtt->vma,
1771                                               I915_VMA_GLOBAL_BIND)) {
1772                                 gen6_write_pde(ppgtt, pde, pt);
1773                                 flush = true;
1774                         }
1775
1776                         GEM_BUG_ON(pt->used_ptes);
1777                 }
1778
1779                 pt->used_ptes += count;
1780         }
1781
1782         if (flush) {
1783                 mark_tlbs_dirty(&ppgtt->base);
1784                 gen6_ggtt_invalidate(vm->i915);
1785         }
1786
1787         intel_runtime_pm_put(vm->i915, wakeref);
1788
1789         return 0;
1790
1791 unwind_out:
1792         intel_runtime_pm_put(vm->i915, wakeref);
1793         gen6_ppgtt_clear_range(vm, from, start - from);
1794         return -ENOMEM;
1795 }
1796
1797 static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1798 {
1799         struct i915_address_space * const vm = &ppgtt->base.vm;
1800         struct i915_page_table *unused;
1801         u32 pde;
1802         int ret;
1803
1804         ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1805         if (ret)
1806                 return ret;
1807
1808         vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1809                                          I915_CACHE_NONE,
1810                                          PTE_READ_ONLY);
1811
1812         vm->scratch_pt = alloc_pt(vm);
1813         if (IS_ERR(vm->scratch_pt)) {
1814                 cleanup_scratch_page(vm);
1815                 return PTR_ERR(vm->scratch_pt);
1816         }
1817
1818         gen6_initialize_pt(vm, vm->scratch_pt);
1819         gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
1820                 ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
1821
1822         return 0;
1823 }
1824
1825 static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
1826 {
1827         free_pt(vm, vm->scratch_pt);
1828         cleanup_scratch_page(vm);
1829 }
1830
1831 static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
1832 {
1833         struct i915_page_table *pt;
1834         u32 pde;
1835
1836         gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1837                 if (pt != ppgtt->base.vm.scratch_pt)
1838                         free_pt(&ppgtt->base.vm, pt);
1839 }
1840
1841 struct gen6_ppgtt_cleanup_work {
1842         struct work_struct base;
1843         struct i915_vma *vma;
1844 };
1845
1846 static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
1847 {
1848         struct gen6_ppgtt_cleanup_work *work =
1849                 container_of(wrk, typeof(*work), base);
1850         /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
1851         struct drm_i915_private *i915 = work->vma->vm->i915;
1852
1853         mutex_lock(&i915->drm.struct_mutex);
1854         i915_vma_destroy(work->vma);
1855         mutex_unlock(&i915->drm.struct_mutex);
1856
1857         kfree(work);
1858 }
1859
1860 static int nop_set_pages(struct i915_vma *vma)
1861 {
1862         return -ENODEV;
1863 }
1864
1865 static void nop_clear_pages(struct i915_vma *vma)
1866 {
1867 }
1868
1869 static int nop_bind(struct i915_vma *vma,
1870                     enum i915_cache_level cache_level,
1871                     u32 unused)
1872 {
1873         return -ENODEV;
1874 }
1875
1876 static void nop_unbind(struct i915_vma *vma)
1877 {
1878 }
1879
1880 static const struct i915_vma_ops nop_vma_ops = {
1881         .set_pages = nop_set_pages,
1882         .clear_pages = nop_clear_pages,
1883         .bind_vma = nop_bind,
1884         .unbind_vma = nop_unbind,
1885 };
1886
1887 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1888 {
1889         struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1890         struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
1891
1892         /* FIXME remove the struct_mutex to bring the locking under control */
1893         INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
1894         work->vma = ppgtt->vma;
1895         work->vma->ops = &nop_vma_ops;
1896         schedule_work(&work->base);
1897
1898         gen6_ppgtt_free_pd(ppgtt);
1899         gen6_ppgtt_free_scratch(vm);
1900 }
1901
1902 static int pd_vma_set_pages(struct i915_vma *vma)
1903 {
1904         vma->pages = ERR_PTR(-ENODEV);
1905         return 0;
1906 }
1907
1908 static void pd_vma_clear_pages(struct i915_vma *vma)
1909 {
1910         GEM_BUG_ON(!vma->pages);
1911
1912         vma->pages = NULL;
1913 }
1914
1915 static int pd_vma_bind(struct i915_vma *vma,
1916                        enum i915_cache_level cache_level,
1917                        u32 unused)
1918 {
1919         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1920         struct gen6_hw_ppgtt *ppgtt = vma->private;
1921         u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1922         struct i915_page_table *pt;
1923         unsigned int pde;
1924
1925         ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
1926         ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
1927
1928         gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1929                 gen6_write_pde(ppgtt, pde, pt);
1930
1931         mark_tlbs_dirty(&ppgtt->base);
1932         gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1933
1934         return 0;
1935 }
1936
1937 static void pd_vma_unbind(struct i915_vma *vma)
1938 {
1939         struct gen6_hw_ppgtt *ppgtt = vma->private;
1940         struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
1941         struct i915_page_table *pt;
1942         unsigned int pde;
1943
1944         if (!ppgtt->scan_for_unused_pt)
1945                 return;
1946
1947         /* Free all no longer used page tables */
1948         gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
1949                 if (pt->used_ptes || pt == scratch_pt)
1950                         continue;
1951
1952                 free_pt(&ppgtt->base.vm, pt);
1953                 ppgtt->base.pd.page_table[pde] = scratch_pt;
1954         }
1955
1956         ppgtt->scan_for_unused_pt = false;
1957 }
1958
1959 static const struct i915_vma_ops pd_vma_ops = {
1960         .set_pages = pd_vma_set_pages,
1961         .clear_pages = pd_vma_clear_pages,
1962         .bind_vma = pd_vma_bind,
1963         .unbind_vma = pd_vma_unbind,
1964 };
1965
1966 static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
1967 {
1968         struct drm_i915_private *i915 = ppgtt->base.vm.i915;
1969         struct i915_ggtt *ggtt = &i915->ggtt;
1970         struct i915_vma *vma;
1971
1972         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
1973         GEM_BUG_ON(size > ggtt->vm.total);
1974
1975         vma = i915_vma_alloc();
1976         if (!vma)
1977                 return ERR_PTR(-ENOMEM);
1978
1979         i915_active_init(i915, &vma->active, NULL);
1980         INIT_ACTIVE_REQUEST(&vma->last_fence);
1981
1982         vma->vm = &ggtt->vm;
1983         vma->ops = &pd_vma_ops;
1984         vma->private = ppgtt;
1985
1986         vma->size = size;
1987         vma->fence_size = size;
1988         vma->flags = I915_VMA_GGTT;
1989         vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
1990
1991         INIT_LIST_HEAD(&vma->obj_link);
1992
1993         mutex_lock(&vma->vm->mutex);
1994         list_add(&vma->vm_link, &vma->vm->unbound_list);
1995         mutex_unlock(&vma->vm->mutex);
1996
1997         return vma;
1998 }
1999
2000 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2001 {
2002         struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2003         int err;
2004
2005         GEM_BUG_ON(ppgtt->base.vm.closed);
2006
2007         /*
2008          * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
2009          * which will be pinned into every active context.
2010          * (When vma->pin_count becomes atomic, I expect we will naturally
2011          * need a larger, unpacked, type and kill this redundancy.)
2012          */
2013         if (ppgtt->pin_count++)
2014                 return 0;
2015
2016         /*
2017          * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2018          * allocator works in address space sizes, so it's multiplied by page
2019          * size. We allocate at the top of the GTT to avoid fragmentation.
2020          */
2021         err = i915_vma_pin(ppgtt->vma,
2022                            0, GEN6_PD_ALIGN,
2023                            PIN_GLOBAL | PIN_HIGH);
2024         if (err)
2025                 goto unpin;
2026
2027         return 0;
2028
2029 unpin:
2030         ppgtt->pin_count = 0;
2031         return err;
2032 }
2033
2034 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
2035 {
2036         struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2037
2038         GEM_BUG_ON(!ppgtt->pin_count);
2039         if (--ppgtt->pin_count)
2040                 return;
2041
2042         i915_vma_unpin(ppgtt->vma);
2043 }
2044
2045 void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
2046 {
2047         struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2048
2049         if (!ppgtt->pin_count)
2050                 return;
2051
2052         ppgtt->pin_count = 0;
2053         i915_vma_unpin(ppgtt->vma);
2054 }
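/*
 * Usage sketch for the pin interface above (hypothetical caller, shown only
 * to illustrate the counted-pin contract):
 *
 *	err = gen6_ppgtt_pin(base);
 *	if (err)
 *		return err;
 *	... submit work that relies on the PD staying resident in the GGTT ...
 *	gen6_ppgtt_unpin(base);
 *
 * A failed first pin resets pin_count to zero, so no unpin is owed on the
 * error path.
 */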
2055
2056 static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
2057 {
2058         struct i915_ggtt * const ggtt = &i915->ggtt;
2059         struct gen6_hw_ppgtt *ppgtt;
2060         int err;
2061
2062         ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2063         if (!ppgtt)
2064                 return ERR_PTR(-ENOMEM);
2065
2066         ppgtt_init(i915, &ppgtt->base);
2067
2068         ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
2069         ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
2070         ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
2071         ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
2072
2073         ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
2074
2075         ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
2076         if (!ppgtt->work) {
2077                 err = -ENOMEM;
2078                 goto err_free;
2079         }
2080
2081         err = gen6_ppgtt_init_scratch(ppgtt);
2082         if (err)
2083                 goto err_work;
2084
2085         ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
2086         if (IS_ERR(ppgtt->vma)) {
2087                 err = PTR_ERR(ppgtt->vma);
2088                 goto err_scratch;
2089         }
2090
2091         return &ppgtt->base;
2092
2093 err_scratch:
2094         gen6_ppgtt_free_scratch(&ppgtt->base.vm);
2095 err_work:
2096         kfree(ppgtt->work);
2097 err_free:
2098         kfree(ppgtt);
2099         return ERR_PTR(err);
2100 }
2101
2102 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
2103 {
2104         /* This function is for GTT-related workarounds. It is called on driver
2105          * load and after a GPU reset, so you can place workarounds here even if
2106          * they get overwritten by a GPU reset.
2107          */
2108         /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2109         if (IS_BROADWELL(dev_priv))
2110                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2111         else if (IS_CHERRYVIEW(dev_priv))
2112                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2113         else if (IS_GEN9_LP(dev_priv))
2114                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2115         else if (INTEL_GEN(dev_priv) >= 9)
2116                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2117
2118         /*
2119          * To support 64K PTEs we need to first enable the use of the
2120          * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
2121          * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2122          * shouldn't be needed after GEN10.
2123          *
2124          * 64K pages were first introduced from BDW+, although technically they
2125          * only *work* from gen9+. For pre-BDW we instead have the option for
2126          * 32K pages, but we don't currently have any support for it in our
2127          * driver.
2128          */
2129         if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2130             INTEL_GEN(dev_priv) <= 10)
2131                 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2132                            I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2133                            GAMW_ECO_ENABLE_64K_IPS_FIELD);
2134 }
2135
2136 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2137 {
2138         gtt_write_workarounds(dev_priv);
2139
2140         if (IS_GEN(dev_priv, 6))
2141                 gen6_ppgtt_enable(dev_priv);
2142         else if (IS_GEN(dev_priv, 7))
2143                 gen7_ppgtt_enable(dev_priv);
2144
2145         return 0;
2146 }
2147
2148 static struct i915_hw_ppgtt *
2149 __hw_ppgtt_create(struct drm_i915_private *i915)
2150 {
2151         if (INTEL_GEN(i915) < 8)
2152                 return gen6_ppgtt_create(i915);
2153         else
2154                 return gen8_ppgtt_create(i915);
2155 }
2156
2157 struct i915_hw_ppgtt *
2158 i915_ppgtt_create(struct drm_i915_private *i915)
2159 {
2160         struct i915_hw_ppgtt *ppgtt;
2161
2162         ppgtt = __hw_ppgtt_create(i915);
2163         if (IS_ERR(ppgtt))
2164                 return ppgtt;
2165
2166         trace_i915_ppgtt_create(&ppgtt->vm);
2167
2168         return ppgtt;
2169 }
2170
2171 static void ppgtt_destroy_vma(struct i915_address_space *vm)
2172 {
2173         struct list_head *phases[] = {
2174                 &vm->bound_list,
2175                 &vm->unbound_list,
2176                 NULL,
2177         }, **phase;
2178
2179         vm->closed = true;
2180         for (phase = phases; *phase; phase++) {
2181                 struct i915_vma *vma, *vn;
2182
2183                 list_for_each_entry_safe(vma, vn, *phase, vm_link)
2184                         i915_vma_destroy(vma);
2185         }
2186 }
2187
2188 void i915_ppgtt_release(struct kref *kref)
2189 {
2190         struct i915_hw_ppgtt *ppgtt =
2191                 container_of(kref, struct i915_hw_ppgtt, ref);
2192
2193         trace_i915_ppgtt_release(&ppgtt->vm);
2194
2195         ppgtt_destroy_vma(&ppgtt->vm);
2196
2197         GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
2198         GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
2199
2200         ppgtt->vm.cleanup(&ppgtt->vm);
2201         i915_address_space_fini(&ppgtt->vm);
2202         kfree(ppgtt);
2203 }
2204
2205 /* Certain Gen5 chipsets require idling the GPU before
2206  * unmapping anything from the GTT when VT-d is enabled.
2207  */
2208 static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2209 {
2210         /* Query intel_iommu to see if we need the workaround. Presumably that
2211          * was loaded first.
2212          */
2213         return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
2214 }
2215
2216 static void gen6_check_faults(struct drm_i915_private *dev_priv)
2217 {
2218         struct intel_engine_cs *engine;
2219         enum intel_engine_id id;
2220         u32 fault;
2221
2222         for_each_engine(engine, dev_priv, id) {
2223                 fault = I915_READ(RING_FAULT_REG(engine));
2224                 if (fault & RING_FAULT_VALID) {
2225                         DRM_DEBUG_DRIVER("Unexpected fault\n"
2226                                          "\tAddr: 0x%08lx\n"
2227                                          "\tAddress space: %s\n"
2228                                          "\tSource ID: %d\n"
2229                                          "\tType: %d\n",
2230                                          fault & PAGE_MASK,
2231                                          fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2232                                          RING_FAULT_SRCID(fault),
2233                                          RING_FAULT_FAULT_TYPE(fault));
2234                 }
2235         }
2236 }
2237
2238 static void gen8_check_faults(struct drm_i915_private *dev_priv)
2239 {
2240         u32 fault = I915_READ(GEN8_RING_FAULT_REG);
2241
2242         if (fault & RING_FAULT_VALID) {
2243                 u32 fault_data0, fault_data1;
2244                 u64 fault_addr;
2245
2246                 fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
2247                 fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
2248                 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
2249                              ((u64)fault_data0 << 12);
2250
2251                 DRM_DEBUG_DRIVER("Unexpected fault\n"
2252                                  "\tAddr: 0x%08x_%08x\n"
2253                                  "\tAddress space: %s\n"
2254                                  "\tEngine ID: %d\n"
2255                                  "\tSource ID: %d\n"
2256                                  "\tType: %d\n",
2257                                  upper_32_bits(fault_addr),
2258                                  lower_32_bits(fault_addr),
2259                                  fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
2260                                  GEN8_RING_FAULT_ENGINE_ID(fault),
2261                                  RING_FAULT_SRCID(fault),
2262                                  RING_FAULT_FAULT_TYPE(fault));
2263         }
2264 }
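/*
 * Worked example for the address reconstruction above (illustrative only,
 * assuming FAULT_VA_HIGH_BITS selects the low nibble of DATA1): DATA0 holds
 * address bits 43:12 and DATA1 supplies bits 47:44, so data0 = 0x00123456
 * and (data1 & FAULT_VA_HIGH_BITS) = 0x3 reconstruct to
 * (0x3ull << 44) | (0x123456ull << 12) = 0x300123456000.
 */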
2265
2266 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2267 {
2268         /* From GEN8 onwards we only have one 'All Engine Fault Register' */
2269         if (INTEL_GEN(dev_priv) >= 8)
2270                 gen8_check_faults(dev_priv);
2271         else if (INTEL_GEN(dev_priv) >= 6)
2272                 gen6_check_faults(dev_priv);
2273         else
2274                 return;
2275
2276         i915_clear_error_registers(dev_priv);
2277 }
2278
2279 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2280 {
2281         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2282
2283         /* Don't bother messing with faults pre GEN6 as we have little
2284          * documentation supporting that it's a good idea.
2285          */
2286         if (INTEL_GEN(dev_priv) < 6)
2287                 return;
2288
2289         i915_check_and_clear_faults(dev_priv);
2290
2291         ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2292
2293         i915_ggtt_invalidate(dev_priv);
2294 }
2295
2296 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2297                                struct sg_table *pages)
2298 {
2299         do {
2300                 if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2301                                      pages->sgl, pages->nents,
2302                                      PCI_DMA_BIDIRECTIONAL,
2303                                      DMA_ATTR_NO_WARN))
2304                         return 0;
2305
2306                 /*
2307                  * If the DMA remap fails, one cause can be that we have
2308                  * too many objects pinned in a small remapping table,
2309                  * such as swiotlb. Incrementally purge all other objects and
2310                  * try again - if there are no more pages to remove from
2311                  * the DMA remapper, i915_gem_shrink will return 0.
2312                  */
2313                 GEM_BUG_ON(obj->mm.pages == pages);
2314         } while (i915_gem_shrink(to_i915(obj->base.dev),
2315                                  obj->base.size >> PAGE_SHIFT, NULL,
2316                                  I915_SHRINK_BOUND |
2317                                  I915_SHRINK_UNBOUND));
2318
2319         return -ENOSPC;
2320 }
2321
2322 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2323 {
2324         writeq(pte, addr);
2325 }
2326
2327 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2328                                   dma_addr_t addr,
2329                                   u64 offset,
2330                                   enum i915_cache_level level,
2331                                   u32 unused)
2332 {
2333         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2334         gen8_pte_t __iomem *pte =
2335                 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2336
2337         gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
2338
2339         ggtt->invalidate(vm->i915);
2340 }
2341
2342 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2343                                      struct i915_vma *vma,
2344                                      enum i915_cache_level level,
2345                                      u32 flags)
2346 {
2347         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2348         struct sgt_iter sgt_iter;
2349         gen8_pte_t __iomem *gtt_entries;
2350         const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
2351         dma_addr_t addr;
2352
2353         /*
2354          * Note that we ignore PTE_READ_ONLY here. The caller must be careful
2355          * not to allow the user to override access to a read only page.
2356          */
2357
2358         gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2359         gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
2360         for_each_sgt_dma(addr, sgt_iter, vma->pages)
2361                 gen8_set_pte(gtt_entries++, pte_encode | addr);
2362
2363         /*
2364          * We want to flush the TLBs only after we're certain all the PTE
2365          * updates have finished.
2366          */
2367         ggtt->invalidate(vm->i915);
2368 }
2369
2370 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2371                                   dma_addr_t addr,
2372                                   u64 offset,
2373                                   enum i915_cache_level level,
2374                                   u32 flags)
2375 {
2376         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2377         gen6_pte_t __iomem *pte =
2378                 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2379
2380         iowrite32(vm->pte_encode(addr, level, flags), pte);
2381
2382         ggtt->invalidate(vm->i915);
2383 }
2384
2385 /*
2386  * Binds an object into the global gtt with the specified cache level. The object
2387  * will be accessible to the GPU via commands whose operands reference offsets
2388  * within the global GTT, as well as accessible by the CPU through the GMADR
2389  * mapped BAR (dev_priv->mm.gtt->gtt).
2390  */
2391 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2392                                      struct i915_vma *vma,
2393                                      enum i915_cache_level level,
2394                                      u32 flags)
2395 {
2396         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2397         gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2398         unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
2399         struct sgt_iter iter;
2400         dma_addr_t addr;
2401         for_each_sgt_dma(addr, iter, vma->pages)
2402                 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2403
2404         /*
2405          * We want to flush the TLBs only after we're certain all the PTE
2406          * updates have finished.
2407          */
2408         ggtt->invalidate(vm->i915);
2409 }
2410
2411 static void nop_clear_range(struct i915_address_space *vm,
2412                             u64 start, u64 length)
2413 {
2414 }
2415
2416 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2417                                   u64 start, u64 length)
2418 {
2419         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2420         unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2421         unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2422         const gen8_pte_t scratch_pte = vm->scratch_pte;
2423         gen8_pte_t __iomem *gtt_base =
2424                 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2425         const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2426         int i;
2427
2428         if (WARN(num_entries > max_entries,
2429                  "First entry = %d; Num entries = %d (max=%d)\n",
2430                  first_entry, num_entries, max_entries))
2431                 num_entries = max_entries;
2432
2433         for (i = 0; i < num_entries; i++)
2434                 gen8_set_pte(&gtt_base[i], scratch_pte);
2435 }
2436
2437 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2438 {
2439         struct drm_i915_private *dev_priv = vm->i915;
2440
2441         /*
2442          * Make sure the internal GAM fifo has been cleared of all GTT
2443          * writes before exiting stop_machine(). This guarantees that
2444          * any aperture accesses waiting to start in another process
2445          * cannot back up behind the GTT writes causing a hang.
2446          * The register can be any arbitrary GAM register.
2447          */
2448         POSTING_READ(GFX_FLSH_CNTL_GEN6);
2449 }
2450
2451 struct insert_page {
2452         struct i915_address_space *vm;
2453         dma_addr_t addr;
2454         u64 offset;
2455         enum i915_cache_level level;
2456 };
2457
2458 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2459 {
2460         struct insert_page *arg = _arg;
2461
2462         gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2463         bxt_vtd_ggtt_wa(arg->vm);
2464
2465         return 0;
2466 }
2467
2468 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2469                                           dma_addr_t addr,
2470                                           u64 offset,
2471                                           enum i915_cache_level level,
2472                                           u32 unused)
2473 {
2474         struct insert_page arg = { vm, addr, offset, level };
2475
2476         stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2477 }
2478
2479 struct insert_entries {
2480         struct i915_address_space *vm;
2481         struct i915_vma *vma;
2482         enum i915_cache_level level;
2483         u32 flags;
2484 };
2485
2486 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2487 {
2488         struct insert_entries *arg = _arg;
2489
2490         gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2491         bxt_vtd_ggtt_wa(arg->vm);
2492
2493         return 0;
2494 }
2495
2496 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2497                                              struct i915_vma *vma,
2498                                              enum i915_cache_level level,
2499                                              u32 flags)
2500 {
2501         struct insert_entries arg = { vm, vma, level, flags };
2502
2503         stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2504 }
2505
2506 struct clear_range {
2507         struct i915_address_space *vm;
2508         u64 start;
2509         u64 length;
2510 };
2511
2512 static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2513 {
2514         struct clear_range *arg = _arg;
2515
2516         gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2517         bxt_vtd_ggtt_wa(arg->vm);
2518
2519         return 0;
2520 }
2521
2522 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2523                                           u64 start,
2524                                           u64 length)
2525 {
2526         struct clear_range arg = { vm, start, length };
2527
2528         stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2529 }
2530
2531 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2532                                   u64 start, u64 length)
2533 {
2534         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2535         unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2536         unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2537         gen6_pte_t scratch_pte, __iomem *gtt_base =
2538                 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2539         const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2540         int i;
2541
2542         if (WARN(num_entries > max_entries,
2543                  "First entry = %d; Num entries = %d (max=%d)\n",
2544                  first_entry, num_entries, max_entries))
2545                 num_entries = max_entries;
2546
2547         scratch_pte = vm->scratch_pte;
2548
2549         for (i = 0; i < num_entries; i++)
2550                 iowrite32(scratch_pte, &gtt_base[i]);
2551 }
2552
2553 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2554                                   dma_addr_t addr,
2555                                   u64 offset,
2556                                   enum i915_cache_level cache_level,
2557                                   u32 unused)
2558 {
2559         unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2560                 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2561
2562         intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2563 }
2564
2565 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2566                                      struct i915_vma *vma,
2567                                      enum i915_cache_level cache_level,
2568                                      u32 unused)
2569 {
2570         unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2571                 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2572
2573         intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2574                                     flags);
2575 }
2576
2577 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2578                                   u64 start, u64 length)
2579 {
2580         intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2581 }
2582
2583 static int ggtt_bind_vma(struct i915_vma *vma,
2584                          enum i915_cache_level cache_level,
2585                          u32 flags)
2586 {
2587         struct drm_i915_private *i915 = vma->vm->i915;
2588         struct drm_i915_gem_object *obj = vma->obj;
2589         intel_wakeref_t wakeref;
2590         u32 pte_flags;
2591
2592         /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2593         pte_flags = 0;
2594         if (i915_gem_object_is_readonly(obj))
2595                 pte_flags |= PTE_READ_ONLY;
2596
2597         with_intel_runtime_pm(i915, wakeref)
2598                 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2599
2600         vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2601
2602         /*
2603          * Without aliasing PPGTT there's no difference between
2604          * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2605          * upgrade to both bound if we bind either to avoid double-binding.
2606          */
2607         vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2608
2609         return 0;
2610 }
2611
2612 static void ggtt_unbind_vma(struct i915_vma *vma)
2613 {
2614         struct drm_i915_private *i915 = vma->vm->i915;
2615         intel_wakeref_t wakeref;
2616
2617         with_intel_runtime_pm(i915, wakeref)
2618                 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2619 }
2620
2621 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2622                                  enum i915_cache_level cache_level,
2623                                  u32 flags)
2624 {
2625         struct drm_i915_private *i915 = vma->vm->i915;
2626         u32 pte_flags;
2627         int ret;
2628
2629         /* Currently applicable only to VLV */
2630         pte_flags = 0;
2631         if (i915_gem_object_is_readonly(vma->obj))
2632                 pte_flags |= PTE_READ_ONLY;
2633
2634         if (flags & I915_VMA_LOCAL_BIND) {
2635                 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2636
2637                 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2638                         ret = appgtt->vm.allocate_va_range(&appgtt->vm,
2639                                                            vma->node.start,
2640                                                            vma->size);
2641                         if (ret)
2642                                 return ret;
2643                 }
2644
2645                 appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
2646                                           pte_flags);
2647         }
2648
2649         if (flags & I915_VMA_GLOBAL_BIND) {
2650                 intel_wakeref_t wakeref;
2651
2652                 with_intel_runtime_pm(i915, wakeref) {
2653                         vma->vm->insert_entries(vma->vm, vma,
2654                                                 cache_level, pte_flags);
2655                 }
2656         }
2657
2658         return 0;
2659 }
2660
2661 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2662 {
2663         struct drm_i915_private *i915 = vma->vm->i915;
2664
2665         if (vma->flags & I915_VMA_GLOBAL_BIND) {
2666                 struct i915_address_space *vm = vma->vm;
2667                 intel_wakeref_t wakeref;
2668
2669                 with_intel_runtime_pm(i915, wakeref)
2670                         vm->clear_range(vm, vma->node.start, vma->size);
2671         }
2672
2673         if (vma->flags & I915_VMA_LOCAL_BIND) {
2674                 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
2675
2676                 vm->clear_range(vm, vma->node.start, vma->size);
2677         }
2678 }
2679
2680 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2681                                struct sg_table *pages)
2682 {
2683         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2684         struct device *kdev = &dev_priv->drm.pdev->dev;
2685         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2686
2687         if (unlikely(ggtt->do_idle_maps)) {
2688                 if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
2689                         DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2690                         /* Wait a bit, in hopes it avoids the hang */
2691                         udelay(10);
2692                 }
2693         }
2694
2695         dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2696 }
2697
2698 static int ggtt_set_pages(struct i915_vma *vma)
2699 {
2700         int ret;
2701
2702         GEM_BUG_ON(vma->pages);
2703
2704         ret = i915_get_ggtt_vma_pages(vma);
2705         if (ret)
2706                 return ret;
2707
2708         vma->page_sizes = vma->obj->mm.page_sizes;
2709
2710         return 0;
2711 }
2712
2713 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2714                                   unsigned long color,
2715                                   u64 *start,
2716                                   u64 *end)
2717 {
2718         if (node->allocated && node->color != color)
2719                 *start += I915_GTT_PAGE_SIZE;
2720
2721         /* Also leave a space between any object within the GTT and the
2722          * unallocated reserved node after the GTT, i.e. we use the color
2723          * adjustment to insert a guard page to prevent prefetches crossing over the
2724          * GTT boundary.
2725          */
2726         node = list_next_entry(node, node_list);
2727         if (node->color != color)
2728                 *end -= I915_GTT_PAGE_SIZE;
2729 }
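/*
 * Worked example for the colour adjustment above (illustrative only): when
 * drm_mm considers a hole of [0x10000, 0x20000) for an object of a given
 * cache colour, the start is bumped to 0x11000 if the allocated node just
 * before the hole has a different colour, and the end is pulled in to
 * 0x1f000 if the node just after does, so differently coloured neighbours
 * (and the trailing reserved node) are always separated by a 4KiB guard
 * page.
 */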
2730
2731 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2732 {
2733         struct i915_ggtt *ggtt = &i915->ggtt;
2734         struct i915_hw_ppgtt *ppgtt;
2735         int err;
2736
2737         ppgtt = i915_ppgtt_create(i915);
2738         if (IS_ERR(ppgtt))
2739                 return PTR_ERR(ppgtt);
2740
2741         if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2742                 err = -ENODEV;
2743                 goto err_ppgtt;
2744         }
2745
2746         /*
2747          * Note we only pre-allocate as far as the end of the global
2748          * GTT. On 48b / 4-level page-tables, the difference is very,
2749          * very significant! We have to preallocate as GVT/vgpu does
2750          * not like the page directory disappearing.
2751          */
2752         err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2753         if (err)
2754                 goto err_ppgtt;
2755
2756         i915->mm.aliasing_ppgtt = ppgtt;
2757
2758         GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2759         ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2760
2761         GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2762         ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2763
2764         return 0;
2765
2766 err_ppgtt:
2767         i915_ppgtt_put(ppgtt);
2768         return err;
2769 }
2770
2771 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2772 {
2773         struct i915_ggtt *ggtt = &i915->ggtt;
2774         struct i915_hw_ppgtt *ppgtt;
2775
2776         ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2777         if (!ppgtt)
2778                 return;
2779
2780         i915_ppgtt_put(ppgtt);
2781
2782         ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
2783         ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2784 }
2785
2786 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2787 {
2788         /* Let GEM manage all of the aperture.
2789          *
2790          * However, leave one page at the end still bound to the scratch page.
2791          * There are a number of places where the hardware apparently prefetches
2792          * past the end of the object, and we've seen multiple hangs with the
2793          * GPU head pointer stuck in a batchbuffer bound at the last page of the
2794          * aperture.  One page should be enough to keep any prefetching inside
2795          * of the aperture.
2796          */
2797         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2798         unsigned long hole_start, hole_end;
2799         struct drm_mm_node *entry;
2800         int ret;
2801
2802         /*
2803          * GuC requires all resources that we're sharing with it to be placed in
2804          * non-WOPCM memory. If GuC is not present or not in use we still need a
2805          * small bias as ring wraparound at offset 0 sometimes hangs. No idea
2806          * why.
2807          */
2808         ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
2809                                intel_guc_reserved_gtt_size(&dev_priv->guc));
2810
2811         ret = intel_vgt_balloon(dev_priv);
2812         if (ret)
2813                 return ret;
2814
2815         /* Reserve a mappable slot for our lockless error capture */
2816         ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2817                                           PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2818                                           0, ggtt->mappable_end,
2819                                           DRM_MM_INSERT_LOW);
2820         if (ret)
2821                 return ret;
2822
2823         if (USES_GUC(dev_priv)) {
2824                 ret = intel_guc_reserve_ggtt_top(&dev_priv->guc);
2825                 if (ret)
2826                         goto err_reserve;
2827         }
2828
2829         /* Clear any non-preallocated blocks */
2830         drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2831                 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2832                               hole_start, hole_end);
2833                 ggtt->vm.clear_range(&ggtt->vm, hole_start,
2834                                      hole_end - hole_start);
2835         }
2836
2837         /* And finally clear the reserved guard page */
2838         ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2839
2840         if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
2841                 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2842                 if (ret)
2843                         goto err_appgtt;
2844         }
2845
2846         return 0;
2847
2848 err_appgtt:
2849         intel_guc_release_ggtt_top(&dev_priv->guc);
2850 err_reserve:
2851         drm_mm_remove_node(&ggtt->error_capture);
2852         return ret;
2853 }
2854
2855 /**
2856  * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2857  * @dev_priv: i915 device
2858  */
2859 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2860 {
2861         struct i915_ggtt *ggtt = &dev_priv->ggtt;
2862         struct i915_vma *vma, *vn;
2863         struct pagevec *pvec;
2864
2865         ggtt->vm.closed = true;
2866
2867         mutex_lock(&dev_priv->drm.struct_mutex);
2868         i915_gem_fini_aliasing_ppgtt(dev_priv);
2869
2870         list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
2871                 WARN_ON(i915_vma_unbind(vma));
2872
2873         if (drm_mm_node_allocated(&ggtt->error_capture))
2874                 drm_mm_remove_node(&ggtt->error_capture);
2875
2876         intel_guc_release_ggtt_top(&dev_priv->guc);
2877
2878         if (drm_mm_initialized(&ggtt->vm.mm)) {
2879                 intel_vgt_deballoon(dev_priv);
2880                 i915_address_space_fini(&ggtt->vm);
2881         }
2882
2883         ggtt->vm.cleanup(&ggtt->vm);
2884
2885         pvec = &dev_priv->mm.wc_stash.pvec;
2886         if (pvec->nr) {
2887                 set_pages_array_wb(pvec->pages, pvec->nr);
2888                 __pagevec_release(pvec);
2889         }
2890
2891         mutex_unlock(&dev_priv->drm.struct_mutex);
2892
2893         arch_phys_wc_del(ggtt->mtrr);
2894         io_mapping_fini(&ggtt->iomap);
2895
2896         i915_gem_cleanup_stolen(dev_priv);
2897 }
2898
2899 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2900 {
2901         snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2902         snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2903         return snb_gmch_ctl << 20;
2904 }
2905
2906 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2907 {
2908         bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2909         bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2910         if (bdw_gmch_ctl)
2911                 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2912
2913 #ifdef CONFIG_X86_32
2914         /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
2915         if (bdw_gmch_ctl > 4)
2916                 bdw_gmch_ctl = 4;
2917 #endif
2918
2919         return bdw_gmch_ctl << 20;
2920 }
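/*
 * Worked example (illustrative only): the BDW+ GGMS field is an exponent,
 * so a raw value of 3 decodes to 1 << 3 = 8, i.e. an 8MB GSM. Assuming
 * 8-byte gen8 PTEs, that is 8MB / 8 * 4KiB = 4GB of mappable GGTT, which is
 * also why the CONFIG_X86_32 clamp to 4 (4MB of PTEs) above matches the 2GB
 * limit mentioned in its comment.
 */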
2921
2922 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2923 {
2924         gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2925         gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2926
2927         if (gmch_ctrl)
2928                 return 1 << (20 + gmch_ctrl);
2929
2930         return 0;
2931 }
2932
2933 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2934 {
2935         struct drm_i915_private *dev_priv = ggtt->vm.i915;
2936         struct pci_dev *pdev = dev_priv->drm.pdev;
2937         phys_addr_t phys_addr;
2938         int ret;
2939
2940         /* For modern GENs the PTEs and register space are split in the BAR */
2941         phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2942
2943         /*
2944          * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2945          * will be dropped. For WC mappings in general we have 64 byte burst
2946          * writes when the WC buffer is flushed, so we can't use it, but have to
2947          * resort to an uncached mapping. The WC issue is easily caught by the
2948          * readback check when writing GTT PTE entries.
2949          */
2950         if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
2951                 ggtt->gsm = ioremap_nocache(phys_addr, size);
2952         else
2953                 ggtt->gsm = ioremap_wc(phys_addr, size);
2954         if (!ggtt->gsm) {
2955                 DRM_ERROR("Failed to map the ggtt page table\n");
2956                 return -ENOMEM;
2957         }
2958
2959         ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
2960         if (ret) {
2961                 DRM_ERROR("Scratch setup failed\n");
2962                 /* iounmap will also get called at remove, but meh */
2963                 iounmap(ggtt->gsm);
2964                 return ret;
2965         }
2966
2967         ggtt->vm.scratch_pte =
2968                 ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
2969                                     I915_CACHE_NONE, 0);
2970
2971         return 0;
2972 }
2973
2974 static struct intel_ppat_entry *
2975 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
2976 {
2977         struct intel_ppat_entry *entry = &ppat->entries[index];
2978
2979         GEM_BUG_ON(index >= ppat->max_entries);
2980         GEM_BUG_ON(test_bit(index, ppat->used));
2981
2982         entry->ppat = ppat;
2983         entry->value = value;
2984         kref_init(&entry->ref);
2985         set_bit(index, ppat->used);
2986         set_bit(index, ppat->dirty);
2987
2988         return entry;
2989 }
2990
2991 static void __free_ppat_entry(struct intel_ppat_entry *entry)
2992 {
2993         struct intel_ppat *ppat = entry->ppat;
2994         unsigned int index = entry - ppat->entries;
2995
2996         GEM_BUG_ON(index >= ppat->max_entries);
2997         GEM_BUG_ON(!test_bit(index, ppat->used));
2998
2999         entry->value = ppat->clear_value;
3000         clear_bit(index, ppat->used);
3001         set_bit(index, ppat->dirty);
3002 }
3003
3004 /**
3005  * intel_ppat_get - get a usable PPAT entry
3006  * @i915: i915 device instance
3007  * @value: the PPAT value required by the caller
3008  *
3009  * The function searches for an existing PPAT entry that matches the
3010  * required value. If one matches perfectly, that existing PPAT entry is
3011  * used. If it only matches partially, the function checks whether a free
3012  * PPAT index is available. If so, it allocates a new PPAT index for the
3013  * required entry and updates the HW. If not, the partially matched entry
3014  * is used.
3015  */
3016 const struct intel_ppat_entry *
3017 intel_ppat_get(struct drm_i915_private *i915, u8 value)
3018 {
3019         struct intel_ppat *ppat = &i915->ppat;
3020         struct intel_ppat_entry *entry = NULL;
3021         unsigned int scanned, best_score;
3022         int i;
3023
3024         GEM_BUG_ON(!ppat->max_entries);
3025
3026         scanned = best_score = 0;
3027         for_each_set_bit(i, ppat->used, ppat->max_entries) {
3028                 unsigned int score;
3029
3030                 score = ppat->match(ppat->entries[i].value, value);
3031                 if (score > best_score) {
3032                         entry = &ppat->entries[i];
3033                         if (score == INTEL_PPAT_PERFECT_MATCH) {
3034                                 kref_get(&entry->ref);
3035                                 return entry;
3036                         }
3037                         best_score = score;
3038                 }
3039                 scanned++;
3040         }
3041
3042         if (scanned == ppat->max_entries) {
3043                 if (!entry)
3044                         return ERR_PTR(-ENOSPC);
3045
3046                 kref_get(&entry->ref);
3047                 return entry;
3048         }
3049
3050         i = find_first_zero_bit(ppat->used, ppat->max_entries);
3051         entry = __alloc_ppat_entry(ppat, i, value);
3052         ppat->update_hw(i915);
3053         return entry;
3054 }
3055
3056 static void release_ppat(struct kref *kref)
3057 {
3058         struct intel_ppat_entry *entry =
3059                 container_of(kref, struct intel_ppat_entry, ref);
3060         struct drm_i915_private *i915 = entry->ppat->i915;
3061
3062         __free_ppat_entry(entry);
3063         entry->ppat->update_hw(i915);
3064 }
3065
3066 /**
3067  * intel_ppat_put - put back a PPAT entry obtained from intel_ppat_get()
3068  * @entry: an intel PPAT entry
3069  *
3070  * Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of
3071  * the entry was dynamically allocated, its reference count is decreased.
3072  * Once the reference count drops to zero, the PPAT index becomes free again.
3073  */
3074 void intel_ppat_put(const struct intel_ppat_entry *entry)
3075 {
3076         struct intel_ppat *ppat = entry->ppat;
3077         unsigned int index = entry - ppat->entries;
3078
3079         GEM_BUG_ON(!ppat->max_entries);
3080
3081         kref_put(&ppat->entries[index].ref, release_ppat);
3082 }
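
/*
 * Illustrative usage sketch (editorial, not part of the driver): the two
 * helpers above are meant to be used as a get/put pair. The caller and the
 * requested PPAT value below are hypothetical; only intel_ppat_get(),
 * intel_ppat_put() and the ERR_PTR handling reflect the code above.
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	... use the index (entry - i915->ppat.entries) in the PTE encoding ...
 *	intel_ppat_put(entry);
 *
 * intel_ppat_get() either reuses a matching entry or allocates a free index
 * and writes it to the hardware; intel_ppat_put() drops the reference and,
 * once it reaches zero, frees a dynamically allocated index again.
 */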
3083
3084 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3085 {
3086         struct intel_ppat *ppat = &dev_priv->ppat;
3087         int i;
3088
3089         for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3090                 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3091                 clear_bit(i, ppat->dirty);
3092         }
3093 }
3094
3095 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3096 {
3097         struct intel_ppat *ppat = &dev_priv->ppat;
3098         u64 pat = 0;
3099         int i;
3100
3101         for (i = 0; i < ppat->max_entries; i++)
3102                 pat |= GEN8_PPAT(i, ppat->entries[i].value);
3103
3104         bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3105
3106         I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3107         I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3108 }
3109
3110 static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3111 {
3112         unsigned int score = 0;
3113         enum {
3114                 AGE_MATCH = BIT(0),
3115                 TC_MATCH = BIT(1),
3116                 CA_MATCH = BIT(2),
3117         };
3118
3119         /* Cache attribute has to be matched. */
3120         if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
3121                 return 0;
3122
3123         score |= CA_MATCH;
3124
3125         if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3126                 score |= TC_MATCH;
3127
3128         if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3129                 score |= AGE_MATCH;
3130
3131         if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3132                 return INTEL_PPAT_PERFECT_MATCH;
3133
3134         return score;
3135 }
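
/*
 * Worked example (editorial): assuming GEN8_PPAT_GET_CA/TC/AGE extract the
 * cacheability, target-cache and age fields respectively, comparing
 * src = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3) against
 * dst = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0) yields
 * CA_MATCH | TC_MATCH: a usable partial match. Only if the age also agreed
 * would the result be INTEL_PPAT_PERFECT_MATCH, while a WB vs WC mismatch
 * in the cacheability field scores 0 regardless of the other fields.
 */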
3136
3137 static unsigned int chv_private_pat_match(u8 src, u8 dst)
3138 {
3139         return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3140                 INTEL_PPAT_PERFECT_MATCH : 0;
3141 }
3142
3143 static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3144 {
3145         ppat->max_entries = 8;
3146         ppat->update_hw = cnl_private_pat_update_hw;
3147         ppat->match = bdw_private_pat_match;
3148         ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3149
3150         __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3151         __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3152         __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3153         __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3154         __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3155         __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3156         __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3157         __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3158 }
3159
3160 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3161  * bits. When using advanced contexts each context stores its own PAT, but
3162  * writing this data shouldn't be harmful even in those cases. */
3163 static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3164 {
3165         ppat->max_entries = 8;
3166         ppat->update_hw = bdw_private_pat_update_hw;
3167         ppat->match = bdw_private_pat_match;
3168         ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3169
3170         if (!HAS_PPGTT(ppat->i915)) {
3171                 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3172                  * so RTL will always use the value corresponding to
3173                  * pat_sel = 000".
3174                  * So let's disable cache for GGTT to avoid screen corruptions.
3175                  * MOCS still can be used though.
3176                  * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3177                  * before this patch, i.e. the same uncached + snooping access
3178                  * like on gen6/7 seems to be in effect.
3179                  * - So this just fixes blitter/render access. Again it looks
3180                  * like it's not just uncached access, but uncached + snooping.
3181                  * So we can still hold onto all our assumptions wrt cpu
3182                  * clflushing on LLC machines.
3183                  */
3184                 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3185                 return;
3186         }
3187
3188         __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);      /* for normal objects, no eLLC */
3189         __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);  /* for something pointing to ptes? */
3190         __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);  /* for scanout with eLLC */
3191         __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);                      /* Uncached objects, mostly for scanout */
3192         __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3193         __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3194         __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3195         __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3196 }
3197
3198 static void chv_setup_private_ppat(struct intel_ppat *ppat)
3199 {
3200         ppat->max_entries = 8;
3201         ppat->update_hw = bdw_private_pat_update_hw;
3202         ppat->match = chv_private_pat_match;
3203         ppat->clear_value = CHV_PPAT_SNOOP;
3204
3205         /*
3206          * Map WB on BDW to snooped on CHV.
3207          *
3208          * Only the snoop bit has meaning for CHV, the rest is
3209          * ignored.
3210          *
3211          * The hardware will never snoop for certain types of accesses:
3212          * - CPU GTT (GMADR->GGTT->no snoop->memory)
3213          * - PPGTT page tables
3214          * - some other special cycles
3215          *
3216          * As with BDW, we also need to consider the following for GT accesses:
3217          * "For GGTT, there is NO pat_sel[2:0] from the entry,
3218          * so RTL will always use the value corresponding to
3219          * pat_sel = 000".
3220          * Which means we must set the snoop bit in PAT entry 0
3221          * in order to keep the global status page working.
3222          */
3223
3224         __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3225         __alloc_ppat_entry(ppat, 1, 0);
3226         __alloc_ppat_entry(ppat, 2, 0);
3227         __alloc_ppat_entry(ppat, 3, 0);
3228         __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3229         __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3230         __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3231         __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3232 }
3233
3234 static void gen6_gmch_remove(struct i915_address_space *vm)
3235 {
3236         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3237
3238         iounmap(ggtt->gsm);
3239         cleanup_scratch_page(vm);
3240 }
3241
3242 static void setup_private_pat(struct drm_i915_private *dev_priv)
3243 {
3244         struct intel_ppat *ppat = &dev_priv->ppat;
3245         int i;
3246
3247         ppat->i915 = dev_priv;
3248
3249         if (INTEL_GEN(dev_priv) >= 10)
3250                 cnl_setup_private_ppat(ppat);
3251         else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3252                 chv_setup_private_ppat(ppat);
3253         else
3254                 bdw_setup_private_ppat(ppat);
3255
3256         GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3257
3258         for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3259                 ppat->entries[i].value = ppat->clear_value;
3260                 ppat->entries[i].ppat = ppat;
3261                 set_bit(i, ppat->dirty);
3262         }
3263
3264         ppat->update_hw(dev_priv);
3265 }
3266
3267 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3268 {
3269         struct drm_i915_private *dev_priv = ggtt->vm.i915;
3270         struct pci_dev *pdev = dev_priv->drm.pdev;
3271         unsigned int size;
3272         u16 snb_gmch_ctl;
3273         int err;
3274
3275         /* TODO: We're not aware of mappable constraints on gen8 yet */
3276         ggtt->gmadr =
3277                 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3278                                                  pci_resource_len(pdev, 2));
3279         ggtt->mappable_end = resource_size(&ggtt->gmadr);
3280
3281         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3282         if (!err)
3283                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3284         if (err)
3285                 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3286
3287         pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3288         if (IS_CHERRYVIEW(dev_priv))
3289                 size = chv_get_total_gtt_size(snb_gmch_ctl);
3290         else
3291                 size = gen8_get_total_gtt_size(snb_gmch_ctl);
3292
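        /*
         * Each 8-byte gen8 PTE maps one 4 KiB page, so e.g. an 8 MiB GTT
         * holds 1M PTEs and covers 4 GiB of GGTT space (illustrative).
         */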
3293         ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
3294         ggtt->vm.cleanup = gen6_gmch_remove;
3295         ggtt->vm.insert_page = gen8_ggtt_insert_page;
3296         ggtt->vm.clear_range = nop_clear_range;
3297         if (intel_scanout_needs_vtd_wa(dev_priv))
3298                 ggtt->vm.clear_range = gen8_ggtt_clear_range;
3299
3300         ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3301
3302         /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3303         if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
3304             IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
3305                 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3306                 ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
3307                 if (ggtt->vm.clear_range != nop_clear_range)
3308                         ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3309
3310                 /* Prevent recursively calling stop_machine() and deadlocks. */
3311                 dev_info(dev_priv->drm.dev,
3312                          "Disabling error capture for VT-d workaround\n");
3313                 i915_disable_error_state(dev_priv, -ENODEV);
3314         }
3315
3316         ggtt->invalidate = gen6_ggtt_invalidate;
3317
3318         ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3319         ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3320         ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3321         ggtt->vm.vma_ops.clear_pages = clear_pages;
3322
3323         ggtt->vm.pte_encode = gen8_pte_encode;
3324
3325         setup_private_pat(dev_priv);
3326
3327         return ggtt_probe_common(ggtt, size);
3328 }
3329
3330 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3331 {
3332         struct drm_i915_private *dev_priv = ggtt->vm.i915;
3333         struct pci_dev *pdev = dev_priv->drm.pdev;
3334         unsigned int size;
3335         u16 snb_gmch_ctl;
3336         int err;
3337
3338         ggtt->gmadr =
3339                 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3340                                                  pci_resource_len(pdev, 2));
3341         ggtt->mappable_end = resource_size(&ggtt->gmadr);
3342
3343         /* 64/512MB is the current min/max we actually know of, but this is just
3344          * a coarse sanity check.
3345          */
3346         if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3347                 DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3348                 return -ENXIO;
3349         }
3350
3351         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3352         if (!err)
3353                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3354         if (err)
3355                 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3356         pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3357
3358         size = gen6_get_total_gtt_size(snb_gmch_ctl);
3359         ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
3360
3361         ggtt->vm.clear_range = nop_clear_range;
3362         if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3363                 ggtt->vm.clear_range = gen6_ggtt_clear_range;
3364         ggtt->vm.insert_page = gen6_ggtt_insert_page;
3365         ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3366         ggtt->vm.cleanup = gen6_gmch_remove;
3367
3368         ggtt->invalidate = gen6_ggtt_invalidate;
3369
3370         if (HAS_EDRAM(dev_priv))
3371                 ggtt->vm.pte_encode = iris_pte_encode;
3372         else if (IS_HASWELL(dev_priv))
3373                 ggtt->vm.pte_encode = hsw_pte_encode;
3374         else if (IS_VALLEYVIEW(dev_priv))
3375                 ggtt->vm.pte_encode = byt_pte_encode;
3376         else if (INTEL_GEN(dev_priv) >= 7)
3377                 ggtt->vm.pte_encode = ivb_pte_encode;
3378         else
3379                 ggtt->vm.pte_encode = snb_pte_encode;
3380
3381         ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3382         ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3383         ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3384         ggtt->vm.vma_ops.clear_pages = clear_pages;
3385
3386         return ggtt_probe_common(ggtt, size);
3387 }
3388
3389 static void i915_gmch_remove(struct i915_address_space *vm)
3390 {
3391         intel_gmch_remove();
3392 }
3393
3394 static int i915_gmch_probe(struct i915_ggtt *ggtt)
3395 {
3396         struct drm_i915_private *dev_priv = ggtt->vm.i915;
3397         phys_addr_t gmadr_base;
3398         int ret;
3399
3400         ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3401         if (!ret) {
3402                 DRM_ERROR("failed to set up gmch\n");
3403                 return -EIO;
3404         }
3405
3406         intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3407
3408         ggtt->gmadr =
3409                 (struct resource) DEFINE_RES_MEM(gmadr_base,
3410                                                  ggtt->mappable_end);
3411
3412         ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3413         ggtt->vm.insert_page = i915_ggtt_insert_page;
3414         ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3415         ggtt->vm.clear_range = i915_ggtt_clear_range;
3416         ggtt->vm.cleanup = i915_gmch_remove;
3417
3418         ggtt->invalidate = gmch_ggtt_invalidate;
3419
3420         ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
3421         ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
3422         ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
3423         ggtt->vm.vma_ops.clear_pages = clear_pages;
3424
3425         if (unlikely(ggtt->do_idle_maps))
3426                 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3427
3428         return 0;
3429 }
3430
3431 /**
3432  * i915_ggtt_probe_hw - Probe GGTT hardware location
3433  * @dev_priv: i915 device
3434  */
3435 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3436 {
3437         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3438         int ret;
3439
3440         ggtt->vm.i915 = dev_priv;
3441         ggtt->vm.dma = &dev_priv->drm.pdev->dev;
3442
3443         if (INTEL_GEN(dev_priv) <= 5)
3444                 ret = i915_gmch_probe(ggtt);
3445         else if (INTEL_GEN(dev_priv) < 8)
3446                 ret = gen6_gmch_probe(ggtt);
3447         else
3448                 ret = gen8_gmch_probe(ggtt);
3449         if (ret)
3450                 return ret;
3451
3452         if ((ggtt->vm.total - 1) >> 32) {
3453                 DRM_ERROR("We never expected a Global GTT with more than 32bits"
3454                           " of address space! Found %lldM!\n",
3455                           ggtt->vm.total >> 20);
3456                 ggtt->vm.total = 1ULL << 32;
3457                 ggtt->mappable_end =
3458                         min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3459         }
3460
3461         if (ggtt->mappable_end > ggtt->vm.total) {
3462                 DRM_ERROR("mappable aperture extends past end of GGTT,"
3463                           " aperture=%pa, total=%llx\n",
3464                           &ggtt->mappable_end, ggtt->vm.total);
3465                 ggtt->mappable_end = ggtt->vm.total;
3466         }
3467
3468         /* GMADR is the PCI mmio aperture into the global GTT. */
3469         DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3470         DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3471         DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3472                          (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3473         if (intel_vtd_active())
3474                 DRM_INFO("VT-d active for gfx access\n");
3475
3476         return 0;
3477 }
3478
3479 /**
3480  * i915_ggtt_init_hw - Initialize GGTT hardware
3481  * @dev_priv: i915 device
3482  */
3483 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3484 {
3485         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3486         int ret;
3487
3488         stash_init(&dev_priv->mm.wc_stash);
3489
3490         /* Note that we use page colouring to enforce a guard page at the
3491          * end of the address space. This is required as the CS may prefetch
3492          * beyond the end of the batch buffer, across the page boundary,
3493          * and beyond the end of the GTT if we do not provide a guard.
3494          */
3495         mutex_lock(&dev_priv->drm.struct_mutex);
3496         i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
3497
3498         ggtt->vm.is_ggtt = true;
3499
3500         /* Only VLV supports read-only GGTT mappings */
3501         ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
3502
3503         if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
3504                 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3505         mutex_unlock(&dev_priv->drm.struct_mutex);
3506
3507         if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
3508                                 dev_priv->ggtt.gmadr.start,
3509                                 dev_priv->ggtt.mappable_end)) {
3510                 ret = -EIO;
3511                 goto out_gtt_cleanup;
3512         }
3513
3514         ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3515
3516         /*
3517          * Initialise stolen early so that we may reserve preallocated
3518          * objects for the BIOS to KMS transition.
3519          */
3520         ret = i915_gem_init_stolen(dev_priv);
3521         if (ret)
3522                 goto out_gtt_cleanup;
3523
3524         return 0;
3525
3526 out_gtt_cleanup:
3527         ggtt->vm.cleanup(&ggtt->vm);
3528         return ret;
3529 }
3530
3531 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3532 {
3533         if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3534                 return -EIO;
3535
3536         return 0;
3537 }
3538
3539 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3540 {
3541         GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3542
3543         i915->ggtt.invalidate = guc_ggtt_invalidate;
3544
3545         i915_ggtt_invalidate(i915);
3546 }
3547
3548 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3549 {
3550         /* XXX Temporary pardon for error unload */
3551         if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
3552                 return;
3553
3554         /* We should only be called after i915_ggtt_enable_guc() */
3555         GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3556
3557         i915->ggtt.invalidate = gen6_ggtt_invalidate;
3558
3559         i915_ggtt_invalidate(i915);
3560 }
3561
3562 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3563 {
3564         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3565         struct i915_vma *vma, *vn;
3566
3567         i915_check_and_clear_faults(dev_priv);
3568
3569         mutex_lock(&ggtt->vm.mutex);
3570
3571         /* First fill our portion of the GTT with scratch pages */
3572         ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3573         ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3574
3575         /* clflush objects bound into the GGTT and rebind them. */
3576         list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
3577                 struct drm_i915_gem_object *obj = vma->obj;
3578
3579                 if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3580                         continue;
3581
3582                 mutex_unlock(&ggtt->vm.mutex);
3583
3584                 if (!i915_vma_unbind(vma))
3585                         goto lock;
3586
3587                 WARN_ON(i915_vma_bind(vma,
3588                                       obj ? obj->cache_level : 0,
3589                                       PIN_UPDATE));
3590                 if (obj) {
3591                         i915_gem_object_lock(obj);
3592                         WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3593                         i915_gem_object_unlock(obj);
3594                 }
3595
3596 lock:
3597                 mutex_lock(&ggtt->vm.mutex);
3598         }
3599
3600         ggtt->vm.closed = false;
3601         i915_ggtt_invalidate(dev_priv);
3602
3603         mutex_unlock(&ggtt->vm.mutex);
3604
3605         if (INTEL_GEN(dev_priv) >= 8) {
3606                 struct intel_ppat *ppat = &dev_priv->ppat;
3607
3608                 bitmap_set(ppat->dirty, 0, ppat->max_entries);
3609                 dev_priv->ppat.update_hw(dev_priv);
3610                 return;
3611         }
3612 }
3613
3614 static struct scatterlist *
3615 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3616              unsigned int width, unsigned int height,
3617              unsigned int stride,
3618              struct sg_table *st, struct scatterlist *sg)
3619 {
3620         unsigned int column, row;
3621         unsigned int src_idx;
3622
3623         for (column = 0; column < width; column++) {
3624                 src_idx = stride * (height - 1) + column + offset;
3625                 for (row = 0; row < height; row++) {
3626                         st->nents++;
3627                         /* We don't need the pages, but need to initialize
3628                          * the entries so the sg list can be happily traversed.
3629                          * All we need are the DMA addresses.
3630                          */
3631                         sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3632                         sg_dma_address(sg) =
3633                                 i915_gem_object_get_dma_address(obj, src_idx);
3634                         sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3635                         sg = sg_next(sg);
3636                         src_idx -= stride;
3637                 }
3638         }
3639
3640         return sg;
3641 }
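
/*
 * Illustrative example (editorial): for a plane with offset = 0, width = 2,
 * height = 2 and stride = 2, i.e. a row-major source layout of pages
 *
 *	0 1
 *	2 3
 *
 * rotate_pages() above walks each source column from the bottom row upwards
 * and therefore emits the DMA addresses of pages 2, 0, 3, 1 in that order,
 * building the rotated view one column at a time.
 */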
3642
3643 static noinline struct sg_table *
3644 intel_rotate_pages(struct intel_rotation_info *rot_info,
3645                    struct drm_i915_gem_object *obj)
3646 {
3647         unsigned int size = intel_rotation_info_size(rot_info);
3648         struct sg_table *st;
3649         struct scatterlist *sg;
3650         int ret = -ENOMEM;
3651         int i;
3652
3653         /* Allocate target SG list. */
3654         st = kmalloc(sizeof(*st), GFP_KERNEL);
3655         if (!st)
3656                 goto err_st_alloc;
3657
3658         ret = sg_alloc_table(st, size, GFP_KERNEL);
3659         if (ret)
3660                 goto err_sg_alloc;
3661
3662         st->nents = 0;
3663         sg = st->sgl;
3664
3665         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3666                 sg = rotate_pages(obj, rot_info->plane[i].offset,
3667                                   rot_info->plane[i].width, rot_info->plane[i].height,
3668                                   rot_info->plane[i].stride, st, sg);
3669         }
3670
3671         return st;
3672
3673 err_sg_alloc:
3674         kfree(st);
3675 err_st_alloc:
3676
3677         DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3678                          obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3679
3680         return ERR_PTR(ret);
3681 }
3682
3683 static struct scatterlist *
3684 remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3685             unsigned int width, unsigned int height,
3686             unsigned int stride,
3687             struct sg_table *st, struct scatterlist *sg)
3688 {
3689         unsigned int row;
3690
3691         for (row = 0; row < height; row++) {
3692                 unsigned int left = width * I915_GTT_PAGE_SIZE;
3693
3694                 while (left) {
3695                         dma_addr_t addr;
3696                         unsigned int length;
3697
3698                         /* We don't need the pages, but need to initialize
3699                          * the entries so the sg list can be happily traversed.
3700                          * All we need are the DMA addresses.
3701                          */
3702
3703                         addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
3704
3705                         length = min(left, length);
3706
3707                         st->nents++;
3708
3709                         sg_set_page(sg, NULL, length, 0);
3710                         sg_dma_address(sg) = addr;
3711                         sg_dma_len(sg) = length;
3712                         sg = sg_next(sg);
3713
3714                         offset += length / I915_GTT_PAGE_SIZE;
3715                         left -= length;
3716                 }
3717
3718                 offset += stride - width;
3719         }
3720
3721         return sg;
3722 }
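
/*
 * Illustrative example (editorial): with offset = 0, width = 2, height = 2
 * and stride = 4, the first row emits pages 0-1, the end-of-row adjustment
 * (offset += stride - width) skips pages 2-3, and the second row emits
 * pages 4-5. The remapped view is a 2-page-wide window cut out of a
 * 4-page-wide source, with physically contiguous runs coalesced into a
 * single sg entry where possible.
 */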
3723
3724 static noinline struct sg_table *
3725 intel_remap_pages(struct intel_remapped_info *rem_info,
3726                   struct drm_i915_gem_object *obj)
3727 {
3728         unsigned int size = intel_remapped_info_size(rem_info);
3729         struct sg_table *st;
3730         struct scatterlist *sg;
3731         int ret = -ENOMEM;
3732         int i;
3733
3734         /* Allocate target SG list. */
3735         st = kmalloc(sizeof(*st), GFP_KERNEL);
3736         if (!st)
3737                 goto err_st_alloc;
3738
3739         ret = sg_alloc_table(st, size, GFP_KERNEL);
3740         if (ret)
3741                 goto err_sg_alloc;
3742
3743         st->nents = 0;
3744         sg = st->sgl;
3745
3746         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
3747                 sg = remap_pages(obj, rem_info->plane[i].offset,
3748                                  rem_info->plane[i].width, rem_info->plane[i].height,
3749                                  rem_info->plane[i].stride, st, sg);
3750         }
3751
3752         i915_sg_trim(st);
3753
3754         return st;
3755
3756 err_sg_alloc:
3757         kfree(st);
3758 err_st_alloc:
3759
3760         DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3761                          obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
3762
3763         return ERR_PTR(ret);
3764 }
3765
3766 static noinline struct sg_table *
3767 intel_partial_pages(const struct i915_ggtt_view *view,
3768                     struct drm_i915_gem_object *obj)
3769 {
3770         struct sg_table *st;
3771         struct scatterlist *sg, *iter;
3772         unsigned int count = view->partial.size;
3773         unsigned int offset;
3774         int ret = -ENOMEM;
3775
3776         st = kmalloc(sizeof(*st), GFP_KERNEL);
3777         if (!st)
3778                 goto err_st_alloc;
3779
3780         ret = sg_alloc_table(st, count, GFP_KERNEL);
3781         if (ret)
3782                 goto err_sg_alloc;
3783
3784         iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3785         GEM_BUG_ON(!iter);
3786
3787         sg = st->sgl;
3788         st->nents = 0;
3789         do {
3790                 unsigned int len;
3791
3792                 len = min(iter->length - (offset << PAGE_SHIFT),
3793                           count << PAGE_SHIFT);
3794                 sg_set_page(sg, NULL, len, 0);
3795                 sg_dma_address(sg) =
3796                         sg_dma_address(iter) + (offset << PAGE_SHIFT);
3797                 sg_dma_len(sg) = len;
3798
3799                 st->nents++;
3800                 count -= len >> PAGE_SHIFT;
3801                 if (count == 0) {
3802                         sg_mark_end(sg);
3803                         i915_sg_trim(st); /* Drop any unused tail entries. */
3804
3805                         return st;
3806                 }
3807
3808                 sg = __sg_next(sg);
3809                 iter = __sg_next(iter);
3810                 offset = 0;
3811         } while (1);
3812
3813 err_sg_alloc:
3814         kfree(st);
3815 err_st_alloc:
3816         return ERR_PTR(ret);
3817 }
3818
3819 static int
3820 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3821 {
3822         int ret;
3823
3824         /* The vma->pages are only valid within the lifespan of the borrowed
3825          * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3826          * must be the vma->pages. A simple rule is that vma->pages must only
3827          * be accessed when the obj->mm.pages are pinned.
3828          */
3829         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3830
3831         switch (vma->ggtt_view.type) {
3832         default:
3833                 GEM_BUG_ON(vma->ggtt_view.type);
3834                 /* fall through */
3835         case I915_GGTT_VIEW_NORMAL:
3836                 vma->pages = vma->obj->mm.pages;
3837                 return 0;
3838
3839         case I915_GGTT_VIEW_ROTATED:
3840                 vma->pages =
3841                         intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3842                 break;
3843
3844         case I915_GGTT_VIEW_REMAPPED:
3845                 vma->pages =
3846                         intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
3847                 break;
3848
3849         case I915_GGTT_VIEW_PARTIAL:
3850                 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3851                 break;
3852         }
3853
3854         ret = 0;
3855         if (IS_ERR(vma->pages)) {
3856                 ret = PTR_ERR(vma->pages);
3857                 vma->pages = NULL;
3858                 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3859                           vma->ggtt_view.type, ret);
3860         }
3861         return ret;
3862 }
3863
3864 /**
3865  * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3866  * @vm: the &struct i915_address_space
3867  * @node: the &struct drm_mm_node (typically i915_vma.node)
3868  * @size: how much space to allocate inside the GTT,
3869  *        must be #I915_GTT_PAGE_SIZE aligned
3870  * @offset: where to insert inside the GTT,
3871  *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3872  *          (@offset + @size) must fit within the address space
3873  * @color: color to apply to node, if this node is not from a VMA,
3874  *         color must be #I915_COLOR_UNEVICTABLE
3875  * @flags: control search and eviction behaviour
3876  *
3877  * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3878  * the address space (using @size and @color). If the @node does not fit, it
3879  * tries to evict any overlapping nodes from the GTT, including any
3880  * neighbouring nodes if the colors do not match (to ensure guard pages between
3881  * differing domains). See i915_gem_evict_for_node() for the gory details
3882  * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3883  * evicting active overlapping objects, and any overlapping node that is pinned
3884  * or marked as unevictable will also result in failure.
3885  *
3886  * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3887  * asked to wait for eviction and interrupted.
3888  */
3889 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3890                          struct drm_mm_node *node,
3891                          u64 size, u64 offset, unsigned long color,
3892                          unsigned int flags)
3893 {
3894         int err;
3895
3896         GEM_BUG_ON(!size);
3897         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3898         GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3899         GEM_BUG_ON(range_overflows(offset, size, vm->total));
3900         GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3901         GEM_BUG_ON(drm_mm_node_allocated(node));
3902
3903         node->size = size;
3904         node->start = offset;
3905         node->color = color;
3906
3907         err = drm_mm_reserve_node(&vm->mm, node);
3908         if (err != -ENOSPC)
3909                 return err;
3910
3911         if (flags & PIN_NOEVICT)
3912                 return -ENOSPC;
3913
3914         err = i915_gem_evict_for_node(vm, node, flags);
3915         if (err == 0)
3916                 err = drm_mm_reserve_node(&vm->mm, node);
3917
3918         return err;
3919 }
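
/*
 * Illustrative usage sketch (editorial, not part of the driver): reserving
 * a node at a fixed, page aligned GGTT offset. The node, size and offset
 * are hypothetical; the call and its error handling mirror the kernel-doc
 * above.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&i915->ggtt.vm, &node,
 *				   SZ_64K, SZ_1M,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *	if (err)
 *		return err;
 *
 * SZ_64K is the (I915_GTT_PAGE_SIZE aligned) size and SZ_1M the fixed
 * offset; with PIN_NOEVICT the call reports -ENOSPC instead of evicting
 * whatever currently overlaps the range. The node is released again with
 * drm_mm_remove_node() when no longer needed.
 */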
3920
3921 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3922 {
3923         u64 range, addr;
3924
3925         GEM_BUG_ON(range_overflows(start, len, end));
3926         GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3927
3928         range = round_down(end - len, align) - round_up(start, align);
3929         if (range) {
3930                 if (sizeof(unsigned long) == sizeof(u64)) {
3931                         addr = get_random_long();
3932                 } else {
3933                         addr = get_random_int();
3934                         if (range > U32_MAX) {
3935                                 addr <<= 32;
3936                                 addr |= get_random_int();
3937                         }
3938                 }
3939                 div64_u64_rem(addr, range, &addr);
3940                 start += addr;
3941         }
3942
3943         return round_up(start, align);
3944 }
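
/*
 * Worked example (editorial): with start = 0, end = SZ_1M, len = SZ_4K and
 * align = SZ_4K, range = round_down(1M - 4K, 4K) - round_up(0, 4K) = 1M - 4K.
 * A random value is reduced modulo that range, added to start and rounded up
 * to the alignment, giving a 4 KiB aligned offset in [0, 1M - 4K] that always
 * leaves room for the full length before the end of the range.
 */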
3945
3946 /**
3947  * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3948  * @vm: the &struct i915_address_space
3949  * @node: the &struct drm_mm_node (typically i915_vma.node)
3950  * @size: how much space to allocate inside the GTT,
3951  *        must be #I915_GTT_PAGE_SIZE aligned
3952  * @alignment: required alignment of starting offset, may be 0 but
3953  *             if specified, this must be a power-of-two and at least
3954  *             #I915_GTT_MIN_ALIGNMENT
3955  * @color: color to apply to node
3956  * @start: start of any range restriction inside GTT (0 for all),
3957  *         must be #I915_GTT_PAGE_SIZE aligned
3958  * @end: end of any range restriction inside GTT (U64_MAX for all),
3959  *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3960  * @flags: control search and eviction behaviour
3961  *
3962  * i915_gem_gtt_insert() first searches for an available hole into which
3963  * it can insert the node. The hole address is aligned to @alignment and
3964  * its @size must then fit entirely within the [@start, @end] bounds. The
3965  * nodes on either side of the hole must match @color, or else a guard page
3966  * will be inserted between the two nodes (or the node evicted). If no
3967  * suitable hole is found, a victim is first selected at random and tested
3968  * for eviction; failing that, the LRU list of objects within the GTT is
3969  * scanned to find the first set of replacement nodes that would create the
3970  * hole. Those old overlapping nodes are evicted from the GTT (and so must
3971  * be rebound before any future use). Any node that is currently pinned
3972  * cannot be evicted (see i915_vma_pin()). Similarly, if the node's VMA is
3973  * currently active and #PIN_NONBLOCK is specified, that node is also
3974  * skipped when searching for an eviction candidate. See
3975  * i915_gem_evict_something() for the gory details on the eviction algorithm.
3976  *
3977  * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3978  * asked to wait for eviction and interrupted.
3979  */
3980 int i915_gem_gtt_insert(struct i915_address_space *vm,
3981                         struct drm_mm_node *node,
3982                         u64 size, u64 alignment, unsigned long color,
3983                         u64 start, u64 end, unsigned int flags)
3984 {
3985         enum drm_mm_insert_mode mode;
3986         u64 offset;
3987         int err;
3988
3989         lockdep_assert_held(&vm->i915->drm.struct_mutex);
3990         GEM_BUG_ON(!size);
3991         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3992         GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3993         GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3994         GEM_BUG_ON(start >= end);
3995         GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3996         GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3997         GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3998         GEM_BUG_ON(drm_mm_node_allocated(node));
3999
4000         if (unlikely(range_overflows(start, size, end)))
4001                 return -ENOSPC;
4002
4003         if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
4004                 return -ENOSPC;
4005
4006         mode = DRM_MM_INSERT_BEST;
4007         if (flags & PIN_HIGH)
4008                 mode = DRM_MM_INSERT_HIGHEST;
4009         if (flags & PIN_MAPPABLE)
4010                 mode = DRM_MM_INSERT_LOW;
4011
4012         /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
4013          * so we know that we always have a minimum alignment of 4096.
4014          * The drm_mm range manager is optimised to return results
4015          * with zero alignment, so where possible use the optimal
4016          * path.
4017          */
4018         BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
4019         if (alignment <= I915_GTT_MIN_ALIGNMENT)
4020                 alignment = 0;
4021
4022         err = drm_mm_insert_node_in_range(&vm->mm, node,
4023                                           size, alignment, color,
4024                                           start, end, mode);
4025         if (err != -ENOSPC)
4026                 return err;
4027
4028         if (mode & DRM_MM_INSERT_ONCE) {
4029                 err = drm_mm_insert_node_in_range(&vm->mm, node,
4030                                                   size, alignment, color,
4031                                                   start, end,
4032                                                   DRM_MM_INSERT_BEST);
4033                 if (err != -ENOSPC)
4034                         return err;
4035         }
4036
4037         if (flags & PIN_NOEVICT)
4038                 return -ENOSPC;
4039
4040         /* No free space, pick a slot at random.
4041          *
4042          * There is a pathological case here using a GTT shared between
4043          * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
4044          *
4045          *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
4046          *         (64k objects)             (448k objects)
4047          *
4048          * Now imagine that the eviction LRU is ordered top-down (just because
4049          * pathology meets real life), and that we need to evict an object to
4050          * make room inside the aperture. The eviction scan then has to walk
4051          * the 448k list before it finds one within range. And now imagine that
4052          * it has to search for a new hole between every byte inside the memcpy,
4053          * for several simultaneous clients.
4054          *
4055          * On a full-ppgtt system, if we have run out of available space, there
4056          * will be lots and lots of objects in the eviction list! Again,
4057          * searching that LRU list may be slow if we are also applying any
4058          * range restrictions (e.g. restriction to the low 4GiB) and so,
4059          * for simplicity and similarity between the different GTTs, we
4060          * try a single random replacement first.
4061          */
4062         offset = random_offset(start, end,
4063                                size, alignment ?: I915_GTT_MIN_ALIGNMENT);
4064         err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
4065         if (err != -ENOSPC)
4066                 return err;
4067
4068         /* Randomly selected placement is pinned, do a search */
4069         err = i915_gem_evict_something(vm, size, alignment, color,
4070                                        start, end, flags);
4071         if (err)
4072                 return err;
4073
4074         return drm_mm_insert_node_in_range(&vm->mm, node,
4075                                            size, alignment, color,
4076                                            start, end, DRM_MM_INSERT_EVICT);
4077 }
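
/*
 * Illustrative usage sketch (editorial, not part of the driver): asking the
 * allocator for a 2 MiB slot anywhere in the low 4 GiB of an address space,
 * preferring high addresses. The node and the color value are hypothetical;
 * the call mirrors the kernel-doc above and is made with struct_mutex held.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(vm, &node,
 *				  SZ_2M, I915_GTT_MIN_ALIGNMENT, color,
 *				  0, BIT_ULL(32), PIN_HIGH);
 *	if (err)
 *		return err;
 *
 * On success node.start holds the chosen offset. PIN_MAPPABLE biases the
 * search towards low addresses instead, and PIN_NOEVICT turns a full GTT
 * into an immediate -ENOSPC rather than triggering eviction.
 */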
4078
4079 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4080 #include "selftests/mock_gtt.c"
4081 #include "selftests/i915_gem_gtt.c"
4082 #endif