drivers/gpu/drm/i915/gt/intel_ppgtt.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

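/*
 * Allocate a single page table along with the DMA page backing it.
 * The use count starts at zero; returns an ERR_PTR on failure.
 */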
struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
        struct i915_page_table *pt;

        pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pt))
                return ERR_PTR(-ENOMEM);

        if (unlikely(setup_page_dma(vm, &pt->base))) {
                kfree(pt);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&pt->used, 0);
        return pt;
}

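/*
 * Allocate a zeroed page directory of @sz bytes (callers pass the size
 * so the trailing entry array can be sized to suit) and initialise the
 * spinlock guarding its shadow entries. Unlike alloc_pt(), this returns
 * NULL rather than an ERR_PTR on failure.
 */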
struct i915_page_directory *__alloc_pd(size_t sz)
{
        struct i915_page_directory *pd;

        pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd))
                return NULL;

        spin_lock_init(&pd->lock);
        return pd;
}

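/*
 * Allocate a default-sized page directory plus its backing DMA page.
 * Returns ERR_PTR(-ENOMEM) if either allocation fails.
 */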
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
        struct i915_page_directory *pd;

        pd = __alloc_pd(sizeof(*pd));
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);

        if (unlikely(setup_page_dma(vm, px_base(pd)))) {
                kfree(pd);
                return ERR_PTR(-ENOMEM);
        }

        return pd;
}

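/*
 * Release the backing DMA page and the allocation itself. Note the
 * kfree(): page tables and directories are freed through their
 * embedded i915_page_dma, which is expected to sit at the start of
 * the containing structure.
 */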
void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
{
        cleanup_page_dma(vm, pd);
        kfree(pd);
}

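/* Map the backing page and write a single encoded entry at @idx. */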
static inline void
write_dma_entry(struct i915_page_dma * const pdma,
                const unsigned short idx,
                const u64 encoded_entry)
{
        u64 * const vaddr = kmap_atomic(pdma->page);

        vaddr[idx] = encoded_entry;
        kunmap_atomic(vaddr);
}

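/*
 * Install @to at @idx: take a reference on the directory, record the
 * child in the shadow entry array, and write the encoded DMA address
 * into the directory's backing page.
 */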
void
__set_pd_entry(struct i915_page_directory * const pd,
               const unsigned short idx,
               struct i915_page_dma * const to,
               u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
        /* Each thread pre-pins the pd, and we may have a thread per pde. */
        GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));

        atomic_inc(px_used(pd));
        pd->entry[idx] = to;
        write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
}

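/*
 * Undo __set_pd_entry(): point the hardware entry back at scratch,
 * drop the shadow pointer and release the directory reference.
 */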
void
clear_pd_entry(struct i915_page_directory * const pd,
               const unsigned short idx,
               const struct i915_page_scratch * const scratch)
{
        GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

        write_dma_entry(px_base(pd), idx, scratch->encode);
        pd->entry[idx] = NULL;
        atomic_dec(px_used(pd));
}

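/*
 * Drop a reference on @pt. The lockless atomic_add_unless() fast path
 * avoids taking pd->lock unless this could be the final reference; the
 * entry is cleared only once the count reaches zero under the lock.
 * Returns true if the caller should now free the page table.
 */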
bool
release_pd_entry(struct i915_page_directory * const pd,
                 const unsigned short idx,
                 struct i915_page_table * const pt,
                 const struct i915_page_scratch * const scratch)
{
        bool free = false;

        if (atomic_add_unless(&pt->used, -1, 1))
                return false;

        spin_lock(&pd->lock);
        if (atomic_dec_and_test(&pt->used)) {
                clear_pd_entry(pd, idx, scratch);
                free = true;
        }
        spin_unlock(&pd->lock);

        return free;
}

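/*
 * Apply the GTT workarounds and, on gen6/gen7, enable per-process GTT
 * support in hardware; later generations need no global enable here.
 */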
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        gtt_write_workarounds(gt);

        if (IS_GEN(i915, 6))
                gen6_ppgtt_enable(gt);
        else if (IS_GEN(i915, 7))
                gen7_ppgtt_enable(gt);

        return 0;
}

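/* Pick the ppgtt implementation for the hardware generation. */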
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt)
{
        if (INTEL_GEN(gt->i915) < 8)
                return gen6_ppgtt_create(gt);
        else
                return gen8_ppgtt_create(gt);
}

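/*
 * Construct a new ppgtt for this GT, emitting a tracepoint on success.
 */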
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
{
        struct i915_ppgtt *ppgtt;

        ppgtt = __ppgtt_create(gt);
        if (IS_ERR(ppgtt))
                return ppgtt;

        trace_i915_ppgtt_create(&ppgtt->vm);

        return ppgtt;
}

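/*
 * Bind a vma into the ppgtt: on first bind (I915_VMA_ALLOC) allocate
 * the page tables covering the vma's range, then write the PTEs,
 * marking read-only objects where the hardware honours it, and flush
 * the writes with wmb() before the GPU may observe them.
 */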
static int ppgtt_bind_vma(struct i915_vma *vma,
                          enum i915_cache_level cache_level,
                          u32 flags)
{
        u32 pte_flags;
        int err;

        if (flags & I915_VMA_ALLOC) {
                err = vma->vm->allocate_va_range(vma->vm,
                                                 vma->node.start, vma->size);
                if (err)
                        return err;

                set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
        }

        /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
        if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;

        GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
        wmb();

        return 0;
}

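/* If the range was ever allocated, point its PTEs back at scratch. */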
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
        if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
                vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

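/*
 * A ppgtt vma maps the object one-to-one, so it can reference the
 * object's backing pages and page sizes directly.
 */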
int ppgtt_set_pages(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->pages);

        vma->pages = vma->obj->mm.pages;

        vma->page_sizes = vma->obj->mm.page_sizes;

        return 0;
}

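/*
 * Common construction shared by the gen6 and gen8 backends: bind the
 * address space to its GT and device, size it from the platform's
 * ppgtt_size (in bits of virtual address), and install the vma ops.
 */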
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        ppgtt->vm.gt = gt;
        ppgtt->vm.i915 = i915;
        ppgtt->vm.dma = &i915->drm.pdev->dev;
        ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

        i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

        ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
        ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
        ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
        ppgtt->vm.vma_ops.clear_pages = clear_pages;
}