drivers/gpu/drm/nouveau/nouveau_svm.c
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>

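/* Per-device SVM state: the owning DRM instance, a list linking channel
 * instance pointers to their SVMM (protected by @mutex), and the replayable
 * fault buffer together with a software cache of its decoded entries.
 */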
struct nouveau_svm {
	struct nouveau_drm *drm;
	struct mutex mutex;
	struct list_head inst;

	struct nouveau_svm_fault_buffer {
		int id;
		struct nvif_object object;
		u32 entries;
		u32 getaddr;
		u32 putaddr;
		u32 get;
		u32 put;
		struct nvif_notify notify;

		struct nouveau_svm_fault {
			u64 inst;
			u64 addr;
			u64 time;
			u32 engine;
			u8  gpc;
			u8  hub;
			u8  access;
			u8  client;
			u8  fault;
			struct nouveau_svmm *svmm;
		} **fault;
		int fault_nr;
	} buffer[1];
};

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)

struct nouveau_ivmm {
	struct nouveau_svmm *svmm;
	u64 inst;
	struct list_head head;
};

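/* Find the instance<->SVMM link for a channel instance pointer.  Callers
 * hold svm->mutex while walking the list.
 */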
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	list_for_each_entry(ivmm, &svm->inst, head) {
		if (ivmm->inst == inst)
			return ivmm;
	}
	return NULL;
}

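/* Per-client SVM state: the SVM-enabled GPU VMM, the CPU mm it mirrors via
 * HMM, and the "unmanaged" address window reserved by the client for its
 * own allocations, which fault handling avoids mapping into.  @mutex
 * serialises GPU page-table updates against CPU invalidation callbacks.
 */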
struct nouveau_svmm {
	struct nouveau_vmm *vmm;
	struct {
		unsigned long start;
		unsigned long limit;
	} unmanaged;

	struct mutex mutex;

	struct mm_struct *mm;
	struct hmm_mirror mirror;
};

#define SVMM_DBG(s,f,a...)                                                     \
	NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...)                                                     \
	NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)

/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
		if (ivmm) {
			list_del(&ivmm->head);
			kfree(ivmm);
		}
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
}

/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;
	if (svmm) {
		if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
			return -ENOMEM;
		ivmm->svmm = svmm;
		ivmm->inst = inst;

		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
	return 0;
}

/* Invalidate SVMM address-range on GPU. */
static void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
	if (limit > start) {
		bool super = svmm->vmm->vmm.object.client->super;
		svmm->vmm->vmm.object.client->super = true;
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
		svmm->vmm->vmm.object.client->super = super;
	}
}

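/* HMM mirror callback, invoked when a range of the CPU address space is
 * invalidated: drop the matching GPU mappings so the next access faults
 * and gets re-resolved.  If the caller cannot block, bail out with -EAGAIN
 * rather than contend for the mutex.
 */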
static int
nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					const struct hmm_update *update)
{
	struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
	unsigned long start = update->start;
	unsigned long limit = update->end;

	if (!update->blockable)
		return -EAGAIN;

	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

	mutex_lock(&svmm->mutex);
	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
		if (start < svmm->unmanaged.start) {
			nouveau_svmm_invalidate(svmm, start,
						svmm->unmanaged.limit);
		}
		start = svmm->unmanaged.limit;
	}

	nouveau_svmm_invalidate(svmm, start, limit);
	mutex_unlock(&svmm->mutex);
	return 0;
}

static void
nouveau_svmm_release(struct hmm_mirror *mirror)
{
}

static const struct hmm_mirror_ops
nouveau_svmm = {
	.sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
	.release = nouveau_svmm_release,
};

void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;
	if (svmm) {
		hmm_mirror_unregister(&svmm->mirror);
		kfree(*psvmm);
		*psvmm = NULL;
	}
}

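/* DRM_NOUVEAU_SVM_INIT ioctl handler: enables SVM for a client by replacing
 * its VMM with one that has replayable faults enabled and registering an
 * HMM mirror of the calling process' address space.
 *
 * A minimal sketch of the userspace side, assuming the drm_nouveau_svm_init
 * layout used below and the DRM_IOCTL_NOUVEAU_SVM_INIT define from the
 * nouveau uapi header (illustrative only):
 *
 *	struct drm_nouveau_svm_init args = {
 *		.unmanaged_addr = ...,	/* VA window the client reserves */
 *		.unmanaged_size = ...,
 *	};
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_INIT, &args);
 */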
int
nouveau_svmm_init(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_svmm *svmm;
	struct drm_nouveau_svm_init *args = data;
	int ret;

	/* Allocate tracking for SVM-enabled VMM. */
	if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
		return -ENOMEM;
	svmm->vmm = &cli->svm;
	svmm->unmanaged.start = args->unmanaged_addr;
	svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
	mutex_init(&svmm->mutex);

	/* Check that SVM isn't already enabled for the client. */
	mutex_lock(&cli->mutex);
	if (cli->svm.cli) {
		ret = -EBUSY;
		goto done;
	}

	/* Allocate a new GPU VMM that can support SVM (managed by the
	 * client, with replayable faults enabled).
	 *
	 * All future channel/memory allocations will make use of this
	 * VMM instead of the standard one.
	 */
	ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
			    args->unmanaged_addr, args->unmanaged_size,
			    &(struct gp100_vmm_v0) {
				.fault_replay = true,
			    }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
	if (ret)
		goto done;

	/* Enable HMM mirroring of CPU address-space to VMM. */
	svmm->mm = get_task_mm(current);
	down_write(&svmm->mm->mmap_sem);
	svmm->mirror.ops = &nouveau_svmm;
	ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
	if (ret == 0) {
		cli->svm.svmm = svmm;
		cli->svm.cli = cli;
	}
	up_write(&svmm->mm->mmap_sem);
	mmput(svmm->mm);

done:
	if (ret)
		nouveau_svmm_fini(&svmm);
	mutex_unlock(&cli->mutex);
	return ret;
}

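/* Translation tables from the HMM pfn flag/value encoding to the NVIF
 * PFNMAP bits pushed to the GPU page tables.
 */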
static const u64
nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID         ] = NVIF_VMM_PFNMAP_V0_V,
	[HMM_PFN_WRITE         ] = NVIF_VMM_PFNMAP_V0_W,
	[HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
};

static const u64
nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
	[HMM_PFN_ERROR  ] = ~NVIF_VMM_PFNMAP_V0_V,
	[HMM_PFN_NONE   ] =  NVIF_VMM_PFNMAP_V0_NONE,
	[HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
};

/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
	SVM_DBG(svm, "replay");
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_REPLAY,
				 &(struct gp100_vmm_fault_replay_vn) {},
				 sizeof(struct gp100_vmm_fault_replay_vn)));
}

/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
			 u64 inst, u8 hub, u8 gpc, u8 client)
{
	SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
	WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
				 GP100_VMM_VN_FAULT_CANCEL,
				 &(struct gp100_vmm_fault_cancel_v0) {
					.hub = hub,
					.gpc = gpc,
					.client = client,
					.inst = inst,
				 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}

static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
			       struct nouveau_svm_fault *fault)
{
	nouveau_svm_fault_cancel(svm, fault->inst,
				      fault->hub,
				      fault->gpc,
				      fault->client);
}

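/* sort() comparator: group faults by channel instance, then by address,
 * and within an address order write faults ahead of read/prefetch faults
 * so that handling the first one satisfies any duplicates.
 */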
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
	const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
	const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
	if (fa->inst != fb->inst)
		return fa->inst < fb->inst ? -1 : 1;
	if (fa->addr != fb->addr)
		return fa->addr < fb->addr ? -1 : 1;
	/*XXX: atomic? */
	return (fa->access == 0 || fa->access == 3) -
	       (fb->access == 0 || fb->access == 3);
}

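/* Decode one 32-byte fault buffer entry at @offset into the software fault
 * cache (allocating the cache slot on first use), and clear what looks to
 * be the entry's valid bit, bit 31 of the info word.
 */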
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
			struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
	struct nvif_object *memory = &buffer->object;
	const u32 instlo = nvif_rd32(memory, offset + 0x00);
	const u32 insthi = nvif_rd32(memory, offset + 0x04);
	const u32 addrlo = nvif_rd32(memory, offset + 0x08);
	const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
	const u32 timelo = nvif_rd32(memory, offset + 0x10);
	const u32 timehi = nvif_rd32(memory, offset + 0x14);
	const u32 engine = nvif_rd32(memory, offset + 0x18);
	const u32   info = nvif_rd32(memory, offset + 0x1c);
	const u64   inst = (u64)insthi << 32 | instlo;
	const u8     gpc = (info & 0x1f000000) >> 24;
	const u8     hub = (info & 0x00100000) >> 20;
	const u8  client = (info & 0x00007f00) >> 8;
	struct nouveau_svm_fault *fault;

	/*XXX: i think we're supposed to spin waiting */
	if (WARN_ON(!(info & 0x80000000)))
		return;

	nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

	if (!buffer->fault[buffer->fault_nr]) {
		fault = kmalloc(sizeof(*fault), GFP_KERNEL);
		if (WARN_ON(!fault)) {
			nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
			return;
		}
		buffer->fault[buffer->fault_nr] = fault;
	}

	fault = buffer->fault[buffer->fault_nr++];
	fault->inst   = inst;
	fault->addr   = (u64)addrhi << 32 | addrlo;
	fault->time   = (u64)timehi << 32 | timelo;
	fault->engine = engine;
	fault->gpc    = gpc;
	fault->hub    = hub;
	fault->access = (info & 0x000f0000) >> 16;
	fault->client = client;
	fault->fault  = (info & 0x0000001f);

	SVM_DBG(svm, "fault %016llx %016llx %02x",
		fault->inst, fault->addr, fault->access);
}

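/* Replayable fault handler, run from the nvif notify: drain pending fault
 * buffer entries into the cache, sort them, resolve each instance pointer
 * to its SVMM, then for each window of faults ask HMM to fault the pages
 * in and push the resulting PFN map to the GPU.  Faults that cannot be
 * resolved are cancelled; everything else is replayed at the end.
 */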
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
	struct nouveau_svm_fault_buffer *buffer =
		container_of(notify, typeof(*buffer), notify);
	struct nouveau_svm *svm =
		container_of(buffer, typeof(*svm), buffer[buffer->id]);
	struct nvif_object *device = &svm->drm->client.device.object;
	struct nouveau_svmm *svmm;
	struct {
		struct {
			struct nvif_ioctl_v0 i;
			struct nvif_ioctl_mthd_v0 m;
			struct nvif_vmm_pfnmap_v0 p;
		} i;
		u64 phys[16];
	} args;
	struct hmm_range range;
	struct vm_area_struct *vma;
	u64 inst, start, limit;
	int fi, fn, pi, fill;
	int replay = 0, ret;

	/* Parse available fault buffer entries into a cache, and update
	 * the GET pointer so HW can reuse the entries.
	 */
	SVM_DBG(svm, "fault handler");
	if (buffer->get == buffer->put) {
		buffer->put = nvif_rd32(device, buffer->putaddr);
		buffer->get = nvif_rd32(device, buffer->getaddr);
		if (buffer->get == buffer->put)
			return NVIF_NOTIFY_KEEP;
	}
	buffer->fault_nr = 0;

	SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
	while (buffer->get != buffer->put) {
		nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
		if (++buffer->get == buffer->entries)
			buffer->get = 0;
	}
	nvif_wr32(device, buffer->getaddr, buffer->get);
	SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

	/* Sort parsed faults by instance pointer to prevent unnecessary
	 * instance to SVMM translations, followed by address and access
	 * type to reduce the amount of work when handling the faults.
	 */
	sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
	     nouveau_svm_fault_cmp, NULL);

	/* Lookup SVMM structure for each unique instance pointer. */
	mutex_lock(&svm->mutex);
	for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
		if (!svmm || buffer->fault[fi]->inst != inst) {
			struct nouveau_ivmm *ivmm =
				nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
			svmm = ivmm ? ivmm->svmm : NULL;
			inst = buffer->fault[fi]->inst;
			SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
		}
		buffer->fault[fi]->svmm = svmm;
	}
	mutex_unlock(&svm->mutex);

	/* Process list of faults. */
	args.i.i.version = 0;
	args.i.i.type = NVIF_IOCTL_V0_MTHD;
	args.i.m.version = 0;
	args.i.m.method = NVIF_VMM_V0_PFNMAP;
	args.i.p.version = 0;

	for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
		/* Cancel any faults from non-SVM channels. */
		if (!(svmm = buffer->fault[fi]->svmm)) {
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}
		SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

		/* We try and group handling of faults within a small
		 * window into a single update.
		 */
		start = buffer->fault[fi]->addr;
		limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
		if (start < svmm->unmanaged.limit)
			limit = min_t(u64, limit, svmm->unmanaged.start);
		else
		if (limit > svmm->unmanaged.start)
			start = max_t(u64, start, svmm->unmanaged.limit);
		SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

		/* Intersect fault window with the CPU VMA, cancelling
		 * the fault if the address is invalid.
		 */
		down_read(&svmm->mm->mmap_sem);
		vma = find_vma_intersection(svmm->mm, start, limit);
		if (!vma) {
			SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
			up_read(&svmm->mm->mmap_sem);
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}
		start = max_t(u64, start, vma->vm_start);
		limit = min_t(u64, limit, vma->vm_end);
		SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

		if (buffer->fault[fi]->addr != start) {
			SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
			up_read(&svmm->mm->mmap_sem);
			nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
			continue;
		}

		/* Prepare the GPU-side update of all pages within the
		 * fault window, determining required pages and access
		 * permissions based on pending faults.
		 */
		args.i.p.page = PAGE_SHIFT;
		args.i.p.addr = start;
		for (fn = fi, pi = 0;;) {
			/* Determine required permissions based on GPU fault
			 * access flags.
			 *XXX: atomic?
			 */
			if (buffer->fault[fn]->access != 0 /* READ. */ &&
			    buffer->fault[fn]->access != 3 /* PREFETCH. */) {
				args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
						  NVIF_VMM_PFNMAP_V0_W;
			} else {
				args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
			}
			args.i.p.size = pi << PAGE_SHIFT;

			/* It's okay to skip over duplicate addresses from the
			 * same SVMM as faults are ordered by access type such
			 * that only the first one needs to be handled.
			 *
			 * ie. WRITE faults appear first, thus any handling of
			 * pending READ faults will already be satisfied.
			 */
			while (++fn < buffer->fault_nr &&
			       buffer->fault[fn]->svmm == svmm &&
			       buffer->fault[fn    ]->addr ==
			       buffer->fault[fn - 1]->addr);

			/* If the next fault is outside the window, or all GPU
			 * faults have been dealt with, we're done here.
			 */
			if (fn >= buffer->fault_nr ||
			    buffer->fault[fn]->svmm != svmm ||
			    buffer->fault[fn]->addr >= limit)
				break;

			/* Fill in the gap between this fault and the next. */
			fill = (buffer->fault[fn    ]->addr -
				buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
			while (--fill)
				args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
		}

		SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
			 args.i.p.addr,
			 args.i.p.addr + args.i.p.size, fn - fi);

		/* Have HMM fault pages within the fault window to the GPU. */
		range.vma = vma;
		range.start = args.i.p.addr;
		range.end = args.i.p.addr + args.i.p.size;
		range.pfns = args.phys;
		range.flags = nouveau_svm_pfn_flags;
		range.values = nouveau_svm_pfn_values;
		range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
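		/* Fault the pages in, then check under svmm->mutex that no
		 * CPU invalidation raced with us (hmm_vma_range_done()); if
		 * one did, the snapshot is stale and we retry from "again".
		 */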
again:
		ret = hmm_vma_fault(&range, true);
		if (ret == 0) {
			mutex_lock(&svmm->mutex);
			if (!hmm_vma_range_done(&range)) {
				mutex_unlock(&svmm->mutex);
				goto again;
			}

			nouveau_dmem_convert_pfn(svm->drm, &range);

			svmm->vmm->vmm.object.client->super = true;
			ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
						&args, sizeof(args.i) +
						pi * sizeof(args.phys[0]),
						NULL);
			svmm->vmm->vmm.object.client->super = false;
			mutex_unlock(&svmm->mutex);
		}
		up_read(&svmm->mm->mmap_sem);

		/* Cancel any faults in the window whose pages didn't manage
		 * to keep their valid bit, or stay writeable when required.
		 *
		 * If handling failed completely, cancel all faults.
		 */
		while (fi < fn) {
			struct nouveau_svm_fault *fault = buffer->fault[fi++];
			pi = (fault->addr - range.start) >> PAGE_SHIFT;
			if (ret ||
			     !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
			    (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
			     fault->access != 0 && fault->access != 3)) {
				nouveau_svm_fault_cancel_fault(svm, fault);
				continue;
			}
			replay++;
		}
	}

	/* Issue fault replay to the GPU. */
	if (replay)
		nouveau_svm_fault_replay(svm);
	return NVIF_NOTIFY_KEEP;
}

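/* Stop listening for fault notifications (suspend/teardown path). */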
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	nvif_notify_put(&buffer->notify);
}

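/* Resynchronise the cached GET/PUT pointers with the hardware and re-arm
 * the fault notification (init/resume path).
 */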
static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nvif_object *device = &svm->drm->client.device.object;
	buffer->get = nvif_rd32(device, buffer->getaddr);
	buffer->put = nvif_rd32(device, buffer->putaddr);
	SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
	return nvif_notify_get(&buffer->notify);
}

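/* Free the software fault cache and destroy the fault buffer object. */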
static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	int i;

	if (buffer->fault) {
		for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
			kfree(buffer->fault[i]);
		kvfree(buffer->fault);
	}

	nouveau_svm_fault_buffer_fini(svm, id);

	nvif_notify_fini(&buffer->notify);
	nvif_object_fini(&buffer->object);
}

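/* Allocate and map the replayable fault buffer object, hook up the fault
 * notification handler, and allocate the software cache used to stage
 * decoded entries.
 */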
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
	struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
	struct nouveau_drm *drm = svm->drm;
	struct nvif_object *device = &drm->client.device.object;
	struct nvif_clb069_v0 args = {};
	int ret;

	buffer->id = id;

	ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
			       &buffer->object);
	if (ret < 0) {
		SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
		return ret;
	}

	nvif_object_map(&buffer->object, NULL, 0);
	buffer->entries = args.entries;
	buffer->getaddr = args.get;
	buffer->putaddr = args.put;

	ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
			       NVB069_V0_NTFY_FAULT, NULL, 0, 0,
			       &buffer->notify);
	if (ret)
		return ret;

	buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
	if (!buffer->fault)
		return -ENOMEM;

	return nouveau_svm_fault_buffer_init(svm, id);
}

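/* Suspend/resume hooks: the fault buffer notify is disarmed across suspend
 * and the GET/PUT pointers are resynchronised on resume.
 */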
void
nouveau_svm_resume(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm)
		nouveau_svm_fault_buffer_init(svm, 0);
}

void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm)
		nouveau_svm_fault_buffer_fini(svm, 0);
}

void
nouveau_svm_fini(struct nouveau_drm *drm)
{
	struct nouveau_svm *svm = drm->svm;
	if (svm) {
		nouveau_svm_fault_buffer_dtor(svm, 0);
		kfree(drm->svm);
		drm->svm = NULL;
	}
}

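/* Called at driver load: allocate per-device SVM state and construct the
 * replayable fault buffer using the newest fault buffer class the device
 * exposes, tearing everything back down if construction fails.
 */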
void
nouveau_svm_init(struct nouveau_drm *drm)
{
	static const struct nvif_mclass buffers[] = {
		{   VOLTA_FAULT_BUFFER_A, 0 },
		{ MAXWELL_FAULT_BUFFER_A, 0 },
		{}
	};
	struct nouveau_svm *svm;
	int ret;

	/* Disable on Volta and newer until channel recovery is fixed,
	 * otherwise clients will have a trivial way to trash the GPU
	 * for everyone.
	 */
	if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
		return;

	drm->svm->drm = drm;
	mutex_init(&drm->svm->mutex);
	INIT_LIST_HEAD(&drm->svm->inst);

	ret = nvif_mclass(&drm->client.device.object, buffers);
	if (ret < 0) {
		SVM_DBG(svm, "No supported fault buffer class");
		nouveau_svm_fini(drm);
		return;
	}

	ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
	if (ret) {
		nouveau_svm_fini(drm);
		return;
	}

	SVM_DBG(svm, "Initialised");
}