/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "cgrp.h"
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>
#include <nvif/cl0080.h>

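/* Decoded snapshot of an engine's status register (0x002640 + engn * 8).
 * The bit layout below is inferred from nvgpu rather than from public
 * documentation; "chan" points at whichever of prev/next appears to own
 * the engine, or is NULL if that cannot be determined.
 */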
struct gk104_fifo_engine_status {
	bool busy;
	bool faulted;
	bool chsw;
	bool save;
	bool load;
	struct {
		bool tsg;
		u32 id;
	} prev, next, *chan;
};

static void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  =   (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  =   (stat & 0x00000fff);
	status->chan     = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

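/* Object-class dispatch: clients create either a GPFIFO channel object
 * or a usermode-register object, depending on which oclass table entry
 * gk104_fifo_class_get() handed out.
 */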
static int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		     void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	if (oclass->engn == &fifo->func->chan) {
		const struct gk104_fifo_chan_user *user = oclass->engn;
		return user->ctor(fifo, oclass, argv, argc, pobject);
	} else
	if (oclass->engn == &fifo->func->user) {
		const struct gk104_fifo_user_user *user = oclass->engn;
		return user->ctor(oclass, argv, argc, pobject);
	}
	WARN_ON(1);
	return -EINVAL;
}

static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
		     struct nvkm_oclass *oclass)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int c = 0;

	if (fifo->func->user.ctor && c++ == index) {
		oclass->base =  fifo->func->user.user;
		oclass->engn = &fifo->func->user;
		return 0;
	}

	if (fifo->func->chan.ctor && c++ == index) {
		oclass->base =  fifo->func->chan.user;
		oclass->engn = &fifo->func->chan;
		return 0;
	}

	return c;
}

static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

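/* Hand a built runlist to the scheduler: 0x002270 takes the 4KiB-aligned
 * base address plus aperture target, 0x002274 the runlist index and entry
 * count, and bit 20 of 0x002284 + (runl * 8) appears to stay set until
 * the hardware has consumed the update.
 */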
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
			  struct nvkm_memory *mem, int nr)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int target;

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
			break;
	) < 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
}

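/* Rebuild a runlist from software state and commit it.  Each runlist is
 * double-buffered, so a new list can be written while the hardware may
 * still be reading the old one; bare channels are written first, then
 * each channel group as a header entry followed by its member channels.
 */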
void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_memory *mem;
	struct nvkm_fifo_cgrp *cgrp;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		func->chan(chan, mem, nr++ * func->size);
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		func->cgrp(cgrp, mem, nr++ * func->size);
		list_for_each_entry(chan, &cgrp->chan, head) {
			func->chan(chan, mem, nr++ * func->size);
		}
	}
	nvkm_done(mem);

	func->commit(fifo, runl, mem, nr);
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		if (cgrp && !--cgrp->chan_nr)
			list_del_init(&cgrp->head);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (cgrp) {
		if (!cgrp->chan_nr++)
			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
		list_add_tail(&chan->head, &cgrp->chan);
	} else {
		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}

const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
	.size = 8,
	.chan = gk104_fifo_runlist_chan,
	.commit = gk104_fifo_runlist_commit,
};

void
gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
}

int
gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	return hweight32(nvkm_rd32(device, 0x000204));
}

const struct gk104_fifo_pbdma_func
gk104_fifo_pbdma = {
	.nr = gk104_fifo_pbdma_nr,
	.init = gk104_fifo_pbdma_init,
};

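/* Deferred recovery, run from a workqueue.  The engine/runlist masks
 * flagged by the interrupt-time handlers are latched under the FIFO
 * lock; affected runlists are blocked (0x002630), each engine is reset
 * with a fini/init cycle, and the runlists are resubmitted and released.
 */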
static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_update(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}

static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

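/* Look up the software state for a channel ID on a runlist and unlink it
 * (and, if it was the last member of its channel group, the group too),
 * so that the next runlist commit no longer schedules it.
 */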
static struct gk104_fifo_chan *
gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_fifo_cgrp *cgrp;

	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			return chan;
		}
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		if (cgrp->id == chid) {
			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
			list_del_init(&chan->head);
			if (!--cgrp->chan_nr)
				list_del_init(&cgrp->head);
			return chan;
		}
	}

	return NULL;
}

static void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32  runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Lookup SW state for channel, and mark it as dead. */
	chan = gk104_fifo_recover_chid(fifo, runl, chid);
	if (chan) {
		chan->killed = true;
		nvkm_fifo_kevent(&fifo->base, chid);
	}

	/* Disable channel. */
	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
	nvkm_warn(subdev, "channel %d: killed\n", chid);

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;
		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}

static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.index);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->fault.engine;
			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.index) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger an MMU fault for the engine.
	 *
	 * No good idea why this is needed, but nvgpu does something similar,
	 * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}

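/* MMU fault handler: decode the fault into readable enum names, reset
 * BAR units where that is the indicated remedy, log the fault, then
 * kill the offending channel and schedule recovery of any engines
 * involved.
 */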
static void
gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct nvkm_enum *er, *ee, *ec, *ea;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char ct[8] = "HUB/", en[16] = "";
	int engn;

	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
	if (info->hub) {
		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
	} else {
		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
	}
	ea = nvkm_enum_find(fifo->func->fault.access, info->access);

	if (ee && ee->data2) {
		switch (ee->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_bar_bar1_reset(device);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_bar_bar2_reset(device);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, ee->data2);
			break;
		}
	}

	if (ee == NULL) {
		enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
		if (engidx < NVKM_SUBDEV_NR) {
			const char *src = nvkm_subdev_name[engidx];
			char *dst = en;
			do {
				*dst++ = toupper(*src++);
			} while(*src);
			engine = nvkm_device_engine(device, engidx);
		}
	} else {
		snprintf(en, sizeof(en), "%s", ee->name);
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);

	nvkm_error(subdev,
		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
		   info->access, ea ? ea->name : "", info->addr,
		   info->engine, ee ? ee->name : en,
		   info->client, ct, ec ? ec->name : "",
		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
		   info->inst, chan ? chan->object.client->name : "unknown");

	/* Kill the channel that caused the fault. */
	if (chan)
		gk104_fifo_recover_chan(&fifo->base, chan->chid);

	/* Channel recovery will probably have already done this for the
	 * correct engine(s), but just in case we can't find the channel
	 * information...
	 */
	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine) {
			gk104_fifo_recover_engn(fifo, engn);
			break;
		}
	}

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

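/* SCHED_ERROR reasons.  Only CTXSW_TIMEOUT (0x0a) receives special
 * handling, via gk104_fifo_intr_sched_ctxsw() below; other codes are
 * only logged.
 */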
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags, engm = 0;
	u32 engn;

	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
	 * as MMU_FAULT cannot be triggered while it's pending.
	 */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);

	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	struct nvkm_fault_data info;

	info.inst   =  (u64)inst << 12;
	info.addr   = ((u64)vahi << 32) | valo;
	info.time   =   0;
	info.engine =   unit;
	info.valid  =   1;
	info.gpc    =  (type & 0x1f000000) >> 24;
	info.client =  (type & 0x00001f00) >> 8;
	info.access =  (type & 0x00000080) >> 7;
	info.hub    =  (type & 0x00000040) >> 6;
	info.reason =  (type & 0x000000ff);

	nvkm_fifo_fault(&fifo->base, &info);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

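/* First PBDMA interrupt leg.  The DEVICE bit (0x00800000) carries
 * software methods; these are offered to the SW class first, and only
 * whatever remains set afterwards is reported as an error.
 */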
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

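/* Top-level PFIFO interrupt dispatch.  Each recognized bit is ACKed in
 * 0x002100 and cleared from the local status copy; any leftover bits
 * are reported once and then masked off so they cannot storm.
 */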
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->recover.work);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

static int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	switch (mthd) {
	case NV_DEVICE_FIFO_RUNLISTS:
		*data = (1ULL << fifo->runlist_nr) - 1;
		return 0;
	case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
	     NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
		int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
		if (runl < fifo->runlist_nr) {
			unsigned long engm = fifo->runlist[runl].engm;
			struct nvkm_engine *engine;
			*data = 0;
			for_each_set_bit(engn, &engm, fifo->engine_nr) {
				if ((engine = fifo->engine[engn].engine))
					*data |= BIT_ULL(engine->subdev.index);
			}
			return 0;
		}
	}
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

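/* One-time setup: count PBDMAs, read the PBDMA->runlist mapping and the
 * topology info to discover engines and runlists, then allocate the
 * double-buffered runlist memory and the per-channel user area mapped
 * through BAR1.
 */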
static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	int engn, runl, pbid, ret, i, j;
	enum nvkm_devidx engidx;
	u32 *map;

	fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Determine runlist configuration from topology device info. */
	i = 0;
	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
			if (map[j] & (1 << runl)) {
				pbid = j;
				break;
			}
		}

		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
			   engn, runl, pbid, nvkm_subdev_name[engidx]);

		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
		fifo->engine[engn].runl = runl;
		fifo->engine[engn].pbid = pbid;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[runl].engm |= 1 << engn;
		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					      fifo->base.nr * 2/* TSG+chan */ *
					      fifo->func->runlist->size,
					      0x1000, false,
					      &fifo->runlist[i].mem[j]);
			if (ret)
				return ret;
		}

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

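/* Runtime initialisation: enable the PBDMA units, clear and unmask their
 * interrupts, point 0x002254 at the BAR1 address of the user area, then
 * clear and enable the top-level PFIFO interrupts.
 */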
static void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	fifo->func->pbdma->init(fifo);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	if (fifo->func->pbdma->init_timeout)
		fifo->func->pbdma->init_timeout(fifo);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

static const struct nvkm_fifo_func
gk104_fifo_ = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.info = gk104_fifo_info,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.fault = gk104_fifo_fault,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.class_get = gk104_fifo_class_get,
	.class_new = gk104_fifo_class_new,
};

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}

const struct nvkm_enum
gk104_fifo_fault_access[] = {
	{ 0x0, "READ" },
	{ 0x1, "WRITE" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

static const struct gk104_fifo_func
gk104_fifo = {
	.pbdma = &gk104_fifo_pbdma,
	.fault.access = gk104_fifo_fault_access,
	.fault.engine = gk104_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.runlist = &gk104_fifo_runlist,
	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}