/* Source: linux.git -- drivers/gpu/drm/nouveau/dispnv50/disp.c
 * Commit: drm/nouveau/kms/nv50: handle SetControlOutputResource from head
 */
1 /*
2  * Copyright 2011 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24
25 #include <linux/dma-mapping.h>
26 #include <linux/hdmi.h>
27
28 #include <drm/drmP.h>
29 #include <drm/drm_atomic.h>
30 #include <drm/drm_atomic_helper.h>
31 #include <drm/drm_crtc_helper.h>
32 #include <drm/drm_dp_helper.h>
33 #include <drm/drm_fb_helper.h>
34 #include <drm/drm_plane_helper.h>
35 #include <drm/drm_edid.h>
36
37 #include <nvif/mem.h>
38
39 #include <nvif/class.h>
40 #include <nvif/cl0002.h>
41 #include <nvif/cl5070.h>
42 #include <nvif/cl507a.h>
43 #include <nvif/cl507b.h>
44 #include <nvif/cl507c.h>
45 #include <nvif/cl507d.h>
46 #include <nvif/cl507e.h>
47 #include <nvif/event.h>
48
49 #include "nouveau_drv.h"
50 #include "nouveau_dma.h"
51 #include "nouveau_gem.h"
52 #include "nouveau_connector.h"
53 #include "nouveau_encoder.h"
54 #include "nouveau_crtc.h"
55 #include "nouveau_fence.h"
56 #include "nouveau_fbcon.h"
57 #include "nv50_display.h"
58
/* Number of DMA objects reserved per EVO channel. */
59 #define EVO_DMA_NR 9
60
/* EVO channel indices: one master (core) channel, then per-head
 * flip (base), overlay, overlay-immediate and cursor channels. */
61 #define EVO_MASTER  (0x00)
62 #define EVO_FLIP(c) (0x01 + (c))
63 #define EVO_OVLY(c) (0x05 + (c))
64 #define EVO_OIMM(c) (0x09 + (c))
65 #define EVO_CURS(c) (0x0d + (c))
66
67 /* offsets in shared sync bo of various structures */
/* Each channel gets a 0x100-byte slot in the sync buffer object. */
68 #define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
69 #define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
70 #define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
71 #define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
72 #define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
73 #define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
74
75 /******************************************************************************
76  * Atomic state
77  *****************************************************************************/
/* Upcast from the embedded drm_atomic_state to the nv50 wrapper. */
78 #define nv50_atom(p) container_of((p), struct nv50_atom, state)
79
/* Device-wide atomic commit state wrapping the DRM core's state. */
80 struct nv50_atom {
81         struct drm_atomic_state state;
82
83         struct list_head outp; /* nv50_outp_atom entries for this commit */
84         bool lock_core; /* NOTE(review): appears to request core-channel interlock -- confirm */
85         bool flush_disable; /* NOTE(review): appears to force disables to be flushed -- confirm */
86 };
87
/* Per-output (encoder) atomic state, queued on nv50_atom.outp. */
88 struct nv50_outp_atom {
89         struct list_head head;
90
91         struct drm_encoder *encoder;
92         bool flush_disable; /* NOTE(review): per-output flush request -- confirm semantics */
93
/* Hardware methods to clear; 'mask' aliases the flag bits. */
94         union {
95                 struct {
96                         bool ctrl:1;
97                 };
98                 u8 mask;
99         } clr;
100
/* Hardware methods to set; 'mask' aliases the flag bits. */
101         union {
102                 struct {
103                         bool ctrl:1;
104                 };
105                 u8 mask;
106         } set;
107 };
108
/* Upcast from the embedded drm_crtc_state to the nv50 head state. */
109 #define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)
110
/* Per-head (CRTC) atomic state: the hardware-ready description of
 * everything the head's EVO methods need, plus set/clr dirty masks. */
111 struct nv50_head_atom {
112         struct drm_crtc_state state;
113
/* Input (i) and output (o) dimensions for the scaler. */
114         struct {
115                 u16 iW;
116                 u16 iH;
117                 u16 oW;
118                 u16 oH;
119         } view;
120
/* Raster timings broken down per axis. */
121         struct nv50_head_mode {
122                 bool interlace;
123                 u32 clock;
124                 struct {
125                         u16 active;
126                         u16 synce;
127                         u16 blanke;
128                         u16 blanks;
129                 } h;
130                 struct {
131                         u32 active;
132                         u16 synce;
133                         u16 blanke;
134                         u16 blanks;
135                         u16 blank2s;
136                         u16 blank2e;
137                         u16 blankus;
138                 } v;
139         } mode;
140
/* Input LUT (ilut) buffer. */
141         struct {
142                 bool visible;
143                 u32 handle;
144                 u64 offset:40;
145                 u8  mode:4;
146         } lut;
147
/* Core channel surface (format/tiling/pitch and placement). */
148         struct {
149                 bool visible;
150                 u32 handle;
151                 u64 offset:40;
152                 u8  format;
153                 u8  kind:7;
154                 u8  layout:1;
155                 u8  block:4;
156                 u32 pitch:20;
157                 u16 x;
158                 u16 y;
159                 u16 w;
160                 u16 h;
161         } core;
162
/* Cursor image. */
163         struct {
164                 bool visible;
165                 u32 handle;
166                 u64 offset:40;
167                 u8  layout:1;
168                 u8  format:1;
169         } curs;
170
/* Base (primary plane) window geometry and pixel size. */
171         struct {
172                 u8  depth;
173                 u8  cpp;
174                 u16 x;
175                 u16 y;
176                 u16 w;
177                 u16 h;
178         } base;
179
/* Overlay plane pixel size. */
180         struct {
181                 u8 cpp;
182         } ovly;
183
/* Dithering configuration. */
184         struct {
185                 bool enable:1;
186                 u8 bits:2;
187                 u8 mode:4;
188         } dither;
189
/* Procamp: saturation expressed as fixed-point cos/sin pair. */
190         struct {
191                 struct {
192                         u16 cos:12;
193                         u16 sin:12;
194                 } sat;
195         } procamp;
196
/* Output resource (OR) config: sync polarities and depth. */
197         struct {
198                 u8 nhsync:1;
199                 u8 nvsync:1;
200                 u8 depth:4;
201         } or;
202
/* Resources to clear this commit; 'mask' aliases the flag bits. */
203         union {
204                 struct {
205                         bool ilut:1;
206                         bool core:1;
207                         bool curs:1;
208                 };
209                 u8 mask;
210         } clr;
211
/* State to (re)program this commit; 'mask' aliases the flag bits. */
212         union {
213                 struct {
214                         bool ilut:1;
215                         bool core:1;
216                         bool curs:1;
217                         bool view:1;
218                         bool mode:1;
219                         bool base:1;
220                         bool ovly:1;
221                         bool dither:1;
222                         bool procamp:1;
223                         bool or:1;
224                 };
225                 u16 mask;
226         } set;
227 };
228
/* Fetch (creating if necessary) the nv50 head state for @crtc within
 * the atomic transaction @state.  Returns an ERR_PTR on failure.
 */
static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return (void *)crtc_state;

	return nv50_head_atom(crtc_state);
}
237
/* Upcast from the embedded drm_plane_state to the nv50 window state. */
238 #define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)
239
/* Per-window (plane) atomic state with hardware-ready parameters. */
240 struct nv50_wndw_atom {
241         struct drm_plane_state state;
242         u8 interval; /* flip interval: 0 for async flips, 1 otherwise */
243
/* Completion notifier within the sync buffer. */
244         struct {
245                 u32  handle;
246                 u16  offset:12;
247                 bool awaken:1;
248         } ntfy;
249
/* Flip semaphore within the sync buffer. */
250         struct {
251                 u32 handle;
252                 u16 offset:12;
253                 u32 acquire;
254                 u32 release;
255         } sema;
256
257         struct {
258                 u8 enable:2;
259         } lut;
260
/* Framebuffer description: tiling, pitch, size, ctxdma handle. */
261         struct {
262                 u8  mode:2;
263                 u8  interval:4;
264
265                 u8  format;
266                 u8  kind:7;
267                 u8  layout:1;
268                 u8  block:4;
269                 u32 pitch:20;
270                 u16 w;
271                 u16 h;
272
273                 u32 handle;
274                 u64 offset;
275         } image;
276
/* Window position (used for cursor-style immediate windows). */
277         struct {
278                 u16 x;
279                 u16 y;
280         } point;
281
/* Resources to clear this commit; 'mask' aliases the flag bits. */
282         union {
283                 struct {
284                         bool ntfy:1;
285                         bool sema:1;
286                         bool image:1;
287                 };
288                 u8 mask;
289         } clr;
290
/* State to (re)program this commit; 'mask' aliases the flag bits. */
291         union {
292                 struct {
293                         bool ntfy:1;
294                         bool sema:1;
295                         bool image:1;
296                         bool lut:1;
297                         bool point:1;
298                 };
299                 u8 mask;
300         } set;
301 };
302
303 /******************************************************************************
304  * EVO channel
305  *****************************************************************************/
306
/* A display (EVO) channel: the nvif object plus its owning device. */
307 struct nv50_chan {
308         struct nvif_object user;
309         struct nvif_device *device;
310 };
311
312 static int
313 nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
314                  const s32 *oclass, u8 head, void *data, u32 size,
315                  struct nv50_chan *chan)
316 {
317         struct nvif_sclass *sclass;
318         int ret, i, n;
319
320         chan->device = device;
321
322         ret = n = nvif_object_sclass_get(disp, &sclass);
323         if (ret < 0)
324                 return ret;
325
326         while (oclass[0]) {
327                 for (i = 0; i < n; i++) {
328                         if (sclass[i].oclass == oclass[0]) {
329                                 ret = nvif_object_init(disp, 0, oclass[0],
330                                                        data, size, &chan->user);
331                                 if (ret == 0)
332                                         nvif_object_map(&chan->user, NULL, 0);
333                                 nvif_object_sclass_put(&sclass);
334                                 return ret;
335                         }
336                 }
337                 oclass++;
338         }
339
340         nvif_object_sclass_put(&sclass);
341         return -ENOSYS;
342 }
343
344 static void
345 nv50_chan_destroy(struct nv50_chan *chan)
346 {
347         nvif_object_fini(&chan->user);
348 }
349
350 /******************************************************************************
351  * DMA EVO channel
352  *****************************************************************************/
353
/* A cached DMA context object for a framebuffer, kept on the owning
 * window's ctxdma list and keyed by object handle. */
354 struct nv50_wndw_ctxdma {
355         struct list_head head;
356         struct nvif_object object;
357 };
358
/* A DMA-mode EVO channel: base channel plus pushbuf and the DMA
 * objects (sync area and whole-of-VRAM) every channel needs. */
359 struct nv50_dmac {
360         struct nv50_chan base;
361
362         struct nvif_mem push; /* coherent pushbuf backing memory */
363         u32 *ptr; /* CPU mapping of the pushbuf */
364
365         struct nvif_object sync;
366         struct nvif_object vram;
367
368         /* Protects against concurrent pushbuf access to this channel, lock is
369          * grabbed by evo_wait (if the pushbuf reservation is successful) and
370          * dropped again by evo_kick. */
371         struct mutex lock;
372 };
373
374 static void
375 nv50_dmac_destroy(struct nv50_dmac *dmac)
376 {
377         nvif_object_fini(&dmac->vram);
378         nvif_object_fini(&dmac->sync);
379
380         nv50_chan_destroy(&dmac->base);
381
382         nvif_mem_fini(&dmac->push);
383 }
384
385 static int
386 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
387                  const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
388                  struct nv50_dmac *dmac)
389 {
390         struct nouveau_cli *cli = (void *)device->object.client;
391         struct nv50_disp_core_channel_dma_v0 *args = data;
392         int ret;
393
394         mutex_init(&dmac->lock);
395
396         ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
397                                 &dmac->push);
398         if (ret)
399                 return ret;
400
401         dmac->ptr = dmac->push.object.map.ptr;
402
403         args->pushbuf = nvif_handle(&dmac->push.object);
404
405         ret = nv50_chan_create(device, disp, oclass, head, data, size,
406                                &dmac->base);
407         if (ret)
408                 return ret;
409
410         ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
411                                &(struct nv_dma_v0) {
412                                         .target = NV_DMA_V0_TARGET_VRAM,
413                                         .access = NV_DMA_V0_ACCESS_RDWR,
414                                         .start = syncbuf + 0x0000,
415                                         .limit = syncbuf + 0x0fff,
416                                }, sizeof(struct nv_dma_v0),
417                                &dmac->sync);
418         if (ret)
419                 return ret;
420
421         ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
422                                &(struct nv_dma_v0) {
423                                         .target = NV_DMA_V0_TARGET_VRAM,
424                                         .access = NV_DMA_V0_ACCESS_RDWR,
425                                         .start = 0,
426                                         .limit = device->info.ram_user - 1,
427                                }, sizeof(struct nv_dma_v0),
428                                &dmac->vram);
429         if (ret)
430                 return ret;
431
432         return ret;
433 }
434
435 /******************************************************************************
436  * Base
437  *****************************************************************************/
438
/* A base channel plus the semaphore address/value used for flips. */
439 struct nv50_sync {
440         struct nv50_dmac base;
441         u32 addr;
442         u32 data;
443 };
444
/* A display head (CRTC): hardware ops plus double-buffered LUT bos. */
445 struct nv50_head {
446         const struct nv50_head_func *func;
447         struct nouveau_crtc base;
448         struct {
449                 struct nouveau_bo *nvbo[2]; /* two LUT buffers, flipped between */
450                 int next; /* index of the LUT buffer to use next */
451         } lut;
452 };
453
/* Per-generation hardware methods for programming a head.  Each hook
 * pushes the corresponding nv50_head_atom state to the core channel;
 * *_clr hooks disable the resource. */
454 struct nv50_head_func {
455         void (*view)(struct nv50_head *, struct nv50_head_atom *);
456         void (*mode)(struct nv50_head *, struct nv50_head_atom *);
457         void (*ilut_set)(struct nv50_head *, struct nv50_head_atom *);
458         void (*ilut_clr)(struct nv50_head *);
459         void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
460         void (*core_clr)(struct nv50_head *);
461         void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
462         void (*curs_clr)(struct nv50_head *);
463         void (*base)(struct nv50_head *, struct nv50_head_atom *);
464         void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
465         void (*dither)(struct nv50_head *, struct nv50_head_atom *);
466         void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
467         void (*or)(struct nv50_head *, struct nv50_head_atom *);
468 };
469
470 #define nv50_head(c) container_of((c), struct nv50_head, base.base)
471
/* Driver-private display state, reachable via nouveau_display->priv. */
472 struct nv50_disp {
473         struct nvif_disp *disp;
474         struct nv50_core *core;
475
476         struct nouveau_bo *sync; /* shared sync bo (see EVO_SYNC offsets) */
477
478         struct mutex mutex;
479 };
480
481 static struct nv50_disp *
482 nv50_disp(struct drm_device *dev)
483 {
484         return nouveau_display(dev)->priv;
485 }
486
487 /******************************************************************************
488  * Core
489  *****************************************************************************/
490
/* The core (master) EVO channel and its per-generation methods. */
491 struct nv50_core {
492         const struct nv50_core_func *func;
493         struct nv50_dmac chan;
494 };
495
/* Per-generation core channel methods (currently just the head ops). */
496 struct nv50_core_func {
497         const struct nv50_head_func *head;
498 };
499
500 static int
501 core507d_new_(const struct nv50_core_func *func, struct nouveau_drm *drm,
502               s32 oclass, struct nv50_core **pcore)
503 {
504         struct nv50_disp_core_channel_dma_v0 args = {};
505         struct nv50_disp *disp = nv50_disp(drm->dev);
506         struct nv50_core *core;
507         int ret;
508
509         if (!(core = *pcore = kzalloc(sizeof(*core), GFP_KERNEL)))
510                 return -ENOMEM;
511         core->func = func;
512
513         ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
514                                &oclass, 0, &args, sizeof(args),
515                                disp->sync->bo.offset, &core->chan);
516         if (ret) {
517                 NV_ERROR(drm, "core%04x allocation failed: %d\n", oclass, ret);
518                 return ret;
519         }
520
521         return 0;
522 }
523
524 /******************************************************************************
525  * EVO channel helpers
526  *****************************************************************************/
527 static u32 *
528 evo_wait(void *evoc, int nr)
529 {
530         struct nv50_dmac *dmac = evoc;
531         struct nvif_device *device = dmac->base.device;
532         u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
533
534         mutex_lock(&dmac->lock);
535         if (put + nr >= (PAGE_SIZE / 4) - 8) {
536                 dmac->ptr[put] = 0x20000000;
537
538                 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
539                 if (nvif_msec(device, 2000,
540                         if (!nvif_rd32(&dmac->base.user, 0x0004))
541                                 break;
542                 ) < 0) {
543                         mutex_unlock(&dmac->lock);
544                         pr_err("nouveau: evo channel stalled\n");
545                         return NULL;
546                 }
547
548                 put = 0;
549         }
550
551         return dmac->ptr + put;
552 }
553
554 static void
555 evo_kick(u32 *push, void *evoc)
556 {
557         struct nv50_dmac *dmac = evoc;
558         nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
559         mutex_unlock(&dmac->lock);
560 }
561
/* Emit a method header (method @m, @s data dwords) into the pushbuf at
 * *p, logging it when KMS debugging is enabled. */
562 #define evo_mthd(p, m, s) do {                                          \
563         const u32 _m = (m), _s = (s);                                   \
564         if (drm_debug & DRM_UT_KMS)                                     \
565                 pr_err("%04x %d %s\n", _m, _s, __func__);               \
566         *((p)++) = ((_s << 18) | _m);                                   \
567 } while(0)
568
/* Emit one data dword into the pushbuf at *p, with matching logging. */
569 #define evo_data(p, d) do {                                             \
570         const u32 _d = (d);                                             \
571         if (drm_debug & DRM_UT_KMS)                                     \
572                 pr_err("\t%08x\n", _d);                                 \
573         *((p)++) = _d;                                                  \
574 } while(0)
575
576 /******************************************************************************
577  * Plane
578  *****************************************************************************/
/* Upcast from the embedded drm_plane to the nv50 window. */
579 #define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)
580
/* A hardware window (DRM plane): per-generation methods, ctxdma cache,
 * and the window + window-immediate channels used to program it. */
581 struct nv50_wndw {
582         const struct nv50_wndw_func *func;
583         const struct nv50_wimm_func *immd;
584         int id;
585
/* Cache of framebuffer DMA context objects, created under 'parent'. */
586         struct {
587                 struct nvif_object *parent;
588                 struct list_head list;
589         } ctxdma;
590
591         struct drm_plane plane;
592
593         struct nv50_dmac wndw;
594         struct nv50_dmac wimm;
595
596         struct nvif_notify notify;
597         u16 ntfy; /* notifier offset in the sync bo */
598         u16 sema; /* semaphore offset in the sync bo */
599         u32 data;
600 };
601
/* Per-generation window methods.  acquire/release validate atomic
 * state; the *_set/*_clr pairs program or disable hardware resources;
 * update flushes pending methods (with optional interlock). */
602 struct nv50_wndw_func {
603         int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
604                        struct nv50_head_atom *asyh);
605         void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
606                         struct nv50_head_atom *asyh);
607         void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
608                         struct nv50_wndw_atom *asyw);
609
610         void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
611         void (*sema_clr)(struct nv50_wndw *);
612         void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
613         void (*ntfy_clr)(struct nv50_wndw *);
614         int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
615         void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
616         void (*image_clr)(struct nv50_wndw *);
617         void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
618
619         u32 (*update)(struct nv50_wndw *, u32 interlock);
620 };
621
/* Window-immediate channel methods (cursor-style position updates). */
622 struct nv50_wimm_func {
623         void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
624
625         u32 (*update)(struct nv50_wndw *, u32 interlock);
626 };
627
628 static void
629 nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
630 {
631         nvif_object_fini(&ctxdma->object);
632         list_del(&ctxdma->head);
633         kfree(ctxdma);
634 }
635
636 static struct nv50_wndw_ctxdma *
637 nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
638 {
639         struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
640         struct nv50_wndw_ctxdma *ctxdma;
641         const u8    kind = fb->nvbo->kind;
642         const u32 handle = 0xfb000000 | kind;
643         struct {
644                 struct nv_dma_v0 base;
645                 union {
646                         struct nv50_dma_v0 nv50;
647                         struct gf100_dma_v0 gf100;
648                         struct gf119_dma_v0 gf119;
649                 };
650         } args = {};
651         u32 argc = sizeof(args.base);
652         int ret;
653
654         list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
655                 if (ctxdma->object.handle == handle)
656                         return ctxdma;
657         }
658
659         if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
660                 return ERR_PTR(-ENOMEM);
661         list_add(&ctxdma->head, &wndw->ctxdma.list);
662
663         args.base.target = NV_DMA_V0_TARGET_VRAM;
664         args.base.access = NV_DMA_V0_ACCESS_RDWR;
665         args.base.start  = 0;
666         args.base.limit  = drm->client.device.info.ram_user - 1;
667
668         if (drm->client.device.info.chipset < 0x80) {
669                 args.nv50.part = NV50_DMA_V0_PART_256;
670                 argc += sizeof(args.nv50);
671         } else
672         if (drm->client.device.info.chipset < 0xc0) {
673                 args.nv50.part = NV50_DMA_V0_PART_256;
674                 args.nv50.kind = kind;
675                 argc += sizeof(args.nv50);
676         } else
677         if (drm->client.device.info.chipset < 0xd0) {
678                 args.gf100.kind = kind;
679                 argc += sizeof(args.gf100);
680         } else {
681                 args.gf119.page = GF119_DMA_V0_PAGE_LP;
682                 args.gf119.kind = kind;
683                 argc += sizeof(args.gf119);
684         }
685
686         ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
687                                &args, argc, &ctxdma->object);
688         if (ret) {
689                 nv50_wndw_ctxdma_del(ctxdma);
690                 return ERR_PTR(ret);
691         }
692
693         return ctxdma;
694 }
695
696 static int
697 nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
698 {
699         if (asyw->set.ntfy)
700                 return wndw->func->ntfy_wait_begun(wndw, asyw);
701         return 0;
702 }
703
704 static u32
705 nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
706                     struct nv50_wndw_atom *asyw)
707 {
708         if (asyw->clr.sema && (!asyw->set.sema || flush))
709                 wndw->func->sema_clr(wndw);
710         if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
711                 wndw->func->ntfy_clr(wndw);
712         if (asyw->clr.image && (!asyw->set.image || flush))
713                 wndw->func->image_clr(wndw);
714
715         return flush ? wndw->func->update(wndw, interlock) : 0;
716 }
717
718 static u32
719 nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
720                     struct nv50_wndw_atom *asyw)
721 {
722         if (interlock) {
723                 asyw->image.mode = 0;
724                 asyw->image.interval = 1;
725         }
726
727         if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
728         if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
729         if (asyw->set.image) wndw->func->image_set(wndw, asyw);
730         if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
731         if (asyw->set.point) {
732                 wndw->immd->point(wndw, asyw);
733                 wndw->immd->update(wndw, interlock);
734         }
735
736         return wndw->func->update ? wndw->func->update(wndw, interlock) : 0;
737 }
738
739 static void
740 nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
741                                struct nv50_wndw_atom *asyw,
742                                struct nv50_head_atom *asyh)
743 {
744         struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
745         NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
746         wndw->func->release(wndw, asyw, asyh);
747         asyw->ntfy.handle = 0;
748         asyw->sema.handle = 0;
749 }
750
751 static int
752 nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
753                                struct nv50_wndw_atom *asyw,
754                                struct nv50_head_atom *asyh)
755 {
756         struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
757         struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
758         int ret;
759
760         NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
761
762         asyw->image.w = fb->base.width;
763         asyw->image.h = fb->base.height;
764         asyw->image.kind = fb->nvbo->kind;
765
766         if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
767                 asyw->interval = 0;
768         else
769                 asyw->interval = 1;
770
771         if (asyw->image.kind) {
772                 asyw->image.layout = 0;
773                 if (drm->client.device.info.chipset >= 0xc0)
774                         asyw->image.block = fb->nvbo->mode >> 4;
775                 else
776                         asyw->image.block = fb->nvbo->mode;
777                 asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
778         } else {
779                 asyw->image.layout = 1;
780                 asyw->image.block  = 0;
781                 asyw->image.pitch  = fb->base.pitches[0];
782         }
783
784         ret = wndw->func->acquire(wndw, asyw, asyh);
785         if (ret)
786                 return ret;
787
788         if (asyw->set.image) {
789                 if (!(asyw->image.mode = asyw->interval ? 0 : 1))
790                         asyw->image.interval = asyw->interval;
791                 else
792                         asyw->image.interval = 0;
793         }
794
795         return 0;
796 }
797
798 static int
799 nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
800 {
801         struct nouveau_drm *drm = nouveau_drm(plane->dev);
802         struct nv50_wndw *wndw = nv50_wndw(plane);
803         struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
804         struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
805         struct nv50_head_atom *harm = NULL, *asyh = NULL;
806         bool varm = false, asyv = false, asym = false;
807         int ret;
808
809         NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
810         if (asyw->state.crtc) {
811                 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
812                 if (IS_ERR(asyh))
813                         return PTR_ERR(asyh);
814                 asym = drm_atomic_crtc_needs_modeset(&asyh->state);
815                 asyv = asyh->state.active;
816         }
817
818         if (armw->state.crtc) {
819                 harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
820                 if (IS_ERR(harm))
821                         return PTR_ERR(harm);
822                 varm = harm->state.crtc->state->active;
823         }
824
825         if (asyv) {
826                 asyw->point.x = asyw->state.crtc_x;
827                 asyw->point.y = asyw->state.crtc_y;
828                 if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
829                         asyw->set.point = true;
830
831                 ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
832                 if (ret)
833                         return ret;
834         } else
835         if (varm) {
836                 nv50_wndw_atomic_check_release(wndw, asyw, harm);
837         } else {
838                 return 0;
839         }
840
841         if (!asyv || asym) {
842                 asyw->clr.ntfy = armw->ntfy.handle != 0;
843                 asyw->clr.sema = armw->sema.handle != 0;
844                 if (wndw->func->image_clr)
845                         asyw->clr.image = armw->image.handle != 0;
846                 asyw->set.lut = wndw->func->lut && asyv;
847         }
848
849         return 0;
850 }
851
852 static void
853 nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
854 {
855         struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
856         struct nouveau_drm *drm = nouveau_drm(plane->dev);
857
858         NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
859         if (!old_state->fb)
860                 return;
861
862         nouveau_bo_unpin(fb->nvbo);
863 }
864
865 static int
866 nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
867 {
868         struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
869         struct nouveau_drm *drm = nouveau_drm(plane->dev);
870         struct nv50_wndw *wndw = nv50_wndw(plane);
871         struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
872         struct nv50_head_atom *asyh;
873         struct nv50_wndw_ctxdma *ctxdma;
874         int ret;
875
876         NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
877         if (!asyw->state.fb)
878                 return 0;
879
880         ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
881         if (ret)
882                 return ret;
883
884         ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
885         if (IS_ERR(ctxdma)) {
886                 nouveau_bo_unpin(fb->nvbo);
887                 return PTR_ERR(ctxdma);
888         }
889
890         asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
891         asyw->image.handle = ctxdma->object.handle;
892         asyw->image.offset = fb->nvbo->bo.offset;
893
894         if (wndw->func->prepare) {
895                 asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
896                 if (IS_ERR(asyh))
897                         return PTR_ERR(asyh);
898
899                 wndw->func->prepare(wndw, asyh, asyw);
900         }
901
902         return 0;
903 }
904
/* Atomic helper vtable shared by all nv50 window planes. */
905 static const struct drm_plane_helper_funcs
906 nv50_wndw_helper = {
907         .prepare_fb = nv50_wndw_prepare_fb,
908         .cleanup_fb = nv50_wndw_cleanup_fb,
909         .atomic_check = nv50_wndw_atomic_check,
910 };
911
912 static void
913 nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
914                                struct drm_plane_state *state)
915 {
916         struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
917         __drm_atomic_helper_plane_destroy_state(&asyw->state);
918         kfree(asyw);
919 }
920
921 static struct drm_plane_state *
922 nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
923 {
924         struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
925         struct nv50_wndw_atom *asyw;
926         if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
927                 return NULL;
928         __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
929         asyw->interval = 1;
930         asyw->sema = armw->sema;
931         asyw->ntfy = armw->ntfy;
932         asyw->image = armw->image;
933         asyw->point = armw->point;
934         asyw->lut = armw->lut;
935         asyw->clr.mask = 0;
936         asyw->set.mask = 0;
937         return &asyw->state;
938 }
939
940 static void
941 nv50_wndw_reset(struct drm_plane *plane)
942 {
943         struct nv50_wndw_atom *asyw;
944
945         if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
946                 return;
947
948         if (plane->state)
949                 plane->funcs->atomic_destroy_state(plane, plane->state);
950         plane->state = &asyw->state;
951         plane->state->plane = plane;
952         plane->state->rotation = DRM_MODE_ROTATE_0;
953 }
954
955 static void
956 nv50_wndw_destroy(struct drm_plane *plane)
957 {
958         struct nv50_wndw *wndw = nv50_wndw(plane);
959         struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;
960
961         list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
962                 nv50_wndw_ctxdma_del(ctxdma);
963         }
964
965         nvif_notify_fini(&wndw->notify);
966         nv50_dmac_destroy(&wndw->wimm);
967         nv50_dmac_destroy(&wndw->wndw);
968         drm_plane_cleanup(&wndw->plane);
969         kfree(wndw);
970 }
971
/* Plane vtable shared by all nv50 window planes; update/disable go
 * through the generic atomic helpers.
 */
static const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};
981
/* nvif notify callback: keep the notifier armed after each event. */
static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}
987
/* Disarm the window's event notifier (runtime suspend / teardown path). */
static void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}
993
/* Arm the window's event notifier (init / runtime resume path). */
static void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}
999
1000 static int
1001 nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
1002                enum drm_plane_type type, const char *name, int index,
1003                const u32 *format, struct nv50_wndw **pwndw)
1004 {
1005         struct nv50_wndw *wndw;
1006         int nformat;
1007         int ret;
1008
1009         if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
1010                 return -ENOMEM;
1011         wndw->func = func;
1012         wndw->id = index;
1013
1014         wndw->ctxdma.parent = &wndw->wndw.base.user;
1015         INIT_LIST_HEAD(&wndw->ctxdma.list);
1016
1017         for (nformat = 0; format[nformat]; nformat++);
1018
1019         ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
1020                                        format, nformat, NULL,
1021                                        type, "%s-%d", name, index);
1022         if (ret) {
1023                 kfree(*pwndw);
1024                 *pwndw = NULL;
1025                 return ret;
1026         }
1027
1028         drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
1029
1030         wndw->notify.func = nv50_wndw_notify;
1031         return 0;
1032 }
1033
1034 /******************************************************************************
1035  * Overlay
1036  *****************************************************************************/
1037
/* Overlay immediate channel has no pointer-update methods on this
 * hardware generation; the vtable is intentionally empty.
 */
static const struct nv50_wimm_func
oimm507b = {
};
1041
/* Allocate the overlay immediate-update object for this window's head
 * and map it so registers can be poked directly via nvif_wr32().
 *
 * Returns 0 on success or a negative error code.
 */
static int
oimm507b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
	       s32 oclass, struct nv50_wndw *wndw)
{
	struct nv50_disp_overlay_v0 args = {
		.head = wndw->id,
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int ret;

	ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
			       sizeof(args), &wndw->wimm.base.user);
	if (ret) {
		NV_ERROR(drm, "oimm%04x allocation failed: %d\n", oclass, ret);
		return ret;
	}

	/* NOTE(review): nvif_object_map()'s return value is ignored here
	 * (as it is in curs507a_new_()) - confirm a map failure is benign.
	 */
	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
	wndw->immd = func;
	return 0;
}
1063
/* Bind the 507b overlay-immediate vtable; used by all classes below. */
static int
oimm507b_init(struct nouveau_drm *drm, s32 oclass, struct nv50_wndw *wndw)
{
	return oimm507b_init_(&oimm507b, drm, oclass, wndw);
}
1069
/* Pick the newest overlay-immediate class the display object supports
 * (table is ordered newest-first for nvif_mclass()) and initialise it.
 *
 * Returns 0 on success or a negative error code.
 */
static int
nv50_oimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
{
	static const struct {
		s32 oclass;
		int version;
		int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
	} oimms[] = {
		{ GK104_DISP_OVERLAY, 0, oimm507b_init },
		{ GF110_DISP_OVERLAY, 0, oimm507b_init },
		{ GT214_DISP_OVERLAY, 0, oimm507b_init },
		{   G82_DISP_OVERLAY, 0, oimm507b_init },
		{  NV50_DISP_OVERLAY, 0, oimm507b_init },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid;

	cid = nvif_mclass(&disp->disp->object, oimms);
	if (cid < 0) {
		NV_ERROR(drm, "No supported overlay immediate class\n");
		return cid;
	}

	return oimms[cid].init(drm, oimms[cid].oclass, wndw);
}
1096
/* Overlay window vtable - no hooks wired up yet in this view of the
 * file; overlay planes currently expose no configurable behaviour here.
 */
static const struct nv50_wndw_func
ovly507e = {
};
1100
/* Zero-terminated overlay format list; empty apart from the sentinel. */
static const u32
ovly507e_format[] = {
	0
};
1105
1106 static int
1107 ovly507e_new_(const struct nv50_wndw_func *func, const u32 *format,
1108               struct nouveau_drm *drm, int head, s32 oclass,
1109               struct nv50_wndw **pwndw)
1110 {
1111         struct nv50_disp_overlay_channel_dma_v0 args = {
1112                 .head = head,
1113         };
1114         struct nv50_disp *disp = nv50_disp(drm->dev);
1115         struct nv50_wndw *wndw;
1116         int ret;
1117
1118         ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_OVERLAY,
1119                              "ovly", head, format, &wndw);
1120         if (*pwndw = wndw, ret)
1121                 return ret;
1122
1123         ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
1124                                &oclass, 0, &args, sizeof(args),
1125                                disp->sync->bo.offset, &wndw->wndw);
1126         if (ret) {
1127                 NV_ERROR(drm, "ovly%04x allocation failed: %d\n", oclass, ret);
1128                 return ret;
1129         }
1130
1131         return 0;
1132 }
1133
/* Bind the 507e overlay vtable/formats; shared by all classes below. */
static int
ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
	     struct nv50_wndw **pwndw)
{
	return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass, pwndw);
}
1140
/* Create the overlay plane for a head: pick the newest supported overlay
 * channel class (table ordered newest-first for nvif_mclass()), build
 * the plane, then attach its immediate-update object.
 *
 * Returns 0 on success or a negative error code.
 */
static int
nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
{
	static const struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
	} ovlys[] = {
		{ GK104_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
		{ GF110_DISP_OVERLAY_CONTROL_DMA, 0, ovly507e_new },
		{ GT214_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
		{ GT200_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
		{   G82_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
		{  NV50_DISP_OVERLAY_CHANNEL_DMA, 0, ovly507e_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, ovlys);
	if (cid < 0) {
		NV_ERROR(drm, "No supported overlay class\n");
		return cid;
	}

	ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw);
	if (ret)
		return ret;

	return nv50_oimm_init(drm, *pwndw);
}
1172
1173 /******************************************************************************
1174  * Cursor plane
1175  *****************************************************************************/
/* Kick the cursor immediate channel (register 0x0080 = UPDATE).
 * Cursor updates don't participate in core-channel interlocking,
 * hence the unused parameter and zero return.
 */
static u32
nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
{
	nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
	return 0;
}
1182
/* Write the cursor hotspot position (register 0x0084: y in the high
 * 16 bits, x in the low 16).
 */
static void
nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	nvif_wr32(&wndw->wimm.base.user, 0x0084, (asyw->point.y << 16) |
						  asyw->point.x);
}
1189
/* Immediate-update vtable for 507a-style cursor channels. */
static const struct nv50_wimm_func
curs507a = {
	.point = nv50_curs_point,
	.update = nv50_curs_update,
};
1195
/* Propagate the cursor image's ctxdma handle and VRAM offset into the
 * head state, flagging a SetCursor method push only when they actually
 * changed and the cursor is visible.
 */
static void
nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
		  struct nv50_wndw_atom *asyw)
{
	u32 handle = nv50_disp(wndw->plane.dev)->core->chan.vram.handle;
	u32 offset = asyw->image.offset;
	if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
		asyh->curs.handle = handle;
		asyh->curs.offset = offset;
		asyh->set.curs = asyh->curs.visible;
	}
}
1208
/* Window released from the head: mark the cursor invisible. */
static void
nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	asyh->curs.visible = false;
}
1215
/* Validate the cursor plane state against the head: no scaling allowed,
 * the image must be square, 32x32 or 64x64 pixels, and ARGB8888.
 *
 * Returns 0 on success (including "not visible"), -EINVAL otherwise.
 */
static int
nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	int ret;

	/* can_position/can_update_disabled both true; 1:1 scale only. */
	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	asyh->curs.visible = asyw->state.visible;
	if (ret || !asyh->curs.visible)
		return ret;

	/* Hardware layout select: 0 = 32x32, 1 = 64x64. */
	switch (asyw->state.fb->width) {
	case 32: asyh->curs.layout = 0; break;
	case 64: asyh->curs.layout = 1; break;
	default:
		return -EINVAL;
	}

	if (asyw->state.fb->width != asyw->state.fb->height)
		return -EINVAL;

	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
1249
/* Zero-terminated list of formats the cursor plane accepts. */
static const u32
nv50_curs_format[] = {
	DRM_FORMAT_ARGB8888,
	0
};
1255
/* Cursor window vtable; pointer moves/updates go through the immediate
 * channel (wndw->immd) instead, so no image/update hooks here.
 */
static const struct nv50_wndw_func
nv50_curs = {
	.acquire = nv50_curs_acquire,
	.release = nv50_curs_release,
	.prepare = nv50_curs_prepare,
};
1262
1263 static int
1264 curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
1265               int head, s32 oclass, struct nv50_wndw **pwndw)
1266 {
1267         struct nv50_disp_cursor_v0 args = {
1268                 .head = head,
1269         };
1270         struct nv50_disp *disp = nv50_disp(drm->dev);
1271         struct nv50_wndw *wndw;
1272         int ret;
1273
1274         ret = nv50_wndw_new_(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
1275                              "curs", head, nv50_curs_format, &wndw);
1276         if (*pwndw = wndw, ret)
1277                 return ret;
1278
1279         ret = nvif_object_init(&disp->disp->object, 0, oclass, &args,
1280                                sizeof(args), &wndw->wimm.base.user);
1281         if (ret) {
1282                 NV_ERROR(drm, "curs%04x allocation failed: %d\n", oclass, ret);
1283                 return ret;
1284         }
1285
1286         nvif_object_map(&wndw->wimm.base.user, NULL, 0);
1287         wndw->immd = func;
1288         wndw->ctxdma.parent = &disp->core->chan.base.user;
1289         return 0;
1290 }
1291
/* Bind the 507a cursor immediate vtable; shared by all classes below. */
static int
curs507a_new(struct nouveau_drm *drm, int head, s32 oclass,
	     struct nv50_wndw **pwndw)
{
	return curs507a_new_(&curs507a, drm, head, oclass, pwndw);
}
1298
1299 static int
1300 nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
1301 {
1302         struct {
1303                 s32 oclass;
1304                 int version;
1305                 int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
1306         } curses[] = {
1307                 { GK104_DISP_CURSOR, 0, curs507a_new },
1308                 { GF110_DISP_CURSOR, 0, curs507a_new },
1309                 { GT214_DISP_CURSOR, 0, curs507a_new },
1310                 {   G82_DISP_CURSOR, 0, curs507a_new },
1311                 {  NV50_DISP_CURSOR, 0, curs507a_new },
1312                 {}
1313         };
1314         struct nv50_disp *disp = nv50_disp(drm->dev);
1315         int cid;
1316
1317         cid = nvif_mclass(&disp->disp->object, curses);
1318         if (cid < 0) {
1319                 NV_ERROR(drm, "No supported cursor immediate class\n");
1320                 return cid;
1321         }
1322
1323         return curses[cid].new(drm, head, curses[cid].oclass, pwndw);
1324 }
1325
1326 /******************************************************************************
1327  * Primary plane
1328  *****************************************************************************/
/* Push the base channel's LUT enable bit (method 0x00e0, bit 30). */
static void
nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 2))) {
		evo_mthd(push, 0x00e0, 1);
		evo_data(push, asyw->lut.enable << 30);
		evo_kick(push, &wndw->wndw);
	}
}
1339
/* Detach the base channel's scanout surface: zero the presentation
 * control (0x0084) and the image ctxdma handle (0x00c0).
 */
static void
nv50_base_image_clr(struct nv50_wndw *wndw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 4))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &wndw->wndw);
	}
}
1352
/* Program the base channel's scanout surface: presentation mode/interval
 * (0x0084), image ctxdma handle (0x00c0), then the 5-word surface
 * descriptor.  The descriptor method offset and field layout differ by
 * channel class generation (NV50 / G82..GT21x / GF110+).
 */
static void
nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	const s32 oclass = wndw->wndw.base.user.oclass;
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 10))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, (asyw->image.mode << 8) |
			       (asyw->image.interval << 4));
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, asyw->image.handle);
		if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
			/* Original NV50: format word also carries kind. */
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, (asyw->image.kind << 16) |
				       (asyw->image.format << 8));
		} else
		if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		} else {
			/* GF110+: descriptor moved to 0x0400, layout bit
			 * shifted to 24.
			 */
			evo_mthd(push, 0x0400, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 24) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		}
		evo_kick(push, &wndw->wndw);
	}
}
1397
/* Detach the base channel's completion notifier (method 0x00a4). */
static void
nv50_base_ntfy_clr(struct nv50_wndw *wndw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 2))) {
		evo_mthd(push, 0x00a4, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &wndw->wndw);
	}
}
1408
/* Attach the base channel's completion notifier: control/offset word
 * (awaken flag in bit 30) followed by the notifier ctxdma handle
 * (methods 0x00a0..0x00a4).
 */
static void
nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 3))) {
		evo_mthd(push, 0x00a0, 2);
		evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
		evo_data(push, asyw->ntfy.handle);
		evo_kick(push, &wndw->wndw);
	}
}
1420
/* Detach the base channel's flip semaphore (method 0x0094). */
static void
nv50_base_sema_clr(struct nv50_wndw *wndw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 2))) {
		evo_mthd(push, 0x0094, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &wndw->wndw);
	}
}
1431
/* Program the base channel's flip semaphore: offset, acquire value,
 * release value and ctxdma handle (methods 0x0088..0x0094).
 */
static void
nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	u32 *push;
	if ((push = evo_wait(&wndw->wndw, 5))) {
		evo_mthd(push, 0x0088, 4);
		evo_data(push, asyw->sema.offset);
		evo_data(push, asyw->sema.acquire);
		evo_data(push, asyw->sema.release);
		evo_data(push, asyw->sema.handle);
		evo_kick(push, &wndw->wndw);
	}
}
1445
/* Kick an UPDATE (method 0x0080) on the base channel.
 *
 * Returns the interlock mask the core channel must wait on for this
 * window; the per-window bit stride differs pre/post GF110.  Zero when
 * no interlock was requested or the pushbuf wait failed.
 */
static u32
nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
{
	u32 *push;

	if (!(push = evo_wait(&wndw->wndw, 2)))
		return 0;
	evo_mthd(push, 0x0080, 1);
	evo_data(push, interlock);
	evo_kick(push, &wndw->wndw);

	if (wndw->wndw.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
		return interlock ? 2 << (wndw->id * 8) : 0;
	return interlock ? 2 << (wndw->id * 4) : 0;
}
1461
/* Poll the notifier word in the shared sync buffer until the hardware
 * marks the flip as begun (status field 0xc0000000 == 0x40000000), for
 * at most 2s via the nvif_msec() statement-macro.
 *
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int
nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (nvif_msec(&drm->client.device, 2000ULL,
		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
		if ((data & 0xc0000000) == 0x40000000)
			break;
		usleep_range(1, 2);
	) < 0)
		return -ETIMEDOUT;
	return 0;
}
1476
/* Window released from the head: zero cpp so nv50_head_base() programs
 * empty bounds.
 */
static void
nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	asyh->base.cpp = 0;
}
1483
/* Validate the primary plane state against the head (no scaling) and
 * translate the DRM format into the hardware surface format code,
 * recording geometry in the head state for the bounds methods.
 *
 * Returns 0 on success or a negative error code.
 */
static int
nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	const struct drm_framebuffer *fb = asyw->state.fb;
	int ret;

	/* Formats without a depth (e.g. pure-alpha) can't be scanned out. */
	if (!fb->format->depth)
		return -EINVAL;

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	asyh->base.depth = fb->format->depth;
	asyh->base.cpp = fb->format->cpp[0];
	asyh->base.x = asyw->state.src.x1 >> 16;
	asyh->base.y = asyw->state.src.y1 >> 16;
	asyh->base.w = asyw->state.fb->width;
	asyh->base.h = asyw->state.fb->height;

	/* DRM fourcc -> hardware surface format code. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	asyw->lut.enable = 1;
	asyw->set.image = true;
	return 0;
}
1528
/* Zero-terminated list of formats the primary plane accepts; must stay
 * in sync with the switch in nv50_base_acquire().
 */
static const u32
nv50_base_format[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	0
};
1543
/* Base (primary) window vtable: full set of semaphore, notifier, image
 * and update hooks for flip submission.
 */
static const struct nv50_wndw_func
nv50_base = {
	.acquire = nv50_base_acquire,
	.release = nv50_base_release,
	.sema_set = nv50_base_sema_set,
	.sema_clr = nv50_base_sema_clr,
	.ntfy_set = nv50_base_ntfy_set,
	.ntfy_clr = nv50_base_ntfy_clr,
	.ntfy_wait_begun = nv50_base_ntfy_wait_begun,
	.image_set = nv50_base_image_set,
	.image_clr = nv50_base_image_clr,
	.lut = nv50_base_lut,
	.update = nv50_base_update,
};
1558
1559 static int
1560 base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
1561               struct nouveau_drm *drm, int head, s32 oclass,
1562               struct nv50_wndw **pwndw)
1563 {
1564         struct nv50_disp_base_channel_dma_v0 args = {
1565                 .head = head,
1566         };
1567         struct nv50_disp *disp = nv50_disp(drm->dev);
1568         struct nv50_wndw *wndw;
1569         int ret;
1570
1571         ret = nv50_wndw_new_(func, drm->dev, DRM_PLANE_TYPE_PRIMARY,
1572                              "base", head, format, &wndw);
1573         if (*pwndw = wndw, ret)
1574                 return ret;
1575
1576         ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
1577                                &oclass, head, &args, sizeof(args),
1578                                disp->sync->bo.offset, &wndw->wndw);
1579         if (ret) {
1580                 NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
1581                 return ret;
1582         }
1583
1584         ret = nvif_notify_init(&wndw->wndw.base.user, wndw->notify.func,
1585                                false, NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
1586                                &(struct nvif_notify_uevent_req) {},
1587                                sizeof(struct nvif_notify_uevent_req),
1588                                sizeof(struct nvif_notify_uevent_rep),
1589                                &wndw->notify);
1590         if (ret)
1591                 return ret;
1592
1593         wndw->ntfy = EVO_FLIP_NTFY0(wndw->id);
1594         wndw->sema = EVO_FLIP_SEM0(wndw->id);
1595         wndw->data = 0x00000000;
1596         return 0;
1597 }
1598
/* Bind the 507c base vtable/formats; shared by all classes below. */
static int
base507c_new(struct nouveau_drm *drm, int head, s32 oclass,
	     struct nv50_wndw **pwndw)
{
	return base507c_new_(&nv50_base, nv50_base_format, drm, head, oclass, pwndw);
}
1605
1606 static int
1607 nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
1608 {
1609         struct {
1610                 s32 oclass;
1611                 int version;
1612                 int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
1613         } bases[] = {
1614                 { GK110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
1615                 { GK104_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
1616                 { GF110_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
1617                 { GT214_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
1618                 { GT200_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
1619                 {   G82_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
1620                 {  NV50_DISP_BASE_CHANNEL_DMA, 0, base507c_new },
1621                 {}
1622         };
1623         struct nv50_disp *disp = nv50_disp(drm->dev);
1624         int cid;
1625
1626         cid = nvif_mclass(&disp->disp->object, bases);
1627         if (cid < 0) {
1628                 NV_ERROR(drm, "No supported base class\n");
1629                 return cid;
1630         }
1631
1632         return bases[cid].new(drm, head, bases[cid].oclass, pwndw);
1633 }
1634
1635 /******************************************************************************
1636  * Head
1637  *****************************************************************************/
/* Program the head's output-resource control (GF110+ core channel only,
 * methods 0x0404/0x0408 + head stride): sync polarity/depth word and a
 * control word carrying the head index and interlace flag.
 */
static void
head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if (core->base.user.oclass >= GF110_DISP_CORE_CHANNEL_DMA &&
	    (push = evo_wait(core, 2))) {
		evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2);
		evo_data(push, 0x00000001 | (asyh->or.depth  << 6) |
					    (asyh->or.nvsync << 4) |
					    (asyh->or.nhsync << 3));
		evo_data(push, 0x31ec6000 | (head->base.index << 25) |
					     asyh->mode.interlace);
		evo_kick(push, core);
	}
}
1654
/* Program the head's procamp saturation (sin/cos coefficients); the
 * method offset/stride differs pre/post GF110.
 */
static void
nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
		else
			evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
		evo_data(push, (asyh->procamp.sat.sin << 20) |
			       (asyh->procamp.sat.cos << 8));
		evo_kick(push, core);
	}
}
1670
/* Program the head's dither control (mode/bits/enable); the method
 * offset differs across NV50 / GF110 / GK104 core channel classes.
 */
static void
nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
		else
		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
		else
			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
		evo_data(push, (asyh->dither.mode << 3) |
			       (asyh->dither.bits << 1) |
				asyh->dither.enable);
		evo_kick(push, core);
	}
}
1690
/* Program the head's overlay usage bounds from the base cpp (overlay
 * bounds follow the base plane's pixel depth here); a zero cpp leaves
 * the bounds disabled.
 */
static void
nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		/* Depth field in bits 8..10; note no 1-byte case, unlike
		 * nv50_head_base().
		 */
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;	/* usage-bounds enable */
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
1719
/* Program the head's base-channel usage bounds from the base plane's
 * bytes-per-pixel; a zero cpp leaves the bounds disabled.
 */
static void
nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		/* Depth field in bits 8..10. */
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;	/* usage-bounds enable */
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
1749
/* Disable the head's hardware cursor in the core channel.  G82+ also
 * clears the cursor ctxdma handle in a second method; offsets move on
 * GF110+.
 */
static void
nv50_head_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
1774
1775 static void
1776 nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1777 {
1778         struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
1779         u32 *push;
1780         if ((push = evo_wait(core, 5))) {
1781                 if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
1782                         evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
1783                         evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1784                                                     (asyh->curs.format << 24));
1785                         evo_data(push, asyh->curs.offset >> 8);
1786                 } else
1787                 if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
1788                         evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
1789                         evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1790                                                     (asyh->curs.format << 24));
1791                         evo_data(push, asyh->curs.offset >> 8);
1792                         evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
1793                         evo_data(push, asyh->curs.handle);
1794                 } else {
1795                         evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
1796                         evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1797                                                     (asyh->curs.format << 24));
1798                         evo_data(push, asyh->curs.offset >> 8);
1799                         evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
1800                         evo_data(push, asyh->curs.handle);
1801                 }
1802                 evo_kick(push, core);
1803         }
1804 }
1805
1806 static void
1807 nv50_head_core_clr(struct nv50_head *head)
1808 {
1809         struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
1810         u32 *push;
1811         if ((push = evo_wait(core, 2))) {
1812                 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
1813                         evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
1814                 else
1815                         evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
1816                 evo_data(push, 0x00000000);
1817                 evo_kick(push, core);
1818         }
1819 }
1820
/* Program a head's core (ISO) surface: offset, size, layout/pitch/block,
 * format and context DMA handle, plus the surface origin.
 *
 * Note the per-generation differences below: only NV50 programs a memory
 * "kind" field, and GF110+ moves the layout bit from 20 to 24 and the
 * origin method from 0x08c0 to 0x04b0.
 */
static void
nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			/* Original NV50 class. */
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			/* NV50 is the only class with a kind field here. */
			evo_data(push, asyh->core.kind << 16 |
				       asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
			/* EVO will complain with INVALID_STATE if we have an
			 * active cursor and (re)specify HeadSetContextDmaIso
			 * without also updating HeadSetOffsetCursor.
			 */
			asyh->set.curs = asyh->curs.visible;
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			/* G82 .. GT21x classes: same layout, no kind field. */
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		} else {
			/* GF110+ classes: layout bit moves to 24, methods
			 * use the 0x300 head stride.
			 */
			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 24 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		}
		evo_kick(push, core);
	}
}
1874
1875 static void
1876 nv50_head_lut_clr(struct nv50_head *head)
1877 {
1878         struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
1879         u32 *push;
1880         if ((push = evo_wait(core, 4))) {
1881                 if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
1882                         evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
1883                         evo_data(push, 0x40000000);
1884                 } else
1885                 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
1886                         evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
1887                         evo_data(push, 0x40000000);
1888                         evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
1889                         evo_data(push, 0x00000000);
1890                 } else {
1891                         evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
1892                         evo_data(push, 0x03000000);
1893                         evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
1894                         evo_data(push, 0x00000000);
1895                 }
1896                 evo_kick(push, core);
1897         }
1898 }
1899
/* Fill a hardware LUT buffer from a DRM gamma blob.
 *
 * @blob: userspace gamma table; must hold exactly 256 drm_color_lut entries.
 * @mode: hardware LUT mode (0/1 = LORES/HIRES, 7 = INTERPOLATE_257_UNITY_RANGE),
 *        which selects the component precision and fixed-point encoding.
 * @nvbo: destination buffer object; assumed to be kmapped and sized for at
 *        least 257 entries of 8 bytes each — TODO confirm allocation size
 *        at the buffer's creation site.
 */
static void
nv50_head_lut_load(struct drm_property_blob *blob, int mode,
		   struct nouveau_bo *nvbo)
{
	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
	void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
	const int size = blob->length / sizeof(*in);
	int bits, shift, i;
	u16 zero, r, g, b;

	/* This can't happen.. But it shuts the compiler up. */
	if (WARN_ON(size != 256))
		return;

	switch (mode) {
	case 0: /* LORES. */
	case 1: /* HIRES. */
		bits = 11;
		shift = 3;
		zero = 0x0000;
		break;
	case 7: /* INTERPOLATE_257_UNITY_RANGE. */
		bits = 14;
		shift = 0;
		zero = 0x6000;
		break;
	default:
		WARN_ON(1);
		return;
	}

	/* Each hardware LUT entry is 8 bytes: R, G, B 16-bit words followed
	 * by two bytes of padding.
	 */
	for (i = 0; i < size; i++) {
		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
		writew(r, lut + (i * 0x08) + 0);
		writew(g, lut + (i * 0x08) + 2);
		writew(b, lut + (i * 0x08) + 4);
	}

	/* INTERPOLATE modes require a "next" entry to interpolate with,
	 * so we replicate the last entry to deal with this for now.
	 * (i == size here, so this writes the 257th entry.)
	 */
	writew(r, lut + (i * 0x08) + 0);
	writew(g, lut + (i * 0x08) + 2);
	writew(b, lut + (i * 0x08) + 4);
}
1947
1948 static void
1949 nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1950 {
1951         struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
1952         u32 *push;
1953         if ((push = evo_wait(core, 7))) {
1954                 if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
1955                         evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
1956                         evo_data(push, 0x80000000 | asyh->lut.mode << 30);
1957                         evo_data(push, asyh->lut.offset >> 8);
1958                 } else
1959                 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
1960                         evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
1961                         evo_data(push, 0x80000000 | asyh->lut.mode << 30);
1962                         evo_data(push, asyh->lut.offset >> 8);
1963                         evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
1964                         evo_data(push, asyh->lut.handle);
1965                 } else {
1966                         evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
1967                         evo_data(push, 0x80000000 | asyh->lut.mode << 24);
1968                         evo_data(push, asyh->lut.offset >> 8);
1969                         evo_data(push, 0x00000000);
1970                         evo_data(push, 0x00000000);
1971                         evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
1972                         evo_data(push, asyh->lut.handle);
1973                 }
1974                 evo_kick(push, core);
1975         }
1976 }
1977
/* Program a head's raster timings from the pre-computed hardware mode
 * (see nv50_head_atomic_check_mode for how asyh->mode is derived).
 */
static void
nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 14))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			/* Pre-GF110: pixel clock (kHz) and interlace flag,
			 * then the raster geometry packed as (V << 16) | H.
			 */
			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
			evo_data(push, 0x00800000 | m->clock);
			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			/* Vblank duration in microseconds, used for timing
			 * memory reclocking.
			 */
			evo_data(push, asyh->mode.v.blankus);
			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			/* GF110+: geometry first, then clock in Hz. */
			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
			evo_data(push, 0x00000000); /* ??? */
			evo_data(push, 0xffffff00);
			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
			evo_data(push, m->clock * 1000);
			evo_data(push, 0x00200000); /* ??? */
			evo_data(push, m->clock * 1000);
		}
		evo_kick(push, core);
	}
}
2018
2019 static void
2020 nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
2021 {
2022         struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
2023         u32 *push;
2024         if ((push = evo_wait(core, 10))) {
2025                 if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
2026                         evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
2027                         evo_data(push, 0x00000000);
2028                         evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
2029                         evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
2030                         evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
2031                         evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
2032                         evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
2033                 } else {
2034                         evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
2035                         evo_data(push, 0x00000000);
2036                         evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
2037                         evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
2038                         evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
2039                         evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
2040                         evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
2041                         evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
2042                 }
2043                 evo_kick(push, core);
2044         }
2045 }
2046
/* Head method table shared by the pre-volta display classes in this file.
 * NOTE(review): .or points at head907d_or for all of them — confirm the
 * SetControlOutputResource method is compatible on pre-GF110 hardware.
 */
static const struct nv50_head_func
head507d = {
	.view = nv50_head_view,
	.mode = nv50_head_mode,
	.ilut_set = nv50_head_lut_set,
	.ilut_clr = nv50_head_lut_clr,
	.core_set = nv50_head_core_set,
	.core_clr = nv50_head_core_clr,
	.curs_set = nv50_head_curs_set,
	.curs_clr = nv50_head_curs_clr,
	.base = nv50_head_base,
	.ovly = nv50_head_ovly,
	.dither = nv50_head_dither,
	.procamp = nv50_head_procamp,
	.or = head907d_or,
};
2063
2064 static void
2065 nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
2066 {
2067         if (asyh->clr.ilut && (!asyh->set.ilut || y))
2068                 head->func->ilut_clr(head);
2069         if (asyh->clr.core && (!asyh->set.core || y))
2070                 head->func->core_clr(head);
2071         if (asyh->clr.curs && (!asyh->set.curs || y))
2072                 head->func->curs_clr(head);
2073 }
2074
2075 static void
2076 nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
2077 {
2078         if (asyh->set.view   ) head->func->view    (head, asyh);
2079         if (asyh->set.mode   ) head->func->mode    (head, asyh);
2080         if (asyh->set.ilut   ) {
2081                 struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
2082                 struct drm_property_blob *blob = asyh->state.gamma_lut;
2083                 if (blob)
2084                         nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
2085                 asyh->lut.offset = nvbo->bo.offset;
2086                 head->lut.next ^= 1;
2087                 head->func->ilut_set(head, asyh);
2088         }
2089         if (asyh->set.core   ) head->func->core_set(head, asyh);
2090         if (asyh->set.curs   ) head->func->curs_set(head, asyh);
2091         if (asyh->set.base   ) head->func->base    (head, asyh);
2092         if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
2093         if (asyh->set.dither ) head->func->dither  (head, asyh);
2094         if (asyh->set.procamp) head->func->procamp (head, asyh);
2095         if (asyh->set.or     ) head->func->or      (head, asyh);
2096 }
2097
2098 static void
2099 nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
2100                                struct nv50_head_atom *asyh,
2101                                struct nouveau_conn_atom *asyc)
2102 {
2103         const int vib = asyc->procamp.color_vibrance - 100;
2104         const int hue = asyc->procamp.vibrant_hue - 90;
2105         const int adj = (vib > 0) ? 50 : 0;
2106         asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
2107         asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
2108         asyh->set.procamp = true;
2109 }
2110
2111 static void
2112 nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
2113                               struct nv50_head_atom *asyh,
2114                               struct nouveau_conn_atom *asyc)
2115 {
2116         struct drm_connector *connector = asyc->state.connector;
2117         u32 mode = 0x00;
2118
2119         if (asyc->dither.mode == DITHERING_MODE_AUTO) {
2120                 if (asyh->base.depth > connector->display_info.bpc * 3)
2121                         mode = DITHERING_MODE_DYNAMIC2X2;
2122         } else {
2123                 mode = asyc->dither.mode;
2124         }
2125
2126         if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
2127                 if (connector->display_info.bpc >= 8)
2128                         mode |= DITHERING_DEPTH_8BPC;
2129         } else {
2130                 mode |= asyc->dither.depth;
2131         }
2132
2133         asyh->dither.enable = mode;
2134         asyh->dither.bits = mode >> 1;
2135         asyh->dither.mode = mode >> 3;
2136         asyh->set.dither = true;
2137 }
2138
/* Compute the head's viewport (asyh->view): input size from the user mode,
 * output size from the adjusted mode, then shrink/reposition the output
 * for underscan compensation and CENTER/ASPECT scaling modes.
 */
static void
nv50_head_atomic_check_view(struct nv50_head_atom *armh,
			    struct nv50_head_atom *asyh,
			    struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
	struct drm_display_mode *umode = &asyh->state.mode;
	int mode = asyc->scaler.mode;
	struct edid *edid;
	int umode_vdisplay, omode_hdisplay, omode_vdisplay;

	if (connector->edid_blob_ptr)
		edid = (struct edid *)connector->edid_blob_ptr->data;
	else
		edid = NULL;

	if (!asyc->scaler.full) {
		if (mode == DRM_MODE_SCALE_NONE)
			omode = umode;
	} else {
		/* Non-EDID LVDS/eDP mode. */
		mode = DRM_MODE_SCALE_FULLSCREEN;
	}

	/* For the user-specified mode, we must ignore doublescan and
	 * the like, but honor frame packing.
	 */
	umode_vdisplay = umode->vdisplay;
	if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
		umode_vdisplay += umode->vtotal;
	asyh->view.iW = umode->hdisplay;
	asyh->view.iH = umode_vdisplay;
	/* For the output mode, we can just use the stock helper. */
	drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
	asyh->view.oW = omode_hdisplay;
	asyh->view.oH = omode_vdisplay;

	/* Add overscan compensation if necessary, will keep the aspect
	 * ratio the same as the backend mode unless overridden by the
	 * user setting both hborder and vborder properties.
	 */
	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
	     drm_detect_hdmi_monitor(edid)))) {
		u32 bX = asyc->scaler.underscan.hborder;
		u32 bY = asyc->scaler.underscan.vborder;
		/* 13.19 fixed-point aspect ratio of the output. */
		u32 r = (asyh->view.oH << 19) / asyh->view.oW;

		if (bX) {
			asyh->view.oW -= (bX * 2);
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		} else {
			/* No explicit border: shrink width by ~6.25% + 32. */
			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
	}

	/* Handle CENTER/ASPECT scaling, taking into account the areas
	 * removed already for overscan compensation.
	 */
	switch (mode) {
	case DRM_MODE_SCALE_CENTER:
		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
		/* fall-through */
	case DRM_MODE_SCALE_ASPECT:
		/* Shrink the larger output axis to preserve the input
		 * aspect ratio (19-bit fixed point, rounded).
		 */
		if (asyh->view.oH < asyh->view.oW) {
			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
		} else {
			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
		break;
	default:
		break;
	}

	asyh->set.view = true;
}
2222
2223 static void
2224 nv50_head_atomic_check_lut(struct nv50_head *head,
2225                            struct nv50_head_atom *armh,
2226                            struct nv50_head_atom *asyh)
2227 {
2228         struct nv50_disp *disp = nv50_disp(head->base.base.dev);
2229
2230         /* An I8 surface without an input LUT makes no sense, and
2231          * EVO will throw an error if you try.
2232          *
2233          * Legacy clients actually cause this due to the order in
2234          * which they call ioctls, so we will enable the LUT with
2235          * whatever contents the buffer already contains to avoid
2236          * triggering the error check.
2237          */
2238         if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
2239                 asyh->lut.handle = 0;
2240                 asyh->clr.ilut = armh->lut.visible;
2241                 return;
2242         }
2243
2244         if (disp->disp->object.oclass < GF110_DISP) {
2245                 asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
2246                 asyh->set.ilut = true;
2247         } else {
2248                 asyh->lut.mode = 7;
2249                 asyh->set.ilut = asyh->state.color_mgmt_changed;
2250         }
2251         asyh->lut.handle = disp->core->chan.vram.handle;
2252 }
2253
/* Translate the DRM adjusted mode into the hardware timing values stored
 * in asyh->mode, and flag the mode (and OR sync polarity) for flush.
 */
static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct nv50_head_mode *m = &asyh->mode;
	u32 blankus;

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);

	/*
	 * DRM modes are defined in terms of a repeating interval
	 * starting with the active display area.  The hardware modes
	 * are defined in terms of a repeating interval starting one
	 * unit (pixel or line) into the sync pulse.  So, add bias.
	 */

	m->h.active = mode->crtc_htotal;
	m->h.synce  = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
	m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
	m->h.blanks = m->h.blanke + mode->crtc_hdisplay;

	m->v.active = mode->crtc_vtotal;
	m->v.synce  = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
	m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
	m->v.blanks = m->v.blanke + mode->crtc_vdisplay;

	/*XXX: Safe underestimate, even "0" works */
	/* Vblank duration in microseconds. */
	blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
	blankus *= 1000;
	blankus /= mode->crtc_clock;
	m->v.blankus = blankus;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* Second-field blanking positions for interlaced modes. */
		m->v.blank2e =  m->v.active + m->v.blanke;
		m->v.blank2s =  m->v.blank2e + mode->crtc_vdisplay;
		m->v.active  = (m->v.active * 2) + 1;
		m->interlace = true;
	} else {
		m->v.blank2e = 0;
		m->v.blank2s = 1;
		m->interlace = false;
	}
	m->clock = mode->crtc_clock;

	/* Sync polarities are programmed through the OR, when supported. */
	asyh->or.nhsync = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
	asyh->or.nvsync = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
	asyh->set.or = head->func->or != NULL;
	asyh->set.mode = true;
}
2303
/* Atomic-check hook for a head (CRTC): validates the new state and fills
 * in the set/clr flags that nv50_head_flush_set/_clr later act on.
 * Returns 0 (this implementation never rejects a state).
 */
static int
nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);	/* old */
	struct nv50_head_atom *asyh = nv50_head_atom(state);		/* new */
	struct nouveau_conn_atom *asyc = NULL;
	struct drm_connector_state *conns;
	struct drm_connector *conn;
	int i;

	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
	if (asyh->state.active) {
		/* Find the connector state driving this CRTC, if any. */
		for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
			if (conns->crtc == crtc) {
				asyc = nouveau_conn_atom(conns);
				break;
			}
		}

		if (armh->state.active) {
			/* Head stays active: re-check only what changed. */
			if (asyc) {
				if (asyh->state.mode_changed)
					asyc->set.scaler = true;
				if (armh->base.depth != asyh->base.depth)
					asyc->set.dither = true;
			}
		} else {
			/* Head is being enabled: program everything. */
			if (asyc)
				asyc->set.mask = ~0;
			asyh->set.mask = ~0;
			asyh->set.or = head->func->or != NULL;
		}

		if (asyh->state.mode_changed)
			nv50_head_atomic_check_mode(head, asyh);

		if (asyh->state.color_mgmt_changed ||
		    asyh->base.cpp != armh->base.cpp)
			nv50_head_atomic_check_lut(head, armh, asyh);
		asyh->lut.visible = asyh->lut.handle != 0;

		if (asyc) {
			if (asyc->set.scaler)
				nv50_head_atomic_check_view(armh, asyh, asyc);
			if (asyc->set.dither)
				nv50_head_atomic_check_dither(armh, asyh, asyc);
			if (asyc->set.procamp)
				nv50_head_atomic_check_procamp(armh, asyh, asyc);
		}

		/* Core surface geometry: follow the base layer when it has
		 * a framebuffer, otherwise span the whole mode if a cursor
		 * or LUT still needs a visible core surface.
		 */
		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
			asyh->core.x = asyh->base.x;
			asyh->core.y = asyh->base.y;
			asyh->core.w = asyh->base.w;
			asyh->core.h = asyh->base.h;
		} else
		if ((asyh->core.visible = asyh->curs.visible) ||
		    (asyh->core.visible = asyh->lut.visible)) {
			/*XXX: We need to either find some way of having the
			 *     primary base layer appear black, while still
			 *     being able to display the other layers, or we
			 *     need to allocate a dummy black surface here.
			 */
			asyh->core.x = 0;
			asyh->core.y = 0;
			asyh->core.w = asyh->state.mode.hdisplay;
			asyh->core.h = asyh->state.mode.vdisplay;
		}
		asyh->core.handle = disp->core->chan.vram.handle;
		asyh->core.offset = 0;
		asyh->core.format = 0xcf;
		asyh->core.kind = 0;
		asyh->core.layout = 1;
		asyh->core.block = 0;
		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
		asyh->set.base = armh->base.cpp != asyh->base.cpp;
		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
	} else {
		/* Head inactive: everything off. */
		asyh->lut.visible = false;
		asyh->core.visible = false;
		asyh->curs.visible = false;
		asyh->base.cpp = 0;
		asyh->ovly.cpp = 0;
	}

	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
		/* Fast path: only (re)program core/cursor if they differ
		 * from the currently-armed state.
		 */
		if (asyh->core.visible) {
			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
				asyh->set.core = true;
		} else
		if (armh->core.visible) {
			asyh->clr.core = true;
		}

		if (asyh->curs.visible) {
			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
				asyh->set.curs = true;
		} else
		if (armh->curs.visible) {
			asyh->clr.curs = true;
		}
	} else {
		/* Full modeset: clear everything armed, set everything
		 * visible in the new state.
		 */
		asyh->clr.ilut = armh->lut.visible;
		asyh->clr.core = armh->core.visible;
		asyh->clr.curs = armh->curs.visible;
		asyh->set.ilut = asyh->lut.visible;
		asyh->set.core = asyh->core.visible;
		asyh->set.curs = asyh->curs.visible;
	}

	/* Any core-channel work requires the commit to hold the core lock. */
	if (asyh->clr.mask || asyh->set.mask)
		nv50_atom(asyh->state.state)->lock_core = true;
	return 0;
}
2421
/* CRTC helper vtable for heads; only atomic_check is needed here. */
static const struct drm_crtc_helper_funcs
nv50_head_help = {
	.atomic_check = nv50_head_atomic_check,
};
2426
2427 static void
2428 nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
2429                                struct drm_crtc_state *state)
2430 {
2431         struct nv50_head_atom *asyh = nv50_head_atom(state);
2432         __drm_atomic_helper_crtc_destroy_state(&asyh->state);
2433         kfree(asyh);
2434 }
2435
2436 static struct drm_crtc_state *
2437 nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
2438 {
2439         struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
2440         struct nv50_head_atom *asyh;
2441         if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
2442                 return NULL;
2443         __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
2444         asyh->view = armh->view;
2445         asyh->mode = armh->mode;
2446         asyh->lut  = armh->lut;
2447         asyh->core = armh->core;
2448         asyh->curs = armh->curs;
2449         asyh->base = armh->base;
2450         asyh->ovly = armh->ovly;
2451         asyh->dither = armh->dither;
2452         asyh->procamp = armh->procamp;
2453         asyh->clr.mask = 0;
2454         asyh->set.mask = 0;
2455         return &asyh->state;
2456 }
2457
/*
 * Local stand-in for a DRM core CRTC reset helper: dispose of any existing
 * software state on @crtc and install @state as the new one.
 * NOTE(review): the __ prefix mirrors DRM helper naming; if the DRM core
 * ever exports a helper with this exact name from an included header, this
 * static definition will clash — confirm against current DRM headers.
 */
static void
__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
			       struct drm_crtc_state *state)
{
	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
	crtc->state = state;
	crtc->state->crtc = crtc;
}
2467
2468 static void
2469 nv50_head_reset(struct drm_crtc *crtc)
2470 {
2471         struct nv50_head_atom *asyh;
2472
2473         if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
2474                 return;
2475
2476         __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
2477 }
2478
2479 static void
2480 nv50_head_destroy(struct drm_crtc *crtc)
2481 {
2482         struct nv50_head *head = nv50_head(crtc);
2483         int i;
2484
2485         for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
2486                 nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
2487
2488         drm_crtc_cleanup(crtc);
2489         kfree(head);
2490 }
2491
/* CRTC vtable for heads; atomic state management plus legacy entry points
 * routed through the DRM atomic helpers.
 */
static const struct drm_crtc_funcs
nv50_head_func = {
	.reset = nv50_head_reset,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.destroy = nv50_head_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
	.atomic_destroy_state = nv50_head_atomic_destroy_state,
};
2502
/*
 * Create CRTC @index: allocate the head, its base (primary) and cursor
 * planes, register the CRTC with DRM, allocate the per-head LUT buffers
 * in VRAM, and finally the overlay plane.  Returns 0 or negative errno.
 */
static int
nv50_head_create(struct drm_device *dev, int index)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_head *head;
	struct nv50_wndw *curs, *wndw;
	struct drm_crtc *crtc;
	int ret, i;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOMEM;

	head->func = disp->core->func->head;
	head->base.index = index;

	/* Primary and cursor planes must exist before CRTC registration. */
	ret = nv50_base_new(drm, head->base.index, &wndw);
	if (ret == 0)
		ret = nv50_curs_new(drm, head->base.index, &curs);
	if (ret) {
		kfree(head);
		return ret;
	}

	crtc = &head->base.base;
	/* NOTE(review): return value ignored — failure here would leave a
	 * partially-registered CRTC; confirm whether this can fail in this
	 * configuration. */
	drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane,
				  &nv50_head_func, "head-%d", head->base.index);
	drm_crtc_helper_add(crtc, &nv50_head_help);
	drm_mode_crtc_set_gamma_size(crtc, 256);

	/* One VRAM LUT buffer per slot: 1025 entries * 8 bytes, 0x100 aligned. */
	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
		ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
					     TTM_PL_FLAG_VRAM,
					     &head->lut.nvbo[i]);
		if (ret)
			goto out;
	}

	/* allocate overlay resources */
	ret = nv50_ovly_new(drm, head->base.index, &wndw);
out:
	/* Any failure past CRTC init tears the whole head back down;
	 * nv50_head_destroy() handles partially-allocated LUT slots. */
	if (ret)
		nv50_head_destroy(crtc);
	return ret;
}
2548
/* Core channel implementation shared by all 507d-compatible classes. */
static const struct nv50_core_func
core507d = {
	.head = &head507d,
};
2553
/* Instantiate a 507d-style core channel of class @oclass. */
static int
core507d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
{
	return core507d_new_(&core507d, drm, oclass, pcore);
}
2559
2560 static void
2561 nv50_core_del(struct nv50_core **pcore)
2562 {
2563         struct nv50_core *core = *pcore;
2564         if (core) {
2565                 nv50_dmac_destroy(&core->chan);
2566                 kfree(*pcore);
2567                 *pcore = NULL;
2568         }
2569 }
2570
2571 static int
2572 nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
2573 {
2574         struct {
2575                 s32 oclass;
2576                 int version;
2577                 int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
2578         } cores[] = {
2579                 { GP102_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2580                 { GP100_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2581                 { GM200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2582                 { GM107_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2583                 { GK110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2584                 { GK104_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2585                 { GF110_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2586                 { GT214_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2587                 { GT206_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2588                 { GT200_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2589                 {   G82_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2590                 {  NV50_DISP_CORE_CHANNEL_DMA, 0, core507d_new },
2591                 {}
2592         };
2593         struct nv50_disp *disp = nv50_disp(drm->dev);
2594         int cid;
2595
2596         cid = nvif_mclass(&disp->disp->object, cores);
2597         if (cid < 0) {
2598                 NV_ERROR(drm, "No supported core channel class\n");
2599                 return cid;
2600         }
2601
2602         return cores[cid].new(drm, cores[cid].oclass, pcore);
2603 }
2604
2605 /******************************************************************************
2606  * Output path helpers
2607  *****************************************************************************/
2608 static void
2609 nv50_outp_release(struct nouveau_encoder *nv_encoder)
2610 {
2611         struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
2612         struct {
2613                 struct nv50_disp_mthd_v1 base;
2614         } args = {
2615                 .base.version = 1,
2616                 .base.method = NV50_DISP_MTHD_V1_RELEASE,
2617                 .base.hasht  = nv_encoder->dcb->hasht,
2618                 .base.hashm  = nv_encoder->dcb->hashm,
2619         };
2620
2621         nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
2622         nv_encoder->or = -1;
2623         nv_encoder->link = 0;
2624 }
2625
/*
 * Acquire an output resource (OR) for @nv_encoder via the ACQUIRE display
 * method and record the assigned OR index and link.  Returns 0 or the
 * negative errno from the method call.
 */
static int
nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
{
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_acquire_v0 info;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
		/* hasht/hashm identify the DCB output entry. */
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};
	int ret;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret) {
		NV_ERROR(drm, "error acquiring output path: %d\n", ret);
		return ret;
	}

	/* The method fills in which OR/link the core assigned us. */
	nv_encoder->or = args.info.or;
	nv_encoder->link = args.info.link;
	return 0;
}
2652
2653 static int
2654 nv50_outp_atomic_check_view(struct drm_encoder *encoder,
2655                             struct drm_crtc_state *crtc_state,
2656                             struct drm_connector_state *conn_state,
2657                             struct drm_display_mode *native_mode)
2658 {
2659         struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
2660         struct drm_display_mode *mode = &crtc_state->mode;
2661         struct drm_connector *connector = conn_state->connector;
2662         struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
2663         struct nouveau_drm *drm = nouveau_drm(encoder->dev);
2664
2665         NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
2666         asyc->scaler.full = false;
2667         if (!native_mode)
2668                 return 0;
2669
2670         if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
2671                 switch (connector->connector_type) {
2672                 case DRM_MODE_CONNECTOR_LVDS:
2673                 case DRM_MODE_CONNECTOR_eDP:
2674                         /* Force use of scaler for non-EDID modes. */
2675                         if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
2676                                 break;
2677                         mode = native_mode;
2678                         asyc->scaler.full = true;
2679                         break;
2680                 default:
2681                         break;
2682                 }
2683         } else {
2684                 mode = native_mode;
2685         }
2686
2687         if (!drm_mode_equal(adjusted_mode, mode)) {
2688                 drm_mode_copy(adjusted_mode, mode);
2689                 crtc_state->mode_changed = true;
2690         }
2691
2692         return 0;
2693 }
2694
2695 static int
2696 nv50_outp_atomic_check(struct drm_encoder *encoder,
2697                        struct drm_crtc_state *crtc_state,
2698                        struct drm_connector_state *conn_state)
2699 {
2700         struct nouveau_connector *nv_connector =
2701                 nouveau_connector(conn_state->connector);
2702         return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
2703                                            nv_connector->native_mode);
2704 }
2705
2706 /******************************************************************************
2707  * DAC
2708  *****************************************************************************/
/* Disconnect the DAC from its head via the core channel, then release the
 * output resource.  No-op on the channel if the encoder was never enabled.
 */
static void
nv50_dac_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(core, 4);
		if (push) {
			/* Per-DAC control method: 0x0400 stride 0x80 on
			 * pre-GF110 core channels, 0x0180 stride 0x20 after.
			 * Writing 0 detaches the DAC (mirrors the head mask
			 * written by nv50_dac_enable()). */
			if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0400 + (or * 0x080), 1);
				evo_data(push, 0x00000000);
			} else {
				evo_mthd(push, 0x0180 + (or * 0x020), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, core);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}
2734
/* Acquire an output resource and attach the DAC to its head through the
 * core channel.
 */
static void
nv50_dac_enable(struct drm_encoder *encoder)
{
	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
	u32 *push;

	nv50_outp_acquire(nv_encoder);

	push = evo_wait(core, 8);
	if (push) {
		/* Same method split as nv50_dac_disable(): pre-GF110 takes
		 * head mask + sync polarity, newer channels head mask only. */
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
			evo_data(push, 1 << nv_crtc->index);
			evo_data(push, (asyh->or.nvsync << 1) | asyh->or.nhsync);
		} else {
			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
			evo_data(push, 1 << nv_crtc->index);
		}

		evo_kick(push, core);
	}
	/* Analog output: no meaningful bpc, so report depth 0. */
	asyh->or.depth = 0;

	nv_encoder->crtc = encoder->crtc;
}
2763
/*
 * Analog load detection: ask the display core to drive a test voltage on
 * the DAC and report whether a load (monitor) was sensed.
 */
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_dac_load_v0 load;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};
	int ret;

	/* Use the VBIOS-provided test value, falling back to 340 when the
	 * VBIOS doesn't specify one. */
	args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (args.load.data == 0)
		args.load.data = 340;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret || !args.load.load)
		return connector_status_disconnected;

	return connector_status_connected;
}
2790
/* Encoder helper vtable for analog (DAC) outputs. */
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_dac_enable,
	.disable = nv50_dac_disable,
	.detect = nv50_dac_detect
};
2798
/* drm_encoder_funcs.destroy: no DAC-private state beyond the encoder. */
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
2805
/* Encoder vtable for analog (DAC) outputs. */
static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};
2810
2811 static int
2812 nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
2813 {
2814         struct nouveau_drm *drm = nouveau_drm(connector->dev);
2815         struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
2816         struct nvkm_i2c_bus *bus;
2817         struct nouveau_encoder *nv_encoder;
2818         struct drm_encoder *encoder;
2819         int type = DRM_MODE_ENCODER_DAC;
2820
2821         nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
2822         if (!nv_encoder)
2823                 return -ENOMEM;
2824         nv_encoder->dcb = dcbe;
2825
2826         bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
2827         if (bus)
2828                 nv_encoder->i2c = &bus->i2c;
2829
2830         encoder = to_drm_encoder(nv_encoder);
2831         encoder->possible_crtcs = dcbe->heads;
2832         encoder->possible_clones = 0;
2833         drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
2834                          "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
2835         drm_encoder_helper_add(encoder, &nv50_dac_help);
2836
2837         drm_mode_connector_attach_encoder(connector, encoder);
2838         return 0;
2839 }
2840
2841 /******************************************************************************
2842  * Audio
2843  *****************************************************************************/
/* Disable HDA ELD audio for @nv_crtc on this encoder's SOR.  Sending the
 * method with no ELD payload turns audio off.
 */
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hda_eld_v0 eld;
	} args = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.hasht   = nv_encoder->dcb->hasht,
		/* Fold the head index into hashm bits 8+ so the core knows
		 * which head's audio to touch. */
		.base.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				(0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
2862
2863 static void
2864 nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
2865 {
2866         struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2867         struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2868         struct nouveau_connector *nv_connector;
2869         struct nv50_disp *disp = nv50_disp(encoder->dev);
2870         struct __packed {
2871                 struct {
2872                         struct nv50_disp_mthd_v1 mthd;
2873                         struct nv50_disp_sor_hda_eld_v0 eld;
2874                 } base;
2875                 u8 data[sizeof(nv_connector->base.eld)];
2876         } args = {
2877                 .base.mthd.version = 1,
2878                 .base.mthd.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
2879                 .base.mthd.hasht   = nv_encoder->dcb->hasht,
2880                 .base.mthd.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
2881                                      (0x0100 << nv_crtc->index),
2882         };
2883
2884         nv_connector = nouveau_encoder_connector_get(nv_encoder);
2885         if (!drm_detect_monitor_audio(nv_connector->edid))
2886                 return;
2887
2888         memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
2889
2890         nvif_mthd(&disp->disp->object, 0, &args,
2891                   sizeof(args.base) + drm_eld_size(args.data));
2892 }
2893
2894 /******************************************************************************
2895  * HDMI
2896  *****************************************************************************/
/* Power down HDMI protocol support for @nv_crtc on this encoder's SOR
 * (pwr.state is left zeroed, i.e. off).
 */
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		/* Head index folded into hashm bits 8+. */
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
2915
2916 static void
2917 nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
2918 {
2919         struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2920         struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
2921         struct nv50_disp *disp = nv50_disp(encoder->dev);
2922         struct {
2923                 struct nv50_disp_mthd_v1 base;
2924                 struct nv50_disp_sor_hdmi_pwr_v0 pwr;
2925                 u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
2926         } args = {
2927                 .base.version = 1,
2928                 .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
2929                 .base.hasht  = nv_encoder->dcb->hasht,
2930                 .base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
2931                                (0x0100 << nv_crtc->index),
2932                 .pwr.state = 1,
2933                 .pwr.rekey = 56, /* binary driver, and tegra, constant */
2934         };
2935         struct nouveau_connector *nv_connector;
2936         u32 max_ac_packet;
2937         union hdmi_infoframe avi_frame;
2938         union hdmi_infoframe vendor_frame;
2939         int ret;
2940         int size;
2941
2942         nv_connector = nouveau_encoder_connector_get(nv_encoder);
2943         if (!drm_detect_hdmi_monitor(nv_connector->edid))
2944                 return;
2945
2946         ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
2947                                                        false);
2948         if (!ret) {
2949                 /* We have an AVI InfoFrame, populate it to the display */
2950                 args.pwr.avi_infoframe_length
2951                         = hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
2952         }
2953
2954         ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
2955                                                           &nv_connector->base, mode);
2956         if (!ret) {
2957                 /* We have a Vendor InfoFrame, populate it to the display */
2958                 args.pwr.vendor_infoframe_length
2959                         = hdmi_infoframe_pack(&vendor_frame,
2960                                               args.infoframes
2961                                               + args.pwr.avi_infoframe_length,
2962                                               17);
2963         }
2964
2965         max_ac_packet  = mode->htotal - mode->hdisplay;
2966         max_ac_packet -= args.pwr.rekey;
2967         max_ac_packet -= 18; /* constant from tegra */
2968         args.pwr.max_ac_packet = max_ac_packet / 32;
2969
2970         size = sizeof(args.base)
2971                 + sizeof(args.pwr)
2972                 + args.pwr.avi_infoframe_length
2973                 + args.pwr.vendor_infoframe_length;
2974         nvif_mthd(&disp->disp->object, 0, &args, size);
2975         nv50_audio_enable(encoder, mode);
2976 }
2977
2978 /******************************************************************************
2979  * MST
2980  *****************************************************************************/
2981 #define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
2982 #define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
2983 #define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
2984
/* Per-physical-output MST state: topology manager plus one virtual
 * encoder per head.
 */
struct nv50_mstm {
	struct nouveau_encoder *outp;	/* underlying SOR output */

	struct drm_dp_mst_topology_mgr mgr;
	struct nv50_msto *msto[4];	/* one virtual encoder per head */

	bool modified;	/* payload table needs (re)programming */
	bool disabled;	/* last active link went away */
	int links;	/* number of heads currently driving this output */
};
2995
/* Per-MST-port connector. */
struct nv50_mstc {
	struct nv50_mstm *mstm;		/* owning topology */
	struct drm_dp_mst_port *port;	/* NULL once the port is gone */
	struct drm_connector connector;

	struct drm_display_mode *native;
	struct edid *edid;

	int pbn;	/* bandwidth required by the current mode */
};
3006
/* Virtual MST encoder, one per (output, head) pairing. */
struct nv50_msto {
	struct drm_encoder encoder;

	struct nv50_head *head;	/* head currently routed through us */
	struct nv50_mstc *mstc;	/* connector currently routed through us */
	bool disabled;		/* pending cleanup after disable */
};
3014
3015 static struct drm_dp_payload *
3016 nv50_msto_payload(struct nv50_msto *msto)
3017 {
3018         struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
3019         struct nv50_mstc *mstc = msto->mstc;
3020         struct nv50_mstm *mstm = mstc->mstm;
3021         int vcpi = mstc->port->vcpi.vcpi, i;
3022
3023         NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
3024         for (i = 0; i < mstm->mgr.max_payloads; i++) {
3025                 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
3026                 NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
3027                           mstm->outp->base.base.name, i, payload->vcpi,
3028                           payload->start_slot, payload->num_slots);
3029         }
3030
3031         for (i = 0; i < mstm->mgr.max_payloads; i++) {
3032                 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
3033                 if (payload->vcpi == vcpi)
3034                         return payload;
3035         }
3036
3037         return NULL;
3038 }
3039
3040 static void
3041 nv50_msto_cleanup(struct nv50_msto *msto)
3042 {
3043         struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
3044         struct nv50_mstc *mstc = msto->mstc;
3045         struct nv50_mstm *mstm = mstc->mstm;
3046
3047         NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
3048         if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
3049                 drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
3050         if (msto->disabled) {
3051                 msto->mstc = NULL;
3052                 msto->head = NULL;
3053                 msto->disabled = false;
3054         }
3055 }
3056
/*
 * Program the display core with this encoder's VCPI allocation (start
 * slot, slot count, PBN) for its head.  A zeroed vcpi struct — when the
 * port has no VCPI or no payload — disables the allocation.
 */
static void
nv50_msto_prepare(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht  = mstm->outp->dcb->hasht,
		/* Head index folded into hashm bits 8+. */
		.base.hashm  = (0xf0ff & mstm->outp->dcb->hashm) |
			       (0x0100 << msto->head->base.index),
	};

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
	if (mstc->port && mstc->port->vcpi.vcpi > 0) {
		struct drm_dp_payload *payload = nv50_msto_payload(msto);
		if (payload) {
			args.vcpi.start_slot = payload->start_slot;
			args.vcpi.num_slots = payload->num_slots;
			args.vcpi.pbn = mstc->port->vcpi.pbn;
			args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
		}
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);
	nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
}
3091
3092 static int
3093 nv50_msto_atomic_check(struct drm_encoder *encoder,
3094                        struct drm_crtc_state *crtc_state,
3095                        struct drm_connector_state *conn_state)
3096 {
3097         struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
3098         struct nv50_mstm *mstm = mstc->mstm;
3099         int bpp = conn_state->connector->display_info.bpc * 3;
3100         int slots;
3101
3102         mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
3103
3104         slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
3105         if (slots < 0)
3106                 return slots;
3107
3108         return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
3109                                            mstc->native);
3110 }
3111
/*
 * Enable the MST virtual encoder: find the connector routed to us,
 * allocate a VCPI for its bandwidth, acquire the physical output on the
 * first active link, and point the head at this output.
 */
static void
nv50_msto_enable(struct drm_encoder *encoder)
{
	struct nv50_head *head = nv50_head(encoder->crtc);
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto, depth;
	int slots;
	bool r;

	/* Locate the connector whose state routes through this encoder. */
	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	/* PBN was computed in atomic_check; claim the matching slots. */
	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
	WARN_ON(!r);

	/* First active head on this output acquires the physical OR. */
	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp);

	/* Protocol selector depends on which DP link was assigned —
	 * presumably 0x8/0x9 are the per-link DP protocol codes; confirm
	 * against the class headers. */
	if (mstm->outp->link & 1)
		proto = 0x8;
	else
		proto = 0x9;

	/* Hardware depth code from the connector's bpc (10bpc is also the
	 * fallback for unknown values). */
	switch (mstc->connector.display_info.bpc) {
	case  6: depth = 0x2; break;
	case  8: depth = 0x5; break;
	case 10:
	default: depth = 0x6; break;
	}

	mstm->outp->update(mstm->outp, head->base.index,
			   nv50_head_atom(head->base.base.state), proto, depth);

	/* Remember the routing so disable/cleanup can undo it. */
	msto->head = head;
	msto->mstc = mstc;
	mstm->modified = true;
}
3164
3165 static void
3166 nv50_msto_disable(struct drm_encoder *encoder)
3167 {
3168         struct nv50_msto *msto = nv50_msto(encoder);
3169         struct nv50_mstc *mstc = msto->mstc;
3170         struct nv50_mstm *mstm = mstc->mstm;
3171
3172         if (mstc->port)
3173                 drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
3174
3175         mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
3176         mstm->modified = true;
3177         if (!--mstm->links)
3178                 mstm->disabled = true;
3179         msto->disabled = true;
3180 }
3181
/* Encoder helper vtable for MST virtual encoders. */
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.disable = nv50_msto_disable,
	.enable = nv50_msto_enable,
	.atomic_check = nv50_msto_atomic_check,
};
3188
3189 static void
3190 nv50_msto_destroy(struct drm_encoder *encoder)
3191 {
3192         struct nv50_msto *msto = nv50_msto(encoder);
3193         drm_encoder_cleanup(&msto->encoder);
3194         kfree(msto);
3195 }
3196
/* Encoder vtable for MST virtual encoders. */
static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};
3201
/* Allocate and register one MST stream encoder (msto).
 *
 * @dev:   DRM device to register the encoder with
 * @heads: bitmask of heads (CRTCs) the encoder may drive
 * @name:  parent output name, used in the encoder name ("%s-mst-%d")
 * @id:    index appended to the encoder name
 * @pmsto: receives the new msto (reset to NULL again on failure)
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
	      struct nv50_msto **pmsto)
{
	struct nv50_msto *msto;
	int ret;

	if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
		return -ENOMEM;

	ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
			       DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
	if (ret) {
		kfree(*pmsto);
		*pmsto = NULL;
		return ret;
	}

	drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
	msto->encoder.possible_crtcs = heads;
	return 0;
}
3224
3225 static struct drm_encoder *
3226 nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
3227                               struct drm_connector_state *connector_state)
3228 {
3229         struct nv50_head *head = nv50_head(connector_state->crtc);
3230         struct nv50_mstc *mstc = nv50_mstc(connector);
3231         if (mstc->port) {
3232                 struct nv50_mstm *mstm = mstc->mstm;
3233                 return &mstm->msto[head->base.index]->encoder;
3234         }
3235         return NULL;
3236 }
3237
3238 static struct drm_encoder *
3239 nv50_mstc_best_encoder(struct drm_connector *connector)
3240 {
3241         struct nv50_mstc *mstc = nv50_mstc(connector);
3242         if (mstc->port) {
3243                 struct nv50_mstm *mstm = mstc->mstm;
3244                 return &mstm->msto[0]->encoder;
3245         }
3246         return NULL;
3247 }
3248
/* Accept every mode here; bandwidth/link validation happens later in the
 * atomic check path rather than at mode enumeration time.
 */
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	return MODE_OK;
}
3255
3256 static int
3257 nv50_mstc_get_modes(struct drm_connector *connector)
3258 {
3259         struct nv50_mstc *mstc = nv50_mstc(connector);
3260         int ret = 0;
3261
3262         mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
3263         drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
3264         if (mstc->edid)
3265                 ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
3266
3267         if (!mstc->connector.display_info.bpc)
3268                 mstc->connector.display_info.bpc = 8;
3269
3270         if (mstc->native)
3271                 drm_mode_destroy(mstc->connector.dev, mstc->native);
3272         mstc->native = nouveau_conn_native_mode(&mstc->connector);
3273         return ret;
3274 }
3275
/* Connector helper hooks for MST connectors. */
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.best_encoder = nv50_mstc_best_encoder,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
};
3283
3284 static enum drm_connector_status
3285 nv50_mstc_detect(struct drm_connector *connector, bool force)
3286 {
3287         struct nv50_mstc *mstc = nv50_mstc(connector);
3288         if (!mstc->port)
3289                 return connector_status_disconnected;
3290         return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
3291 }
3292
3293 static void
3294 nv50_mstc_destroy(struct drm_connector *connector)
3295 {
3296         struct nv50_mstc *mstc = nv50_mstc(connector);
3297         drm_connector_cleanup(&mstc->connector);
3298         kfree(mstc);
3299 }
3300
/* Base connector funcs for MST connectors; atomic property handling is
 * shared with the regular nouveau connector implementation.
 */
static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.detect = nv50_mstc_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};
3312
/* Create the connector (mstc) for a newly-discovered MST port.
 *
 * @mstm:  topology this port belongs to
 * @port:  the MST port reported by the topology manager
 * @path:  topology path string for the connector's PATH property
 * @pmstc: receives the new mstc (reset to NULL on failure)
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct nv50_mstc *mstc;
	int ret, i;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		kfree(*pmstc);
		*pmstc = NULL;
		return ret;
	}

	drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);

	/* Allocate the initial atomic state before properties are attached. */
	mstc->connector.funcs->reset(&mstc->connector);
	nouveau_conn_attach_properties(&mstc->connector);

	/* Any head may drive this connector, so link every per-head stream
	 * encoder that was created for the parent output.
	 */
	for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
		drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);

	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
	drm_mode_connector_set_path_property(&mstc->connector, path);
	return 0;
}
3347
3348 static void
3349 nv50_mstm_cleanup(struct nv50_mstm *mstm)
3350 {
3351         struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3352         struct drm_encoder *encoder;
3353         int ret;
3354
3355         NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
3356         ret = drm_dp_check_act_status(&mstm->mgr);
3357
3358         ret = drm_dp_update_payload_part2(&mstm->mgr);
3359
3360         drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3361                 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3362                         struct nv50_msto *msto = nv50_msto(encoder);
3363                         struct nv50_mstc *mstc = msto->mstc;
3364                         if (mstc && mstc->mstm == mstm)
3365                                 nv50_msto_cleanup(msto);
3366                 }
3367         }
3368
3369         mstm->modified = false;
3370 }
3371
3372 static void
3373 nv50_mstm_prepare(struct nv50_mstm *mstm)
3374 {
3375         struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3376         struct drm_encoder *encoder;
3377         int ret;
3378
3379         NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
3380         ret = drm_dp_update_payload_part1(&mstm->mgr);
3381
3382         drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3383                 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3384                         struct nv50_msto *msto = nv50_msto(encoder);
3385                         struct nv50_mstc *mstc = msto->mstc;
3386                         if (mstc && mstc->mstm == mstm)
3387                                 nv50_msto_prepare(msto);
3388                 }
3389         }
3390
3391         if (mstm->disabled) {
3392                 if (!mstm->links)
3393                         nv50_outp_release(mstm->outp);
3394                 mstm->disabled = false;
3395         }
3396 }
3397
/* Topology-manager hotplug callback: forward to the generic KMS hotplug
 * event so userspace reprobes the device's connectors.
 */
static void
nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
}
3404
/* Topology-manager callback: an MST port disappeared, take its connector
 * out of service and drop the reference that kept it alive.
 */
static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nv50_mstc *mstc = nv50_mstc(connector);

	drm_connector_unregister(&mstc->connector);

	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);

	/* Clear the port pointer under connection_mutex so concurrent
	 * detect/best_encoder calls see the connector as disconnected.
	 */
	drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
	mstc->port = NULL;
	drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);

	/* Drop the topology manager's reference; the connector is freed
	 * via nv50_mstc_destroy() once the last user lets go.
	 */
	drm_connector_unreference(&mstc->connector);
}
3422
/* Topology-manager callback: expose a freshly-created MST connector to
 * fbdev and to userspace.
 */
static void
nv50_mstm_register_connector(struct drm_connector *connector)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);

	drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);

	drm_connector_register(connector);
}
3432
3433 static struct drm_connector *
3434 nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
3435                         struct drm_dp_mst_port *port, const char *path)
3436 {
3437         struct nv50_mstm *mstm = nv50_mstm(mgr);
3438         struct nv50_mstc *mstc;
3439         int ret;
3440
3441         ret = nv50_mstc_new(mstm, port, path, &mstc);
3442         if (ret) {
3443                 if (mstc)
3444                         mstc->connector.funcs->destroy(&mstc->connector);
3445                 return NULL;
3446         }
3447
3448         return &mstc->connector;
3449 }
3450
/* Callbacks handed to the DP MST topology manager. */
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
	.register_connector = nv50_mstm_register_connector,
	.destroy_connector = nv50_mstm_destroy_connector,
	.hotplug = nv50_mstm_hotplug,
};
3458
/* Service a DP short-pulse IRQ for an MST-capable output: read the sink's
 * ESI block, hand it to the topology manager, and acknowledge the events.
 * Loops until the topology manager reports nothing left to handle.
 */
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
	bool handled = true;
	int ret;
	u8 esi[8] = {};

	if (!aux)
		return;

	while (handled) {
		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (ret != 8) {
			/* AUX is dead; assume the sink is gone and tear
			 * down the MST topology.
			 */
			drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
			return;
		}

		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
		if (!handled)
			break;

		/* Ack the serviced events by writing the ESI bytes back. */
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
3484
3485 void
3486 nv50_mstm_remove(struct nv50_mstm *mstm)
3487 {
3488         if (mstm)
3489                 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
3490 }
3491
/* Switch the sink and the hardware between MST and SST operation.
 *
 * @mstm:  topology state for the output
 * @dpcd:  the sink's DPCD revision byte (reused below as a scratch buffer
 *         for the DP_MSTM_CTRL register value)
 * @state: nonzero to enable MST, zero to disable
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
	struct nouveau_encoder *outp = mstm->outp;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_link_v0 mst;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
		.base.hasht = outp->dcb->hasht,
		.base.hashm = outp->dcb->hashm,
		.mst.state = state,
	};
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nvif_object *disp = &drm->display->disp.object;
	int ret;

	/* MST control only exists on DP 1.2+ sinks; update the sink-side
	 * enable bit via read-modify-write of DP_MSTM_CTRL.
	 */
	if (dpcd >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
		if (ret < 0)
			return ret;

		dpcd &= ~DP_MST_EN;
		if (state)
			dpcd |= DP_MST_EN;

		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
		if (ret < 0)
			return ret;
	}

	/* Tell NVKM to reconfigure the SOR's MST link state. */
	return nvif_mthd(disp, 0, &args, sizeof(args));
}
3526
/* Decide whether to run an output in MST mode after a (re)plug.
 *
 * @mstm:  topology state (NULL for outputs without MST support)
 * @dpcd:  sink DPCD; dpcd[0] is the revision, dpcd[1] receives MSTM_CAP
 * @allow: nonzero if the caller permits MST operation
 *
 * Returns the resulting MST state (nonzero when MST is active), or a
 * negative errno.
 */
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
	int ret, state = 0;

	if (!mstm)
		return 0;

	if (dpcd[0] >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
		if (ret < 0)
			return ret;

		/* Sink advertises DP 1.2 but no MST capability: treat it
		 * as a DP 1.1 device from here on.
		 */
		if (!(dpcd[1] & DP_MST_CAP))
			dpcd[0] = 0x11;
		else
			state = allow;
	}

	ret = nv50_mstm_enable(mstm, dpcd[0], state);
	if (ret)
		return ret;

	/* Roll the sink/hardware back to SST if the topology manager
	 * refuses to start.
	 */
	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
	if (ret)
		return nv50_mstm_enable(mstm, dpcd[0], 0);

	return mstm->mgr.mst_state;
}
3556
3557 static void
3558 nv50_mstm_fini(struct nv50_mstm *mstm)
3559 {
3560         if (mstm && mstm->mgr.mst_state)
3561                 drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
3562 }
3563
3564 static void
3565 nv50_mstm_init(struct nv50_mstm *mstm)
3566 {
3567         if (mstm && mstm->mgr.mst_state)
3568                 drm_dp_mst_topology_mgr_resume(&mstm->mgr);
3569 }
3570
3571 static void
3572 nv50_mstm_del(struct nv50_mstm **pmstm)
3573 {
3574         struct nv50_mstm *mstm = *pmstm;
3575         if (mstm) {
3576                 kfree(*pmstm);
3577                 *pmstm = NULL;
3578         }
3579 }
3580
/* Create the MST topology state for a DP output.
 *
 * @outp:         parent encoder
 * @aux:          AUX channel used to talk to the sink
 * @aux_max:      maximum AUX transfer size for the topology manager
 * @conn_base_id: base connector id handed to the topology manager
 * @pmstm:        receives the new mstm (caller frees via nv50_mstm_del()
 *                on failure; *pmstm stays set on error paths below)
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	/* One payload per head the output can drive. */
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret, i;
	u8 dpcd;

	/* This is a workaround for some monitors not functioning
	 * correctly in MST mode on initial module load.  I think
	 * some bad interaction with the VBIOS may be responsible.
	 *
	 * A good ol' off and on again seems to work here ;)
	 */
	ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
	if (ret >= 0 && dpcd >= 0x12)
		drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	/* Pre-create one stream encoder per possible payload. */
	for (i = 0; i < max_payloads; i++) {
		ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
				    i, &mstm->msto[i]);
		if (ret)
			return ret;
	}

	return 0;
}
3620
3621 /******************************************************************************
3622  * SOR
3623  *****************************************************************************/
/* Update the SOR's control state for one head.
 *
 * @nv_encoder: the SOR being (re)programmed
 * @head:       head index being attached/detached
 * @asyh:       new head state, or NULL to detach the head
 * @proto:      protocol field for the control word (bits 8+)
 * @depth:      colour depth code stored in the head state
 *
 * nv_encoder->ctrl accumulates the control word: the low nibble is the
 * owner mask of attached heads, higher bits carry the protocol (and, on
 * pre-GF110 hardware, depth/sync polarity).
 */
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
		struct nv50_head_atom *asyh, u8 proto, u8 depth)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct nv50_dmac *core = &disp->core->chan;
	u32 *push;

	if (!asyh) {
		/* Detach this head; once no heads remain, clear the whole
		 * control word (protocol bits included).
		 */
		nv_encoder->ctrl &= ~BIT(head);
		if (!(nv_encoder->ctrl & 0x0000000f))
			nv_encoder->ctrl = 0;
	} else {
		nv_encoder->ctrl |= proto << 8;
		nv_encoder->ctrl |= BIT(head);
		asyh->or.depth = depth;
	}

	if ((push = evo_wait(core, 6))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			/* Pre-GF110 cores fold depth and sync polarity into
			 * the same SOR control method.
			 */
			if (asyh) {
				nv_encoder->ctrl |= asyh->or.depth  << 16 |
						    asyh->or.nvsync << 13 |
						    asyh->or.nhsync << 12;
			}
			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
		} else {
			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
		}
		evo_data(push, nv_encoder->ctrl);
		evo_kick(push, core);
	}
}
3657
/* Encoder .disable for SORs: power the DP sink down, detach the head from
 * the SOR, stop audio/HDMI infoframes, and release the output resource.
 */
static void
nv50_sor_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);

	nv_encoder->crtc = NULL;

	if (nv_crtc) {
		struct nvkm_i2c_aux *aux = nv_encoder->aux;
		u8 pwr;

		/* DP sinks: request D3 (power down) via DPCD.  Best-effort;
		 * a failed read simply skips the write.
		 */
		if (aux) {
			int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
			if (ret == 0) {
				pwr &= ~DP_SET_POWER_MASK;
				pwr |=  DP_SET_POWER_D3;
				nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
			}
		}

		/* NULL head state detaches the head (see nv50_sor_update()). */
		nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
		nv50_audio_disable(encoder, nv_crtc);
		nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
		nv50_outp_release(nv_encoder);
	}
}
3685
/* Encoder .enable for SORs: pick the link protocol and depth for the
 * output type (TMDS/LVDS/DP), run any VBIOS LVDS script, and program the
 * SOR via nv_encoder->update() (nv50_sor_update()).
 */
static void
nv50_sor_enable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_lvds_script_v0 lvds;
	} lvds = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
		.base.hasht   = nv_encoder->dcb->hasht,
		.base.hashm   = nv_encoder->dcb->hashm,
	};
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_connector *nv_connector;
	struct nvbios *bios = &drm->vbios;
	u8 proto = 0xf;
	u8 depth = 0x0;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	nv_encoder->crtc = encoder->crtc;
	nv50_outp_acquire(nv_encoder);

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		/* proto 0x1/0x2 select single-link on link A/B; 0x1|0x4
		 * selects dual-link.
		 */
		if (nv_encoder->link & 1) {
			proto = 0x1;
			/* Only enable dual-link if:
			 *  - Need to (i.e. rate > 165MHz)
			 *  - DCB says we can
			 *  - Not an HDMI monitor, since there's no dual-link
			 *    on HDMI.
			 */
			if (mode->clock >= 165000 &&
			    nv_encoder->dcb->duallink_possible &&
			    !drm_detect_hdmi_monitor(nv_connector->edid))
				proto |= 0x4;
		} else {
			proto = 0x2;
		}

		nv50_hdmi_enable(&nv_encoder->base.base, mode);
		break;
	case DCB_OUTPUT_LVDS:
		proto = 0x0;

		/* Build the VBIOS LVDS script argument: bit 8 selects
		 * dual-link, bit 9 selects 24-bit panels.
		 */
		if (bios->fp_no_ddc) {
			if (bios->fp.dual_link)
				lvds.lvds.script |= 0x0100;
			if (bios->fp.if_is_24bit)
				lvds.lvds.script |= 0x0200;
		} else {
			/* SPWG panels encode dual-link in EDID byte 121. */
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				if (((u8 *)nv_connector->edid)[121] == 2)
					lvds.lvds.script |= 0x0100;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				lvds.lvds.script |= 0x0100;
			}

			if (lvds.lvds.script & 0x0100) {
				if (bios->fp.strapless_is_24bit & 2)
					lvds.lvds.script |= 0x0200;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					lvds.lvds.script |= 0x0200;
			}

			if (nv_connector->base.display_info.bpc == 8)
				lvds.lvds.script |= 0x0200;
		}

		nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
		break;
	case DCB_OUTPUT_DP:
		/* Depth codes: 0x2 = 18bpp, 0x5 = 24bpp, 0x6 = 30bpp
		 * (matches the MST path in nv50_msto_enable()).
		 */
		if (nv_connector->base.display_info.bpc == 6)
			depth = 0x2;
		else
		if (nv_connector->base.display_info.bpc == 8)
			depth = 0x5;
		else
			depth = 0x6;

		/* proto 0x8/0x9 = DP on link A/B. */
		if (nv_encoder->link & 1)
			proto = 0x8;
		else
			proto = 0x9;

		nv50_audio_enable(encoder, mode);
		break;
	default:
		BUG();
		break;
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
}
3788
/* Encoder helper hooks for SOR outputs. */
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_sor_enable,
	.disable = nv50_sor_disable,
};
3795
/* Encoder .destroy for SORs: tear down MST state first, then the DRM
 * encoder, then free the allocation made in nv50_sor_create().
 */
static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	nv50_mstm_del(&nv_encoder->dp.mstm);
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3804
/* Base encoder funcs for SOR outputs. */
static const struct drm_encoder_funcs
nv50_sor_func = {
	.destroy = nv50_sor_destroy,
};
3809
/* Create a DRM encoder for a DCB SOR entry and attach it to @connector.
 *
 * Picks the i2c/AUX channel for the output type and, for DP outputs on
 * GF110+ display classes, sets up MST support.
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type, ret;

	switch (dcbe->type) {
	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
	default:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	/* Hook used by both the SST (nv50_sor_enable) and MST paths. */
	nv_encoder->update = nv50_sor_update;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_sor_help);

	drm_mode_connector_attach_encoder(connector, encoder);

	if (dcbe->type == DCB_OUTPUT_DP) {
		struct nv50_disp *disp = nv50_disp(encoder->dev);
		struct nvkm_i2c_aux *aux =
			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
		if (aux) {
			if (disp->disp->object.oclass < GF110_DISP) {
				/* HW has no support for address-only
				 * transactions, so we're required to
				 * use custom I2C-over-AUX code.
				 */
				nv_encoder->i2c = &aux->i2c;
			} else {
				nv_encoder->i2c = &nv_connector->aux.ddc;
			}
			nv_encoder->aux = aux;
		}

		/*TODO: Use DP Info Table to check for support. */
		if (disp->disp->object.oclass >= GF110_DISP) {
			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
					    nv_connector->base.base.id,
					    &nv_encoder->dp.mstm);
			if (ret)
				return ret;
		}
	} else {
		struct nvkm_i2c_bus *bus =
			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
		if (bus)
			nv_encoder->i2c = &bus->i2c;
	}

	return 0;
}
3878
3879 /******************************************************************************
3880  * PIOR
3881  *****************************************************************************/
3882 static int
3883 nv50_pior_atomic_check(struct drm_encoder *encoder,
3884                        struct drm_crtc_state *crtc_state,
3885                        struct drm_connector_state *conn_state)
3886 {
3887         int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
3888         if (ret)
3889                 return ret;
3890         crtc_state->adjusted_mode.clock *= 2;
3891         return 0;
3892 }
3893
/* Encoder .disable for PIORs: clear the PIOR control method on the core
 * channel (pre-GF110 layout) and release the output resource.
 */
static void
nv50_pior_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(core, 4);
		if (push) {
			if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
				/* 0x0700 + or*0x40 is the PIOR control
				 * method; zero detaches all heads.
				 */
				evo_mthd(push, 0x0700 + (or * 0x040), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, core);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}
3916
/* Encoder .enable for PIORs: derive the depth code from the connector's
 * bpc, then program the PIOR control word (depth, sync polarity, protocol
 * and head owner mask) on the core channel.
 */
static void
nv50_pior_enable(struct drm_encoder *encoder)
{
	struct nv50_dmac *core = &nv50_disp(encoder->dev)->core->chan;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
	u8 owner = 1 << nv_crtc->index;
	u8 proto;
	u32 *push;

	nv50_outp_acquire(nv_encoder);

	/* Same depth encoding as the SOR path: 0x6 = 30bpp, 0x5 = 24bpp,
	 * 0x2 = 18bpp, 0x0 = default.
	 */
	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	switch (nv_connector->base.display_info.bpc) {
	case 10: asyh->or.depth = 0x6; break;
	case  8: asyh->or.depth = 0x5; break;
	case  6: asyh->or.depth = 0x2; break;
	default: asyh->or.depth = 0x0; break;
	}

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
		proto = 0x0;
		break;
	default:
		BUG();
		break;
	}

	push = evo_wait(core, 8);
	if (push) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
			evo_data(push, (asyh->or.depth  << 16) |
				       (asyh->or.nvsync << 13) |
				       (asyh->or.nhsync << 12) |
				       (proto << 8) | owner);
		}

		evo_kick(push, core);
	}

	nv_encoder->crtc = encoder->crtc;
}
3964
/* Encoder helper hooks for PIOR outputs. */
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
	.atomic_check = nv50_pior_atomic_check,
	.enable = nv50_pior_enable,
	.disable = nv50_pior_disable,
};
3971
/* Encoder .destroy for PIORs: unregister and free the encoder allocated
 * in nv50_pior_create().
 */
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3978
/* Base encoder funcs for PIOR outputs. */
static const struct drm_encoder_funcs
nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};
3983
/* Create a DRM encoder for a DCB PIOR (external encoder) entry and attach
 * it to @connector.  TMDS entries use an external i2c bus, DP entries an
 * external AUX channel; other output types are not supported on PIORs.
 *
 * Returns 0 on success or a negative errno.
 */
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus = NULL;
	struct nvkm_i2c_aux *aux = NULL;
	struct i2c_adapter *ddc;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type;

	switch (dcbe->type) {
	case DCB_OUTPUT_TMDS:
		bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
		ddc  = bus ? &bus->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case DCB_OUTPUT_DP:
		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
		ddc  = aux ? &aux->i2c : NULL;
		/* DP-over-PIOR still registers as a TMDS encoder. */
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -ENODEV;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->i2c = ddc;
	nv_encoder->aux = aux;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_pior_help);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
4028
4029 /******************************************************************************
4030  * Atomic
4031  *****************************************************************************/
4032
/* Kick a core-channel update and wait for completion.
 *
 * MST payloads are prepared on every modified topology before the update
 * is submitted, and cleaned up afterwards.  @interlock carries the window
 * channel interlock mask for the update method.
 */
static void
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_dmac *core = &disp->core->chan;
	struct nv50_mstm *mstm;
	struct drm_encoder *encoder;
	u32 *push;

	NV_ATOMIC(drm, "commit core %08x\n", interlock);

	/* MST encoders (DPMST type) are the streams themselves; the mstm
	 * state hangs off the real (SOR) encoders, so skip DPMST here.
	 */
	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_prepare(mstm);
		}
	}

	if ((push = evo_wait(core, 5))) {
		/* Submit the update (method 0x0080), then busy-wait on the
		 * completion notifier written into disp->sync.
		 */
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x80000000);
		evo_mthd(push, 0x0080, 2);
		evo_data(push, interlock);
		evo_data(push, 0x00000000);
		nouveau_bo_wr32(disp->sync, 0, 0x00000000);
		evo_kick(push, core);
		if (nvif_msec(&drm->client.device, 2000ULL,
			if (nouveau_bo_rd32(disp->sync, 0))
				break;
			usleep_range(1, 2);
		) < 0)
			NV_ERROR(drm, "EVO timeout\n");
	}

	drm_for_each_encoder(encoder, drm->dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_cleanup(mstm);
		}
	}
}
4076
/* Apply a swapped-in atomic state to the hardware.
 *
 * Programming order: disable heads -> disable planes -> disable output
 * paths (flushing through the core channel where required) -> enable
 * output paths -> enable heads -> enable planes -> final core flush ->
 * wait for windows to latch -> deliver pending vblank events.
 *
 * interlock_core / interlock_chan accumulate which channels have
 * pending state that the next core-channel update must be interlocked
 * with.
 */
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct drm_crtc *crtc;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;
	u32 interlock_core = 0;
	u32 interlock_chan = 0;
	int i;

	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
	drm_atomic_helper_wait_for_fences(dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);
	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	/* Serialise against other commits touching the core channel. */
	if (atom->lock_core)
		mutex_lock(&disp->mutex);

	/* Disable head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
			  asyh->clr.mask, asyh->set.mask);
		if (old_crtc_state->active && !new_crtc_state->active)
			drm_crtc_vblank_off(crtc);

		if (asyh->clr.mask) {
			nv50_head_flush_clr(head, asyh, atom->flush_disable);
			interlock_core |= 1;
		}
	}

	/* Disable plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
			  asyw->clr.mask, asyw->set.mask);
		if (!asyw->clr.mask)
			continue;

		interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
						      atom->flush_disable,
						      asyw);
	}

	/* Disable output path(s). */
	list_for_each_entry(outp, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
			  outp->clr.mask, outp->set.mask);

		if (outp->clr.mask) {
			help->disable(encoder);
			interlock_core |= 1;
			/* Some disables (e.g. MST, per check_clr) must
			 * reach hardware before we continue; flush and
			 * restart the interlock accounting. */
			if (outp->flush_disable) {
				nv50_disp_atomic_commit_core(drm, interlock_chan);
				interlock_core = 0;
				interlock_chan = 0;
			}
		}
	}

	/* Flush disable. */
	if (interlock_core) {
		if (atom->flush_disable) {
			nv50_disp_atomic_commit_core(drm, interlock_chan);
			interlock_core = 0;
			interlock_chan = 0;
		}
	}

	/* Update output path(s). */
	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
			  outp->set.mask, outp->clr.mask);

		if (outp->set.mask) {
			help->enable(encoder);
			interlock_core = 1;
		}

		/* The outp list is consumed here; state_clear() frees
		 * anything left over from a failed check phase. */
		list_del(&outp->head);
		kfree(outp);
	}

	/* Update head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set(head, asyh);
			interlock_core = 1;
		}

		if (new_crtc_state->active) {
			if (!old_crtc_state->active)
				drm_crtc_vblank_on(crtc);
			/* Hold a vblank ref until the event is sent. */
			if (new_crtc_state->event)
				drm_crtc_vblank_get(crtc);
		}
	}

	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
			  asyw->set.mask, asyw->clr.mask);
		if (!asyw->set.mask &&
		    (!asyw->clr.mask || atom->flush_disable))
			continue;

		interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
	}

	/* Flush update. */
	if (interlock_core) {
		/* Fast path for legacy cursor moves: a bare core update
		 * with no satellite interlock, and no completion wait. */
		if (!interlock_chan && atom->state.legacy_cursor_update) {
			u32 *push = evo_wait(&disp->core->chan, 2);
			if (push) {
				evo_mthd(push, 0x0080, 1);
				evo_data(push, 0x00000000);
				evo_kick(push, &disp->core->chan);
			}
		} else {
			nv50_disp_atomic_commit_core(drm, interlock_chan);
		}
	}

	if (atom->lock_core)
		mutex_unlock(&disp->mutex);

	/* Wait for HW to signal completion. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);
		int ret = nv50_wndw_wait_armed(wndw, asyw);
		if (ret)
			NV_ERROR(drm, "%s: timeout\n", plane->name);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event) {
			unsigned long flags;
			/* Get correct count/ts if racing with vblank irq */
			if (new_crtc_state->active)
				drm_crtc_accurate_vblank_count(crtc);
			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

			new_crtc_state->event = NULL;
			if (new_crtc_state->active)
				drm_crtc_vblank_put(crtc);
		}
	}

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);
}
4265
4266 static void
4267 nv50_disp_atomic_commit_work(struct work_struct *work)
4268 {
4269         struct drm_atomic_state *state =
4270                 container_of(work, typeof(*state), commit_work);
4271         nv50_disp_atomic_commit_tail(state);
4272 }
4273
4274 static int
4275 nv50_disp_atomic_commit(struct drm_device *dev,
4276                         struct drm_atomic_state *state, bool nonblock)
4277 {
4278         struct nouveau_drm *drm = nouveau_drm(dev);
4279         struct nv50_disp *disp = nv50_disp(dev);
4280         struct drm_plane_state *new_plane_state;
4281         struct drm_plane *plane;
4282         struct drm_crtc *crtc;
4283         bool active = false;
4284         int ret, i;
4285
4286         ret = pm_runtime_get_sync(dev->dev);
4287         if (ret < 0 && ret != -EACCES)
4288                 return ret;
4289
4290         ret = drm_atomic_helper_setup_commit(state, nonblock);
4291         if (ret)
4292                 goto done;
4293
4294         INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
4295
4296         ret = drm_atomic_helper_prepare_planes(dev, state);
4297         if (ret)
4298                 goto done;
4299
4300         if (!nonblock) {
4301                 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
4302                 if (ret)
4303                         goto err_cleanup;
4304         }
4305
4306         ret = drm_atomic_helper_swap_state(state, true);
4307         if (ret)
4308                 goto err_cleanup;
4309
4310         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
4311                 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
4312                 struct nv50_wndw *wndw = nv50_wndw(plane);
4313
4314                 if (asyw->set.image) {
4315                         asyw->ntfy.handle = wndw->wndw.sync.handle;
4316                         asyw->ntfy.offset = wndw->ntfy;
4317                         asyw->ntfy.awaken = false;
4318                         asyw->set.ntfy = true;
4319                         nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
4320                         wndw->ntfy ^= 0x10;
4321                 }
4322         }
4323
4324         drm_atomic_state_get(state);
4325
4326         if (nonblock)
4327                 queue_work(system_unbound_wq, &state->commit_work);
4328         else
4329                 nv50_disp_atomic_commit_tail(state);
4330
4331         drm_for_each_crtc(crtc, dev) {
4332                 if (crtc->state->enable) {
4333                         if (!drm->have_disp_power_ref) {
4334                                 drm->have_disp_power_ref = true;
4335                                 return 0;
4336                         }
4337                         active = true;
4338                         break;
4339                 }
4340         }
4341
4342         if (!active && drm->have_disp_power_ref) {
4343                 pm_runtime_put_autosuspend(dev->dev);
4344                 drm->have_disp_power_ref = false;
4345         }
4346
4347 err_cleanup:
4348         if (ret)
4349                 drm_atomic_helper_cleanup_planes(dev, state);
4350 done:
4351         pm_runtime_put_autosuspend(dev->dev);
4352         return ret;
4353 }
4354
4355 static struct nv50_outp_atom *
4356 nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
4357 {
4358         struct nv50_outp_atom *outp;
4359
4360         list_for_each_entry(outp, &atom->outp, head) {
4361                 if (outp->encoder == encoder)
4362                         return outp;
4363         }
4364
4365         outp = kzalloc(sizeof(*outp), GFP_KERNEL);
4366         if (!outp)
4367                 return ERR_PTR(-ENOMEM);
4368
4369         list_add(&outp->head, &atom->outp);
4370         outp->encoder = encoder;
4371         return outp;
4372 }
4373
4374 static int
4375 nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
4376                                 struct drm_connector_state *old_connector_state)
4377 {
4378         struct drm_encoder *encoder = old_connector_state->best_encoder;
4379         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4380         struct drm_crtc *crtc;
4381         struct nv50_outp_atom *outp;
4382
4383         if (!(crtc = old_connector_state->crtc))
4384                 return 0;
4385
4386         old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
4387         new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
4388         if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
4389                 outp = nv50_disp_outp_atomic_add(atom, encoder);
4390                 if (IS_ERR(outp))
4391                         return PTR_ERR(outp);
4392
4393                 if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
4394                         outp->flush_disable = true;
4395                         atom->flush_disable = true;
4396                 }
4397                 outp->clr.ctrl = true;
4398                 atom->lock_core = true;
4399         }
4400
4401         return 0;
4402 }
4403
4404 static int
4405 nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
4406                                 struct drm_connector_state *connector_state)
4407 {
4408         struct drm_encoder *encoder = connector_state->best_encoder;
4409         struct drm_crtc_state *new_crtc_state;
4410         struct drm_crtc *crtc;
4411         struct nv50_outp_atom *outp;
4412
4413         if (!(crtc = connector_state->crtc))
4414                 return 0;
4415
4416         new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
4417         if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
4418                 outp = nv50_disp_outp_atomic_add(atom, encoder);
4419                 if (IS_ERR(outp))
4420                         return PTR_ERR(outp);
4421
4422                 outp->set.ctrl = true;
4423                 atom->lock_core = true;
4424         }
4425
4426         return 0;
4427 }
4428
4429 static int
4430 nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
4431 {
4432         struct nv50_atom *atom = nv50_atom(state);
4433         struct drm_connector_state *old_connector_state, *new_connector_state;
4434         struct drm_connector *connector;
4435         int ret, i;
4436
4437         ret = drm_atomic_helper_check(dev, state);
4438         if (ret)
4439                 return ret;
4440
4441         for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
4442                 ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
4443                 if (ret)
4444                         return ret;
4445
4446                 ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
4447                 if (ret)
4448                         return ret;
4449         }
4450
4451         return 0;
4452 }
4453
4454 static void
4455 nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
4456 {
4457         struct nv50_atom *atom = nv50_atom(state);
4458         struct nv50_outp_atom *outp, *outt;
4459
4460         list_for_each_entry_safe(outp, outt, &atom->outp, head) {
4461                 list_del(&outp->head);
4462                 kfree(outp);
4463         }
4464
4465         drm_atomic_state_default_clear(state);
4466 }
4467
/* Release an nv50 atomic state: default DRM release, then free the
 * containing nv50_atom allocation. */
static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);

	drm_atomic_state_default_release(state);
	kfree(atom);
}
4475
4476 static struct drm_atomic_state *
4477 nv50_disp_atomic_state_alloc(struct drm_device *dev)
4478 {
4479         struct nv50_atom *atom;
4480         if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
4481             drm_atomic_state_init(dev, &atom->state) < 0) {
4482                 kfree(atom);
4483                 return NULL;
4484         }
4485         INIT_LIST_HEAD(&atom->outp);
4486         return &atom->state;
4487 }
4488
/* DRM mode-config entry points: framebuffer creation, fbdev repoll on
 * hotplug, and the atomic check/commit + state-lifecycle hooks defined
 * above. */
static const struct drm_mode_config_funcs
nv50_disp_func = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = nv50_disp_atomic_check,
	.atomic_commit = nv50_disp_atomic_commit,
	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
	.atomic_state_clear = nv50_disp_atomic_state_clear,
	.atomic_state_free = nv50_disp_atomic_state_free,
};
4499
4500 /******************************************************************************
4501  * Init
4502  *****************************************************************************/
4503
4504 void
4505 nv50_display_fini(struct drm_device *dev)
4506 {
4507         struct nouveau_encoder *nv_encoder;
4508         struct drm_encoder *encoder;
4509         struct drm_plane *plane;
4510
4511         drm_for_each_plane(plane, dev) {
4512                 struct nv50_wndw *wndw = nv50_wndw(plane);
4513                 if (plane->funcs != &nv50_wndw)
4514                         continue;
4515                 nv50_wndw_fini(wndw);
4516         }
4517
4518         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4519                 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
4520                         nv_encoder = nouveau_encoder(encoder);
4521                         nv50_mstm_fini(nv_encoder->dp.mstm);
4522                 }
4523         }
4524 }
4525
4526 int
4527 nv50_display_init(struct drm_device *dev)
4528 {
4529         struct nv50_dmac *core = &nv50_disp(dev)->core->chan;
4530         struct drm_encoder *encoder;
4531         struct drm_plane *plane;
4532         u32 *push;
4533
4534         push = evo_wait(core, 32);
4535         if (!push)
4536                 return -EBUSY;
4537
4538         evo_mthd(push, 0x0088, 1);
4539         evo_data(push, core->sync.handle);
4540         evo_kick(push, core);
4541
4542         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4543                 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
4544                         struct nouveau_encoder *nv_encoder =
4545                                 nouveau_encoder(encoder);
4546                         nv50_mstm_init(nv_encoder->dp.mstm);
4547                 }
4548         }
4549
4550         drm_for_each_plane(plane, dev) {
4551                 struct nv50_wndw *wndw = nv50_wndw(plane);
4552                 if (plane->funcs != &nv50_wndw)
4553                         continue;
4554                 nv50_wndw_init(wndw);
4555         }
4556
4557         return 0;
4558 }
4559
/* Tear down the nv50 display in reverse creation order: core channel
 * first, then the shared sync buffer (unmap -> unpin -> drop ref),
 * finally the disp structure itself.  Also invoked by
 * nv50_display_create() on its error path, so partial construction
 * must be tolerated. */
void
nv50_display_destroy(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);

	nv50_core_del(&disp->core);

	/* NOTE(review): unmap is called before the NULL check below —
	 * presumably nouveau_bo_unmap() tolerates a NULL bo; verify. */
	nouveau_bo_unmap(disp->sync);
	if (disp->sync)
		nouveau_bo_unpin(disp->sync);
	nouveau_bo_ref(NULL, &disp->sync);

	nouveau_display(dev)->priv = NULL;
	kfree(disp);
}
4575
/* Module option gating DRIVER_ATOMIC (see nv50_display_create()).
 * Default off; permission 0400 makes it read-only after load. */
MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);
4579
/* Construct the nv50+ display: shared sync buffer, core EVO channel,
 * one CRTC per hardware head, and encoder/connector objects from the
 * VBIOS DCB table.
 *
 * Returns 0 on success or a negative errno; on failure any partially
 * constructed state is torn down via nv50_display_destroy(). */
int
nv50_display_create(struct drm_device *dev)
{
	struct nvif_device *device = &nouveau_drm(dev)->client.device;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct nv50_disp *disp;
	struct dcb_output *dcbe;
	int crtcs, ret, i;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	mutex_init(&disp->mutex);

	nouveau_display(dev)->priv = disp;
	nouveau_display(dev)->dtor = nv50_display_destroy;
	nouveau_display(dev)->init = nv50_display_init;
	nouveau_display(dev)->fini = nv50_display_fini;
	disp->disp = &nouveau_display(dev)->disp;
	dev->mode_config.funcs = &nv50_disp_func;
	dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
	/* Atomic ioctl support is opt-in via the module parameter. */
	if (nouveau_atomic)
		dev->driver->driver_features |= DRIVER_ATOMIC;

	/* small shared memory area we use for notifiers and semaphores */
	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, NULL, &disp->sync);
	if (!ret) {
		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
		if (!ret) {
			ret = nouveau_bo_map(disp->sync);
			if (ret)
				nouveau_bo_unpin(disp->sync);
		}
		if (ret)
			nouveau_bo_ref(NULL, &disp->sync);
	}

	if (ret)
		goto out;

	/* allocate master evo channel */
	ret = nv50_core_new(drm, &disp->core);
	if (ret)
		goto out;

	/* create crtc objects to represent the hw heads; GF110+ exposes
	 * the head mask in a register, earlier chips have a fixed mask
	 * of two heads (0x3) */
	if (disp->disp->object.oclass >= GF110_DISP)
		crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
	else
		crtcs = 0x3;

	for (i = 0; i < fls(crtcs); i++) {
		if (!(crtcs & (1 << i)))
			continue;
		ret = nv50_head_create(dev, i);
		if (ret)
			goto out;
	}

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe->connector);
		if (IS_ERR(connector))
			continue;

		if (dcbe->location == DCB_LOC_ON_CHIP) {
			switch (dcbe->type) {
			case DCB_OUTPUT_TMDS:
			case DCB_OUTPUT_LVDS:
			case DCB_OUTPUT_DP:
				ret = nv50_sor_create(connector, dcbe);
				break;
			case DCB_OUTPUT_ANALOG:
				ret = nv50_dac_create(connector, dcbe);
				break;
			default:
				ret = -ENODEV;
				break;
			}
		} else {
			/* off-chip (board-level) encoders attach via PIOR */
			ret = nv50_pior_create(connector, dcbe);
		}

		/* a single bad DCB entry is not fatal: warn and keep
		 * creating the remaining outputs */
		if (ret) {
			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
				     dcbe->location, dcbe->type,
				     ffs(dcbe->or) - 1, ret);
			ret = 0;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->encoder_ids[0])
			continue;

		NV_WARN(drm, "%s has no encoders, removing\n",
			connector->name);
		connector->funcs->destroy(connector);
	}

out:
	if (ret)
		nv50_display_destroy(dev);
	return ret;
}