/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "wndw.h"
#include "wimm.h"

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>

#include "nouveau_bo.h"

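/* Each window keeps a small cache of DMA context objects (ctxdmas) that
 * describe framebuffer memory to the display channel.  The helpers below
 * manage entries in that cache.
 */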
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}

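/* Look up a cached ctxdma matching this framebuffer's memory kind, creating
 * one if necessary.  The object handle encodes the kind, so framebuffers
 * with the same memory layout share a single ctxdma.
 */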
static struct nv50_wndw_ctxdma *
nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_wndw_ctxdma *ctxdma;
	const u8 kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	list_add(&ctxdma->head, &wndw->ctxdma.list);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(wndw->ctxdma.parent, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_wndw_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}

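/* If a notifier was armed for this update, wait for the hardware to signal
 * that it has begun processing it.
 */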
int
nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (asyw->set.ntfy) {
		return wndw->func->ntfy_wait_begun(disp->sync,
						   asyw->ntfy.offset,
						   wndw->wndw.base.device);
	}
	return 0;
}

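/* Emit "clear" methods for window state being released by this update.
 * When flush is true, state that is about to be set again is cleared
 * anyway; otherwise those bits are skipped.
 */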
void
nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
		    struct nv50_wndw_atom *asyw)
{
	union nv50_wndw_atom_mask clr = {
		.mask = asyw->clr.mask & ~(flush ? 0 : asyw->set.mask),
	};
	if (clr.sema ) wndw->func-> sema_clr(wndw);
	if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
	if (clr.xlut ) wndw->func-> xlut_clr(wndw);
	if (clr.image) wndw->func->image_clr(wndw);

	interlock[wndw->interlock.type] |= wndw->interlock.data;
}

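/* Emit "set" methods for all window state touched by this update, and
 * accumulate the interlocks the rest of the commit must wait on.
 */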
void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
		    struct nv50_wndw_atom *asyw)
{
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		asyw->image.mode = 0;
		asyw->image.interval = 1;
	}

	if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
	if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
	if (asyw->set.image) wndw->func->image_set(wndw, asyw);

	if (asyw->set.xlut ) {
		if (asyw->ilut) {
			asyw->xlut.i.offset =
				nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
					      asyw->ilut, asyw->xlut.i.load);
		}
		wndw->func->xlut_set(wndw, asyw);
	}

	if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
	if (asyw->set.point) {
		if (asyw->set.point = false, asyw->set.mask)
			interlock[wndw->interlock.type] |= wndw->interlock.data;
		interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;

		wndw->immd->point(wndw, asyw);
		wndw->immd->update(wndw, interlock);
	} else {
		interlock[wndw->interlock.type] |= wndw->interlock.data;
	}
}

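/* Arm the completion notifier for this window so that the update can later
 * be waited upon via nv50_wndw_wait_armed().
 */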
void
nv50_wndw_ntfy_enable(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);

	asyw->ntfy.handle = wndw->wndw.sync.handle;
	asyw->ntfy.offset = wndw->ntfy;
	asyw->ntfy.awaken = false;
	asyw->set.ntfy = true;

	wndw->func->ntfy_reset(disp->sync, wndw->ntfy);
	wndw->ntfy ^= 0x10;
}

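/* The window is being disabled: let the backend release its resources and
 * drop the notifier/semaphore handles.
 */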
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}

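/* Translate the framebuffer's DRM fourcc into the hardware image format and
 * colorspace.  Formats the hardware doesn't understand are rejected.
 */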
static int
nv50_wndw_atomic_check_acquire_yuv(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_YUYV: asyw->image.format = 0x28; break;
	case DRM_FORMAT_UYVY: asyw->image.format = 0x29; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	asyw->image.colorspace = 1;
	return 0;
}

static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010: asyw->image.format = 0xdf; break;
	default:
		return -EINVAL;
	}
	asyw->image.colorspace = 0;
	return 0;
}

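/* Validate and capture image, scaling and position state for a window that
 * will be visible after this commit, then call the backend's acquire() for
 * any hardware-specific checks.
 */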
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
			       struct nv50_wndw_atom *armw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);

	if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
		asyw->image.w = fb->base.width;
		asyw->image.h = fb->base.height;
		asyw->image.kind = fb->nvbo->kind;

		ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
		if (ret) {
			ret = nv50_wndw_atomic_check_acquire_yuv(asyw);
			if (ret)
				return ret;
		}

		if (asyw->image.kind) {
			asyw->image.layout = 0;
			if (drm->client.device.info.chipset >= 0xc0)
				asyw->image.blockh = fb->nvbo->mode >> 4;
			else
				asyw->image.blockh = fb->nvbo->mode;
			asyw->image.blocks[0] = fb->base.pitches[0] / 64;
			asyw->image.pitch[0] = 0;
		} else {
			asyw->image.layout = 1;
			asyw->image.blockh = 0;
			asyw->image.blocks[0] = 0;
			asyw->image.pitch[0] = fb->base.pitches[0];
		}

		if (!(asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC))
			asyw->image.interval = 1;
		else
			asyw->image.interval = 0;
		asyw->image.mode = asyw->image.interval ? 0 : 1;
		asyw->set.image = wndw->func->image_set != NULL;
	}

	if (wndw->func->scale_set) {
		asyw->scale.sx = asyw->state.src_x >> 16;
		asyw->scale.sy = asyw->state.src_y >> 16;
		asyw->scale.sw = asyw->state.src_w >> 16;
		asyw->scale.sh = asyw->state.src_h >> 16;
		asyw->scale.dw = asyw->state.crtc_w;
		asyw->scale.dh = asyw->state.crtc_h;
		if (memcmp(&armw->scale, &asyw->scale, sizeof(asyw->scale)))
			asyw->set.scale = true;
	}

	if (wndw->immd) {
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;
	}

	return wndw->func->acquire(wndw, asyw, asyh);
}

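/* Work out whether an input (degamma) LUT must be programmed for this
 * window, falling back to the gamma LUT for C8 framebuffers that were
 * committed without one.
 */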
static void
nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
			   struct nv50_wndw_atom *armw,
			   struct nv50_wndw_atom *asyw,
			   struct nv50_head_atom *asyh)
{
	struct drm_property_blob *ilut = asyh->state.degamma_lut;

	/* I8 format without an input LUT makes no sense, and the
	 * HW error-checks for this.
	 *
	 * In order to handle legacy gamma, when there's no input
	 * LUT we need to steal the output LUT and use it instead.
	 */
	if (!ilut && asyw->state.fb->format->format == DRM_FORMAT_C8) {
		/* This should be an error, but there's legacy clients
		 * that do a modeset before providing a gamma table.
		 *
		 * We keep the window disabled to avoid angering HW.
		 */
		if (!(ilut = asyh->state.gamma_lut)) {
			asyw->visible = false;
			return;
		}
	}

	if (wndw->func->ilut)
		asyh->wndw.olut |= BIT(wndw->id);
	else
		asyh->wndw.olut &= ~BIT(wndw->id);

	if (!ilut && wndw->func->ilut_identity &&
	    asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
	    asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
		static struct drm_property_blob dummy = {};
		ilut = &dummy;
	}

	/* Recalculate LUT state. */
	memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
	if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
		wndw->func->ilut(wndw, asyw);
		asyw->xlut.handle = wndw->wndw.vram.handle;
		asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
		asyw->set.xlut = true;
	} else {
		asyw->clr.xlut = armw->xlut.handle != 0;
	}

	/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
	if (wndw->func->olut_core &&
	    (!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
		asyw->set.xlut = true;

	/* Can't do an immediate flip while changing the LUT. */
	asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
}

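/* Top-level atomic check for a window plane: determine visibility, LUT and
 * image state for the new configuration, and flag which hardware state must
 * be set or cleared when the commit is executed.
 */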
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool modeset = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);

	/* Fetch the assembly state for the head the window will belong to,
	 * and determine whether the window will be visible.
	 */
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		modeset = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyw->visible = asyh->state.active;
	} else {
		asyw->visible = false;
	}

	/* Fetch assembly state for the head the window used to belong to. */
	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
	}

	/* LUT configuration can potentially cause the window to be disabled. */
	if (asyw->visible && wndw->func->xlut_set &&
	    (!armw->visible ||
	     asyh->state.color_mgmt_changed ||
	     asyw->state.fb->format->format !=
	     armw->state.fb->format->format))
		nv50_wndw_atomic_check_lut(wndw, armw, asyw, asyh);

	/* Calculate new window state. */
	if (asyw->visible) {
		ret = nv50_wndw_atomic_check_acquire(wndw, modeset,
						     armw, asyw, asyh);
		if (ret)
			return ret;

		asyh->wndw.mask |= BIT(wndw->id);
	} else
	if (armw->visible) {
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
		harm->wndw.mask &= ~BIT(wndw->id);
	} else {
		return 0;
	}

	/* Aside from the obvious case where the window is actively being
	 * disabled, we might also need to temporarily disable the window
	 * when performing certain modeset operations.
	 */
	if (!asyw->visible || modeset) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		asyw->clr.xlut = armw->xlut.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle[0] != 0;
	}

	return 0;
}

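/* Undo nv50_wndw_prepare_fb(): unpin the framebuffer's backing object. */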
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);

	NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
	if (!old_state->fb)
		return;

	nouveau_bo_unpin(fb->nvbo);
}

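/* Pin the new framebuffer into VRAM, ensure a ctxdma exists for it, and
 * record the fence and offset the commit will need.
 */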
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_wndw_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	if (wndw->ctxdma.parent) {
		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
		if (IS_ERR(ctxdma)) {
			nouveau_bo_unpin(fb->nvbo);
			return PTR_ERR(ctxdma);
		}

		asyw->image.handle[0] = ctxdma->object.handle;
	}

	asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
	asyw->image.offset[0] = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}

static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};

static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	__drm_atomic_helper_plane_destroy_state(&asyw->state);
	kfree(asyw);
}

static struct drm_plane_state *
nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
{
	struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
	struct nv50_wndw_atom *asyw;
	if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
		return NULL;
	__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
	asyw->sema = armw->sema;
	asyw->ntfy = armw->ntfy;
	asyw->ilut = NULL;
	asyw->xlut = armw->xlut;
	asyw->image = armw->image;
	asyw->point = armw->point;
	asyw->clr.mask = 0;
	asyw->set.mask = 0;
	return &asyw->state;
}

static void
nv50_wndw_reset(struct drm_plane *plane)
{
	struct nv50_wndw_atom *asyw;

	if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
		return;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);
	plane->state = &asyw->state;
	plane->state->plane = plane;
	plane->state->rotation = DRM_MODE_ROTATE_0;
}

static void
nv50_wndw_destroy(struct drm_plane *plane)
{
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_ctxdma *ctxdma, *ctxtmp;

	list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
		nv50_wndw_ctxdma_del(ctxdma);
	}

	nvif_notify_fini(&wndw->notify);
	nv50_dmac_destroy(&wndw->wimm);
	nv50_dmac_destroy(&wndw->wndw);

	nv50_lut_fini(&wndw->ilut);

	drm_plane_cleanup(&wndw->plane);
	kfree(wndw);
}

const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};

static int
nv50_wndw_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}

void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}

void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}

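/* Common constructor for all window types: allocate the nv50_wndw, register
 * the DRM plane and its helpers, and set up the input LUT and notifier
 * hooks used by the backend implementation.
 */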
int
nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       const u32 *format, u32 heads,
	       enum nv50_disp_interlock_type interlock_type, u32 interlock_data,
	       struct nv50_wndw **pwndw)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_wndw *wndw;
	int nformat;
	int ret;

	if (!(wndw = *pwndw = kzalloc(sizeof(*wndw), GFP_KERNEL)))
		return -ENOMEM;
	wndw->func = func;
	wndw->id = index;
	wndw->interlock.type = interlock_type;
	wndw->interlock.data = interlock_data;

	wndw->ctxdma.parent = &wndw->wndw.base.user;
	INIT_LIST_HEAD(&wndw->ctxdma.list);

	for (nformat = 0; format[nformat]; nformat++);

	ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret) {
		kfree(*pwndw);
		*pwndw = NULL;
		return ret;
	}

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);

	if (wndw->func->ilut) {
		ret = nv50_lut_init(disp, mmu, &wndw->ilut);
		if (ret)
			return ret;
	}

	wndw->notify.func = nv50_wndw_notify;
	return 0;
}

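/* Create a window plane by probing for the newest window channel class the
 * display supports (TU102, then GV100), then initialise the paired
 * window-immediate channel via nv50_wimm_init().
 */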
int
nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
	      struct nv50_wndw **pwndw)
{
	struct {
		s32 oclass;
		int version;
		int (*new)(struct nouveau_drm *, enum drm_plane_type,
			   int, s32, struct nv50_wndw **);
	} wndws[] = {
		{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
		{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
		{}
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	int cid, ret;

	cid = nvif_mclass(&disp->disp->object, wndws);
	if (cid < 0) {
		NV_ERROR(drm, "No supported window class\n");
		return cid;
	}

	ret = wndws[cid].new(drm, type, index, wndws[cid].oclass, pwndw);
	if (ret)
		return ret;

	return nv50_wimm_init(drm, *pwndw);
}