/* drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c */
1 /*
2  * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/sort.h>
20 #include <drm/drm_mode.h>
21 #include <drm/drm_crtc.h>
22 #include <drm/drm_crtc_helper.h>
23 #include <drm/drm_flip_work.h>
24
25 #include "mdp5_kms.h"
26
27 #define CURSOR_WIDTH    64
28 #define CURSOR_HEIGHT   64
29
/* Per-CRTC driver state wrapping the base drm_crtc. */
struct mdp5_crtc {
        struct drm_crtc base;
        int id;                 /* crtc index within the kms device */
        bool enabled;           /* set/cleared in atomic_enable/disable */

        spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;

        /* Bits have been flushed at the last commit,
         * used to decide if a vsync has happened since last commit.
         */
        u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
        /* bitmask of work to complete on the next vblank (see request_pending) */
        atomic_t pending;

        /* for unref'ing cursor bo's after scanout completes: */
        struct drm_flip_work unref_cursor_work;

        /* irq callbacks registered with the mdp irq controller */
        struct mdp_irq vblank;
        struct mdp_irq err;
        struct mdp_irq pp_done;         /* command-mode ping-pong done */

        /* signalled on pp_done; re-armed before each command-mode flush */
        struct completion pp_completion;

        /* true when the LM (layer mixer) hardware cursor is in use */
        bool lm_cursor_enabled;

        struct {
                /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
                spinlock_t lock;

                /* current cursor being scanned out: */
                struct drm_gem_object *scanout_bo;
                uint64_t iova;
                uint32_t width, height;
                uint32_t x, y;
        } cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
72
73 static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
74
75 static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
76 {
77         struct msm_drm_private *priv = crtc->dev->dev_private;
78         return to_mdp5_kms(to_mdp_kms(priv->kms));
79 }
80
81 static void request_pending(struct drm_crtc *crtc, uint32_t pending)
82 {
83         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
84
85         atomic_or(pending, &mdp5_crtc->pending);
86         mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
87 }
88
89 static void request_pp_done_pending(struct drm_crtc *crtc)
90 {
91         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
92         reinit_completion(&mdp5_crtc->pp_completion);
93 }
94
95 static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
96 {
97         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
98         struct mdp5_ctl *ctl = mdp5_cstate->ctl;
99         struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
100
101         DBG("%s: flush=%08x", crtc->name, flush_mask);
102         return mdp5_ctl_commit(ctl, pipeline, flush_mask);
103 }
104
105 /*
106  * flush updates, to make sure hw is updated to new scanout fb,
107  * so that we can safely queue unref to current fb (ie. next
108  * vblank we know hw is done w/ previous scanout_fb).
109  */
110 static u32 crtc_flush_all(struct drm_crtc *crtc)
111 {
112         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
113         struct mdp5_hw_mixer *mixer, *r_mixer;
114         struct drm_plane *plane;
115         uint32_t flush_mask = 0;
116
117         /* this should not happen: */
118         if (WARN_ON(!mdp5_cstate->ctl))
119                 return 0;
120
121         drm_atomic_crtc_for_each_plane(plane, crtc) {
122                 if (!plane->state->visible)
123                         continue;
124                 flush_mask |= mdp5_plane_get_flush(plane);
125         }
126
127         mixer = mdp5_cstate->pipeline.mixer;
128         flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
129
130         r_mixer = mdp5_cstate->pipeline.r_mixer;
131         if (r_mixer)
132                 flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
133
134         return crtc_flush(crtc, flush_mask);
135 }
136
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        unsigned long flags;

        /* Take and clear the pending event under the event lock, so it
         * cannot be sent twice by a racing completion path.
         */
        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
        if (event) {
                mdp5_crtc->event = NULL;
                DBG("%s: send event: %p", crtc->name, event);
                drm_crtc_send_vblank_event(crtc, event);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        /* If the CRTC is being disabled, detach all layers from the CTL */
        if (ctl && !crtc->state->enable) {
                /* set STAGE_UNUSED for all layers */
                mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
                /* XXX: What to do here? */
                /* mdp5_crtc->ctl = NULL; */
        }
}
164
165 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
166 {
167         struct mdp5_crtc *mdp5_crtc =
168                 container_of(work, struct mdp5_crtc, unref_cursor_work);
169         struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
170         struct msm_kms *kms = &mdp5_kms->base.base;
171
172         msm_gem_put_iova(val, kms->aspace);
173         drm_gem_object_put_unlocked(val);
174 }
175
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        /* Tear down the drm core state and the cursor-unref work queue
         * before freeing the containing structure.
         */
        drm_crtc_cleanup(crtc);
        drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

        kfree(mdp5_crtc);
}
185
186 static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
187 {
188         switch (stage) {
189         case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
190         case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
191         case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
192         case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
193         case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
194         case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
195         case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
196         default:
197                 return 0;
198         }
199 }
200
201 /*
202  * left/right pipe offsets for the stage array used in blend_setup()
203  */
204 #define PIPE_LEFT       0
205 #define PIPE_RIGHT      1
206
207 /*
208  * blend_setup() - blend all the planes of a CRTC
209  *
210  * If no base layer is available, border will be enabled as the base layer.
211  * Otherwise all layers will be blended based on their stage calculated
212  * in mdp5_crtc_atomic_check.
213  */
static void blend_setup(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
        /* plane states indexed by their assigned blend stage */
        struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
        const struct mdp_format *format;
        struct mdp5_hw_mixer *mixer = pipeline->mixer;
        uint32_t lm = mixer->lm;
        struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
        uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
        unsigned long flags;
        /* pipe(s) staged per stage, for the left and (optional) right mixer */
        enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
        enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
        int i, plane_cnt = 0;
        bool bg_alpha_enabled = false;
        u32 mixer_op_mode = 0;
        u32 val;
/* stage enum -> zero-based blender index within the LM */
#define blender(stage)  ((stage) - STAGE0)

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

        /* ctl could be released already when we are shutting down: */
        /* XXX: Can this happen now? */
        if (!ctl)
                goto out;

        /* Collect all plane information */
        drm_atomic_crtc_for_each_plane(plane, crtc) {
                enum mdp5_pipe right_pipe;

                if (!plane->state->visible)
                        continue;

                pstate = to_mdp5_plane_state(plane->state);
                pstates[pstate->stage] = pstate;
                stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
                /*
                 * if we have a right mixer, stage the same pipe as we
                 * have on the left mixer
                 */
                if (r_mixer)
                        r_stage[pstate->stage][PIPE_LEFT] =
                                                mdp5_plane_pipe(plane);
                /*
                 * if we have a right pipe (i.e, the plane comprises of 2
                 * hwpipes, then stage the right pipe on the right side of both
                 * the layer mixers
                 */
                right_pipe = mdp5_plane_right_pipe(plane);
                if (right_pipe) {
                        stage[pstate->stage][PIPE_RIGHT] = right_pipe;
                        r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
                }

                plane_cnt++;
        }

        /* No base-stage plane: use border color as the bottom-most layer */
        if (!pstates[STAGE_BASE]) {
                ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
                DBG("Border Color is enabled");
        } else if (plane_cnt) {
                format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

                if (format->alpha_enable)
                        bg_alpha_enabled = true;
        }

        /* The reset for blending */
        for (i = STAGE0; i <= STAGE_MAX; i++) {
                if (!pstates[i])
                        continue;

                format = to_mdp_format(
                        msm_framebuffer_format(pstates[i]->base.fb));
                plane = pstates[i]->base.plane;
                /* default: constant fg/bg alpha from the plane's alpha prop */
                blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                        MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
                fg_alpha = pstates[i]->alpha;
                bg_alpha = 0xFF - pstates[i]->alpha;

                /* NOTE(review): this clears any previously accumulated
                 * per-stage FG-alpha bits when an opaque layer sits over an
                 * alpha-enabled base - presumably intentional; confirm.
                 */
                if (!format->alpha_enable && bg_alpha_enabled)
                        mixer_op_mode = 0;
                else
                        mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

                DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

                /* premultiplied pixel alpha: bg uses fg's pixel alpha */
                if (format->alpha_enable && pstates[i]->premultiplied) {
                        blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
                        if (fg_alpha != 0xff) {
                                bg_alpha = fg_alpha;
                                blend_op |=
                                        MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
                                        MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
                        } else {
                                blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
                        }
                } else if (format->alpha_enable) {
                        /* straight (non-premultiplied) per-pixel alpha */
                        blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
                        if (fg_alpha != 0xff) {
                                bg_alpha = fg_alpha;
                                blend_op |=
                                       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
                                       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
                                       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
                                       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
                        } else {
                                blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
                        }
                }

                /* program the blender for this stage; mirror to the right
                 * mixer when running in source-split mode
                 */
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
                                blender(i)), blend_op);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
                                blender(i)), fg_alpha);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
                                blender(i)), bg_alpha);
                if (r_mixer) {
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
                                        blender(i)), blend_op);
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
                                        blender(i)), fg_alpha);
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
                                        blender(i)), bg_alpha);
                }
        }

        /* merge the accumulated per-stage FG-alpha bits into COLOR_OUT */
        val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
                   val | mixer_op_mode);
        if (r_mixer) {
                val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
                           val | mixer_op_mode);
        }

        mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
                       ctl_blend_flags);
out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
365
/*
 * Program the layer mixer(s) output size from the adjusted mode, and
 * mark left/right mixer roles for source-split (dual-LM) operation.
 */
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
        struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
        uint32_t lm = mixer->lm;
        u32 mixer_width, val;
        unsigned long flags;
        struct drm_display_mode *mode;

        if (WARN_ON(!crtc->state))
                return;

        mode = &crtc->state->adjusted_mode;

        DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
                        crtc->name, mode->base.id, mode->name,
                        mode->vrefresh, mode->clock,
                        mode->hdisplay, mode->hsync_start,
                        mode->hsync_end, mode->htotal,
                        mode->vdisplay, mode->vsync_start,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);

        /* with a right mixer, each LM handles half the display width */
        mixer_width = mode->hdisplay;
        if (r_mixer)
                mixer_width /= 2;

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
                        MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
                        MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

        /* Assign mixer to LEFT side in source split mode */
        val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
        val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

        if (r_mixer) {
                u32 r_lm = r_mixer->lm;

                mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
                           MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
                           MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

                /* Assign mixer to RIGHT side in source split mode */
                val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
                val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
        }

        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
421
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct device *dev = &mdp5_kms->pdev->dev;

        DBG("%s", crtc->name);

        if (WARN_ON(!mdp5_crtc->enabled))
                return;

        /* Disable/save vblank irq handling before power is disabled */
        drm_crtc_vblank_off(crtc);

        /* pp_done is only registered while a command-mode panel is active */
        if (mdp5_cstate->cmd_mode)
                mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
        /* drop the runtime-PM reference taken in atomic_enable */
        pm_runtime_put_sync(dev);

        mdp5_crtc->enabled = false;
}
446
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct device *dev = &mdp5_kms->pdev->dev;

        DBG("%s", crtc->name);

        if (WARN_ON(mdp5_crtc->enabled))
                return;

        /* power up before touching any registers; balanced by the
         * pm_runtime_put_sync() in atomic_disable
         */
        pm_runtime_get_sync(dev);

        if (mdp5_crtc->lm_cursor_enabled) {
                /*
                 * Restore LM cursor state, as it might have been lost
                 * with suspend:
                 */
                if (mdp5_crtc->cursor.iova) {
                        unsigned long flags;

                        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
                        mdp5_crtc_restore_cursor(crtc);
                        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

                        mdp5_ctl_set_cursor(mdp5_cstate->ctl,
                                            &mdp5_cstate->pipeline, 0, true);
                } else {
                        mdp5_ctl_set_cursor(mdp5_cstate->ctl,
                                            &mdp5_cstate->pipeline, 0, false);
                }
        }

        /* Restore vblank irq handling after power is enabled */
        drm_crtc_vblank_on(crtc);

        mdp5_crtc_mode_set_nofb(crtc);

        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

        /* command-mode panels additionally need the ping-pong done irq */
        if (mdp5_cstate->cmd_mode)
                mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

        mdp5_crtc->enabled = true;
}
494
/*
 * mdp5_crtc_setup_pipeline() - (re)assign the hw mixer(s) for a CRTC
 * @crtc: the CRTC being checked
 * @new_crtc_state: the new atomic state for @crtc
 * @need_right_mixer: true when a second (right) mixer is required
 *
 * Called from atomic check: acquires/releases mixers in the atomic state
 * as needed, then derives the per-state irq masks and command-mode flag
 * from the interface already picked by the encoder.
 *
 * Returns 0 on success or a negative error code if no suitable mixer
 * could be assigned.
 */
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
                             struct drm_crtc_state *new_crtc_state,
                             bool need_right_mixer)
{
        struct mdp5_crtc_state *mdp5_cstate =
                        to_mdp5_crtc_state(new_crtc_state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_interface *intf;
        bool new_mixer = false;

        /* no mixer yet, or the left/right topology changed */
        new_mixer = !pipeline->mixer;

        if ((need_right_mixer && !pipeline->r_mixer) ||
            (!need_right_mixer && pipeline->r_mixer))
                new_mixer = true;

        if (new_mixer) {
                struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
                struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
                u32 caps;
                int ret;

                caps = MDP_LM_CAP_DISPLAY;
                /* paired mixers are needed for source-split */
                if (need_right_mixer)
                        caps |= MDP_LM_CAP_PAIR;

                ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
                                        &pipeline->mixer, need_right_mixer ?
                                        &pipeline->r_mixer : NULL);
                if (ret)
                        return ret;

                /* release the previously held mixer(s) back to the pool */
                mdp5_mixer_release(new_crtc_state->state, old_mixer);
                if (old_r_mixer) {
                        mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
                        if (!need_right_mixer)
                                pipeline->r_mixer = NULL;
                }
        }

        /*
         * these should have been already set up in the encoder's atomic
         * check (called by drm_atomic_helper_check_modeset)
         */
        intf = pipeline->intf;

        mdp5_cstate->err_irqmask = intf2err(intf->num);
        mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

        /* DSI command-mode panels signal frame completion via pp_done */
        if ((intf->type == INTF_DSI) &&
            (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
                mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
                mdp5_cstate->cmd_mode = true;
        } else {
                mdp5_cstate->pp_done_irqmask = 0;
                mdp5_cstate->cmd_mode = false;
        }

        return 0;
}
555
/* Plane/new-state pair, used for zpos sorting in mdp5_crtc_atomic_check() */
struct plane_state {
        struct drm_plane *plane;
        struct mdp5_plane_state *state;
};
560
561 static int pstate_cmp(const void *a, const void *b)
562 {
563         struct plane_state *pa = (struct plane_state *)a;
564         struct plane_state *pb = (struct plane_state *)b;
565         return pa->state->zpos - pb->state->zpos;
566 }
567
568 /* is there a helper for this? */
569 static bool is_fullscreen(struct drm_crtc_state *cstate,
570                 struct drm_plane_state *pstate)
571 {
572         return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
573                 ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
574                 ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
575 }
576
577 static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
578                                         struct drm_crtc_state *new_crtc_state,
579                                         struct drm_plane_state *bpstate)
580 {
581         struct mdp5_crtc_state *mdp5_cstate =
582                         to_mdp5_crtc_state(new_crtc_state);
583
584         /*
585          * if we're in source split mode, it's mandatory to have
586          * border out on the base stage
587          */
588         if (mdp5_cstate->pipeline.r_mixer)
589                 return STAGE0;
590
591         /* if the bottom-most layer is not fullscreen, we need to use
592          * it for solid-color:
593          */
594         if (!is_fullscreen(new_crtc_state, bpstate))
595                 return STAGE0;
596
597         return STAGE_BASE;
598 }
599
600 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
601                 struct drm_crtc_state *state)
602 {
603         struct mdp5_kms *mdp5_kms = get_kms(crtc);
604         struct drm_plane *plane;
605         struct drm_device *dev = crtc->dev;
606         struct plane_state pstates[STAGE_MAX + 1];
607         const struct mdp5_cfg_hw *hw_cfg;
608         const struct drm_plane_state *pstate;
609         const struct drm_display_mode *mode = &state->adjusted_mode;
610         bool cursor_plane = false;
611         bool need_right_mixer = false;
612         int cnt = 0, i;
613         int ret;
614         enum mdp_mixer_stage_id start;
615
616         DBG("%s: check", crtc->name);
617
618         drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
619                 if (!pstate->visible)
620                         continue;
621
622                 pstates[cnt].plane = plane;
623                 pstates[cnt].state = to_mdp5_plane_state(pstate);
624
625                 /*
626                  * if any plane on this crtc uses 2 hwpipes, then we need
627                  * the crtc to have a right hwmixer.
628                  */
629                 if (pstates[cnt].state->r_hwpipe)
630                         need_right_mixer = true;
631                 cnt++;
632
633                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
634                         cursor_plane = true;
635         }
636
637         /* bail out early if there aren't any planes */
638         if (!cnt)
639                 return 0;
640
641         hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
642
643         /*
644          * we need a right hwmixer if the mode's width is greater than a single
645          * LM's max width
646          */
647         if (mode->hdisplay > hw_cfg->lm.max_width)
648                 need_right_mixer = true;
649
650         ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
651         if (ret) {
652                 dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
653                 return ret;
654         }
655
656         /* assign a stage based on sorted zpos property */
657         sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
658
659         /* trigger a warning if cursor isn't the highest zorder */
660         WARN_ON(cursor_plane &&
661                 (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
662
663         start = get_start_stage(crtc, state, &pstates[0].state->base);
664
665         /* verify that there are not too many planes attached to crtc
666          * and that we don't have conflicting mixer stages:
667          */
668         if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
669                 dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
670                         cnt, start);
671                 return -EINVAL;
672         }
673
674         for (i = 0; i < cnt; i++) {
675                 if (cursor_plane && (i == (cnt - 1)))
676                         pstates[i].state->stage = hw_cfg->lm.nb_stages;
677                 else
678                         pstates[i].state->stage = start + i;
679                 DBG("%s: assign pipe %s on stage=%d", crtc->name,
680                                 pstates[i].plane->name,
681                                 pstates[i].state->stage);
682         }
683
684         return 0;
685 }
686
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
{
        /* nothing to prepare before plane updates; just trace */
        DBG("%s: begin", crtc->name);
}
692
/*
 * Atomic flush: latch the pending vblank event, program blending, and
 * kick the hardware flush so the new state takes effect on next vsync.
 */
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        DBG("%s: event: %p", crtc->name, crtc->state->event);

        /* a previous flip's event should have been consumed by now */
        WARN_ON(mdp5_crtc->event);

        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        /*
         * If no CTL has been allocated in mdp5_crtc_atomic_check(),
         * it means we are trying to flush a CRTC whose state is disabled:
         * nothing else needs to be done.
         */
        /* XXX: Can this happen now ? */
        if (unlikely(!mdp5_cstate->ctl))
                return;

        blend_setup(crtc);

        /* PP_DONE irq is only used by command mode for now.
         * It is better to request pending before FLUSH and START trigger
         * to make sure no pp_done irq missed.
         * This is safe because no pp_done will happen before SW trigger
         * in command mode.
         */
        if (mdp5_cstate->cmd_mode)
                request_pp_done_pending(crtc);

        /* remember what was flushed, to detect the completing vsync later */
        mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

        /* XXX are we leaking out state here? */
        mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
        mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
        mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

        request_pending(crtc, PENDING_FLIP);
}
738
739 static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
740 {
741         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
742         uint32_t xres = crtc->mode.hdisplay;
743         uint32_t yres = crtc->mode.vdisplay;
744
745         /*
746          * Cursor Region Of Interest (ROI) is a plane read from cursor
747          * buffer to render. The ROI region is determined by the visibility of
748          * the cursor point. In the default Cursor image the cursor point will
749          * be at the top left of the cursor image, unless it is specified
750          * otherwise using hotspot feature.
751          *
752          * If the cursor point reaches the right (xres - x < cursor.width) or
753          * bottom (yres - y < cursor.height) boundary of the screen, then ROI
754          * width and ROI height need to be evaluated to crop the cursor image
755          * accordingly.
756          * (xres-x) will be new cursor width when x > (xres - cursor.width)
757          * (yres-y) will be new cursor height when y > (yres - cursor.height)
758          */
759         *roi_w = min(mdp5_crtc->cursor.width, xres -
760                         mdp5_crtc->cursor.x);
761         *roi_h = min(mdp5_crtc->cursor.height, yres -
762                         mdp5_crtc->cursor.y);
763 }
764
/*
 * Reprogram the LM cursor registers from the cached cursor state.
 * Caller must hold mdp5_crtc->cursor.lock.
 */
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
        uint32_t blendcfg, stride;
        uint32_t x, y, width, height;
        uint32_t roi_w, roi_h;
        int lm;

        assert_spin_locked(&mdp5_crtc->cursor.lock);

        lm = mdp5_cstate->pipeline.mixer->lm;

        x = mdp5_crtc->cursor.x;
        y = mdp5_crtc->cursor.y;
        width = mdp5_crtc->cursor.width;
        height = mdp5_crtc->cursor.height;

        /* cursor image is always ARGB8888 (4 bytes per pixel) */
        stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

        get_roi(crtc, &roi_w, &roi_h);

        /* full image geometry, then the (possibly edge-clipped) ROI */
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                        MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
                        MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
                        MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
                        MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
                        MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
                        MDP5_LM_CURSOR_START_XY_Y_START(y) |
                        MDP5_LM_CURSOR_START_XY_X_START(x));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
                        mdp5_crtc->cursor.iova);

        /* enable blending with per-pixel alpha from the cursor image */
        blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
        blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}
808
809 static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
810                 struct drm_file *file, uint32_t handle,
811                 uint32_t width, uint32_t height)
812 {
813         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
814         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
815         struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
816         struct drm_device *dev = crtc->dev;
817         struct mdp5_kms *mdp5_kms = get_kms(crtc);
818         struct platform_device *pdev = mdp5_kms->pdev;
819         struct msm_kms *kms = &mdp5_kms->base.base;
820         struct drm_gem_object *cursor_bo, *old_bo = NULL;
821         struct mdp5_ctl *ctl;
822         int ret;
823         uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
824         bool cursor_enable = true;
825         unsigned long flags;
826
827         if (!mdp5_crtc->lm_cursor_enabled) {
828                 dev_warn(dev->dev,
829                          "cursor_set is deprecated with cursor planes\n");
830                 return -EINVAL;
831         }
832
833         if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
834                 dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
835                 return -EINVAL;
836         }
837
838         ctl = mdp5_cstate->ctl;
839         if (!ctl)
840                 return -EINVAL;
841
842         /* don't support LM cursors when we we have source split enabled */
843         if (mdp5_cstate->pipeline.r_mixer)
844                 return -EINVAL;
845
846         if (!handle) {
847                 DBG("Cursor off");
848                 cursor_enable = false;
849                 mdp5_crtc->cursor.iova = 0;
850                 pm_runtime_get_sync(&pdev->dev);
851                 goto set_cursor;
852         }
853
854         cursor_bo = drm_gem_object_lookup(file, handle);
855         if (!cursor_bo)
856                 return -ENOENT;
857
858         ret = msm_gem_get_iova(cursor_bo, kms->aspace,
859                         &mdp5_crtc->cursor.iova);
860         if (ret)
861                 return -EINVAL;
862
863         pm_runtime_get_sync(&pdev->dev);
864
865         spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
866         old_bo = mdp5_crtc->cursor.scanout_bo;
867
868         mdp5_crtc->cursor.scanout_bo = cursor_bo;
869         mdp5_crtc->cursor.width = width;
870         mdp5_crtc->cursor.height = height;
871
872         mdp5_crtc_restore_cursor(crtc);
873
874         spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
875
876 set_cursor:
877         ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
878         if (ret) {
879                 dev_err(dev->dev, "failed to %sable cursor: %d\n",
880                                 cursor_enable ? "en" : "dis", ret);
881                 goto end;
882         }
883
884         crtc_flush(crtc, flush_mask);
885
886 end:
887         pm_runtime_put_sync(&pdev->dev);
888         if (old_bo) {
889                 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
890                 /* enable vblank to complete cursor work: */
891                 request_pending(crtc, PENDING_CURSOR);
892         }
893         return ret;
894 }
895
/*
 * Legacy (non-atomic) cursor-move entry point: cache the new position
 * (clamped to the top-left screen edge), reprogram the LM cursor
 * registers under the cursor lock and flush.
 *
 * Returns 0 on success or a negative errno.
 */
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* clamp negative coordinates to 0 (partially off-screen top/left) */
	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}
938
939 static void
940 mdp5_crtc_atomic_print_state(struct drm_printer *p,
941                              const struct drm_crtc_state *state)
942 {
943         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
944         struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
945         struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
946
947         if (WARN_ON(!pipeline))
948                 return;
949
950         drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
951                         pipeline->mixer->name : "(null)");
952
953         if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
954                 drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
955                            pipeline->r_mixer->name : "(null)");
956 }
957
958 static void mdp5_crtc_reset(struct drm_crtc *crtc)
959 {
960         struct mdp5_crtc_state *mdp5_cstate;
961
962         if (crtc->state) {
963                 __drm_atomic_helper_crtc_destroy_state(crtc->state);
964                 kfree(to_mdp5_crtc_state(crtc->state));
965         }
966
967         mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
968
969         if (mdp5_cstate) {
970                 mdp5_cstate->base.crtc = crtc;
971                 crtc->state = &mdp5_cstate->base;
972         }
973 }
974
975 static struct drm_crtc_state *
976 mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
977 {
978         struct mdp5_crtc_state *mdp5_cstate;
979
980         if (WARN_ON(!crtc->state))
981                 return NULL;
982
983         mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
984                               sizeof(*mdp5_cstate), GFP_KERNEL);
985         if (!mdp5_cstate)
986                 return NULL;
987
988         __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
989
990         return &mdp5_cstate->base;
991 }
992
/* Release a CRTC state: tear down the drm core part, then free the wrapper. */
static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(cstate);
}
1001
/*
 * CRTC vtable: atomic helpers for modeset/page-flip, plus legacy LM
 * cursor hooks (used only when no cursor plane is available).
 */
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};
1013
/* Atomic-commit helper hooks for this CRTC. */
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
};
1022
1023 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
1024 {
1025         struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
1026         struct drm_crtc *crtc = &mdp5_crtc->base;
1027         struct msm_drm_private *priv = crtc->dev->dev_private;
1028         unsigned pending;
1029
1030         mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
1031
1032         pending = atomic_xchg(&mdp5_crtc->pending, 0);
1033
1034         if (pending & PENDING_FLIP) {
1035                 complete_flip(crtc, NULL);
1036         }
1037
1038         if (pending & PENDING_CURSOR)
1039                 drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
1040 }
1041
/* Error interrupt: nothing to recover, just log the raw status. */
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}
1048
/* Ping-pong done interrupt (cmd-mode panels): wake the waiter. */
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}
1056
1057 static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
1058 {
1059         struct drm_device *dev = crtc->dev;
1060         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
1061         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1062         int ret;
1063
1064         ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
1065                                                 msecs_to_jiffies(50));
1066         if (ret == 0)
1067                 dev_warn(dev->dev, "pp done time out, lm=%d\n",
1068                          mdp5_cstate->pipeline.mixer->lm);
1069 }
1070
/*
 * Wait (up to 50ms) until the flush bits set at the last commit
 * (mdp5_crtc->flushed_mask) have been consumed by the hardware, i.e.
 * a vsync has latched the commit. Used on video-mode interfaces.
 */
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	/* keep vblank irqs enabled so the waitqueue below gets woken */
	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
1098
1099 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
1100 {
1101         struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
1102         return mdp5_crtc->vblank.irqmask;
1103 }
1104
/*
 * Push the CRTC's current pipeline (mixer assignment etc.) down to its
 * CTL block, after syncing the irq mask.
 */
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere ? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}
1115
1116 struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
1117 {
1118         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1119
1120         return mdp5_cstate->ctl;
1121 }
1122
1123 struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
1124 {
1125         struct mdp5_crtc_state *mdp5_cstate;
1126
1127         if (WARN_ON(!crtc))
1128                 return ERR_PTR(-EINVAL);
1129
1130         mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1131
1132         return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
1133                 ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
1134 }
1135
1136 struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
1137 {
1138         struct mdp5_crtc_state *mdp5_cstate;
1139
1140         if (WARN_ON(!crtc))
1141                 return ERR_PTR(-EINVAL);
1142
1143         mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1144
1145         return &mdp5_cstate->pipeline;
1146 }
1147
1148 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
1149 {
1150         struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1151
1152         if (mdp5_cstate->cmd_mode)
1153                 mdp5_crtc_wait_for_pp_done(crtc);
1154         else
1155                 mdp5_crtc_wait_for_flush_done(crtc);
1156 }
1157
1158 /* initialize crtc */
1159 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
1160                                 struct drm_plane *plane,
1161                                 struct drm_plane *cursor_plane, int id)
1162 {
1163         struct drm_crtc *crtc = NULL;
1164         struct mdp5_crtc *mdp5_crtc;
1165
1166         mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
1167         if (!mdp5_crtc)
1168                 return ERR_PTR(-ENOMEM);
1169
1170         crtc = &mdp5_crtc->base;
1171
1172         mdp5_crtc->id = id;
1173
1174         spin_lock_init(&mdp5_crtc->lm_lock);
1175         spin_lock_init(&mdp5_crtc->cursor.lock);
1176         init_completion(&mdp5_crtc->pp_completion);
1177
1178         mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
1179         mdp5_crtc->err.irq = mdp5_crtc_err_irq;
1180         mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
1181
1182         mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
1183
1184         drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
1185                                   &mdp5_crtc_funcs, NULL);
1186
1187         drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
1188                         "unref cursor", unref_cursor_worker);
1189
1190         drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
1191         plane->crtc = crtc;
1192
1193         return crtc;
1194 }