/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/sort.h>

#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>

#include "mdp5_kms.h"
#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */

	/* current cursor being scanned out: */
	struct drm_gem_object *scanout_bo;
	uint32_t width, height;

#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
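	/* Arm the vblank irq so the pending flags just latched get serviced
	 * in mdp5_crtc_vblank_irq() at the next vblank.
	 */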
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
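	/* Re-arm the completion so a subsequent mdp5_crtc_wait_for_pp_done()
	 * blocks until the next PP_DONE irq.
	 */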
	reinit_completion(&mdp5_crtc->pp_completion);
}
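
/* Push a flush mask to the CRTC's CTL block. The value returned by
 * mdp5_ctl_commit() is what mdp5_crtc_atomic_flush() records in
 * flushed_mask and later waits on in mdp5_crtc_wait_for_flush_done().
 */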
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;

	DBG("%s: flush=%08x", crtc->name, flush_mask);
	return mdp5_ctl_commit(ctl, pipeline, flush_mask);
}
/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (!plane->state->visible)
			continue;
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}
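
/* drm_flip_work callback: once the cursor bo is no longer being scanned out,
 * drop its iova mapping and its reference.
 */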
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_put_iova(val, kms->aspace);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}
/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1
/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
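
	/* LM blender instances are indexed from 0 while stage ids start at
	 * STAGE0, so translate the stage to a blender index before writing
	 * the per-blender registers below.
	 */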
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;
	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
				mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e. the plane consists of 2
		 * hwpipes), then stage the right pipe on the right side of
		 * both layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}
	/* Reset and program the blend configuration for each used stage */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
				MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
				MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_autosuspend(dev);

	mdp5_crtc->enabled = false;
}
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
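
/* Assign hardware mixer(s) to the CRTC's pipeline, re-allocating them when
 * the need for a right mixer changes, and derive the vblank/err/pp_done irq
 * masks from the interface that the encoder's atomic check picked.
 */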
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should already have been set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};
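
/* sort() comparator: order plane states by zpos, bottom-most first */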
static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
		struct drm_crtc_state *new_crtc_state,
		struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to use
	 * it for solid-color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	/*
	 * we need a right hwmixer if the mode's width is greater than a
	 * single LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;
	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
	if (ret) {
		dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
				pstates[i].plane->name,
				pstates[i].state->stage);
	}

	return 0;
}
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);
	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);
	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;
	/*
	 * Cursor Region Of Interest (ROI) is a plane read from the cursor
	 * buffer to render. The ROI region is determined by the visibility of
	 * the cursor point. In the default cursor image the cursor point will
	 * be at the top left of the cursor image, unless it is specified
	 * otherwise using the hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly:
	 * (xres - x) will be the new cursor width when x > (xres - cursor.width)
	 * (yres - y) will be the new cursor height when y > (yres - cursor.height)
	 */
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, stride;
	uint64_t cursor_addr;
	struct mdp5_ctl *ctl;
	uint32_t lm;
	int ret;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;
	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		cursor_enable = false;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
	if (ret)
		return -EINVAL;

	lm = mdp5_cstate->pipeline.mixer->lm;
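	/* cursor scanout is always ARGB8888, i.e. 4 bytes per pixel */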
	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	pm_runtime_put_autosuspend(&pdev->dev);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_autosuspend(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}

	return ret;
}
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	unsigned long flags;
	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;
	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);

	return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
				pipeline->r_mixer->name : "(null)");
}
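
/* drm_crtc_funcs::reset - throw away any existing state and start over with
 * a zero-initialized mdp5_crtc_state.
 */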
static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(to_mdp5_crtc_state(crtc->state));
	}

	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
	if (mdp5_cstate) {
		mdp5_cstate->base.crtc = crtc;
		crtc->state = &mdp5_cstate->base;
	}
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
};
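
/* vblank irq handler: disarms itself, then completes a pending page flip (if
 * any) and commits the deferred cursor bo cleanup work.
 */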
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}
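
/* For command-mode panels: block (up to 50 ms) until the PP_DONE irq signals
 * that the frame has been pushed out to the panel.
 */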
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n",
			 mdp5_cstate->pipeline.mixer->lm);
}
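
/* Wait (up to 50 ms) for the flush bits recorded at the last commit to clear
 * from the CTL's commit status, i.e. for the hw to latch the new config.
 */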
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}
struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

	if (cursor_plane)
		drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
				&mdp5_crtc_no_lm_cursor_funcs, NULL);
	else
		drm_crtc_init_with_planes(dev, crtc, plane, NULL,
				&mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}