// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED	0
#define DPU_DRM_BLEND_OP_OPAQUE		1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED	2
#define DPU_DRM_BLEND_OP_COVERAGE	3
#define DPU_DRM_BLEND_OP_MAX		4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

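/*
 * Note: at a nominal 60 Hz refresh a vsync period is ~16.7 ms, so the 60 ms
 * frame-done timeout above allows roughly three to four missed frames before
 * _dpu_crtc_wait_for_frame_done() gives up and reports an error.
 */
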
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	DPU_DEBUG("\n");

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

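/*
 * Per-stage blend configuration. Reading the DPU_BLEND_* flag names, coverage
 * blending computes out = fg.alpha * fg + (1 - fg.alpha) * bg, while the
 * opaque default uses the constant FG/BG alphas programmed below (0xFF and 0).
 * This is an interpretation of the flag names; the hardware documentation is
 * the authoritative source for the exact blend equations.
 */
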
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

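/*
 * Each layer mixer covers one horizontal slice of the display. The slices
 * (lm_bounds) are computed in _dpu_crtc_setup_lm_bounds(); here each visible
 * slice is programmed into its mixer, with cfg.right_mixer taken from the
 * mixer's horizontal position (0 = left-most slice).
 */
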
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	dpu_crtc = to_dpu_crtc(crtc);
	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

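/*
 * Walk the planes attached to the CRTC and build the mixer stage table:
 * stage_cfg->stage[blend stage][slot] holds the SSPP pipe feeding that
 * stage, with up to two slots per stage (e.g. a left and a right pipe when
 * source split or multirect is in use).
 */
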
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

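/*
 * Note on flushing: update_pending_flush() above only accumulates bits in
 * the CTL's pending flush mask; the hardware latches the new configuration
 * when the flush is actually triggered later in the kickoff path.
 */
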
/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode.
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	/* return the event object to the free list for reuse */
	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

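/*
 * Frame events are delivered from IRQ or other atomic context, so the
 * callback below must not allocate: it pulls a pre-allocated event from
 * dpu_crtc->frame_event_list (populated in dpu_crtc_init()) and queues the
 * actual processing onto the per-CRTC event thread.
 */
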
/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
 * from different context - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in proper task context
 * to avoid scheduling delay or properly manage the irq context's bottom half
 * processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

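/*
 * For example, a 1080x1920 mode driven by two mixers yields
 * crtc_split_width = 540: lm_bounds[0] = (0, 0)-(540, 1920) and
 * lm_bounds[1] = (540, 0)-(1080, 1920), one vertical strip per mixer.
 */
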
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	smmu_state = &dpu_crtc->smmu_state;

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

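/*
 * The atomic sequence for a commit is: atomic_begin() above stages the mixer
 * configuration, atomic_flush() below flushes the planes, and the actual
 * hardware FLUSH/START trigger happens later in dpu_crtc_commit_kickoff(),
 * called from the outer commit path.
 */
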
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before crtc's "flush
	 * everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

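/*
 * frame_done_comp pairing: the completion is re-armed in
 * dpu_crtc_commit_kickoff() below and completed by
 * dpu_crtc_frame_event_work() once the encoder reports frame done (or an
 * error), which is what the wait above blocks on.
 */
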
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 * Returns: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	unsigned long flags;
	bool release_bandwidth = false;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	bool request_bandwidth = false;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	priv = crtc->dev->dev_private;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

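/*
 * Scratch bookkeeping for dpu_crtc_atomic_check(): stage mirrors the plane's
 * normalized_zpos and pipe_id identifies the SSPP, so staged planes can be
 * validated per blend stage and paired up for multirect.
 */
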
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}

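	/*
	 * Derived fps: vblank_cb_time marks the first vblank after the last
	 * reset, so fps = vblank_cb_count * 1000 / elapsed_ms. For example,
	 * 600 vblank callbacks over 10000 ms reads out as 60 fps.
	 */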
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

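/*
 * The macro above expands to dpu_crtc_debugfs_state_open() and
 * dpu_crtc_debugfs_state_fops, the file_operations wired up for the "state"
 * debugfs node in _dpu_crtc_init_debugfs() below.
 */
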
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	/* pre-allocate frame events so the frame event callback need not */
	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}