/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_rect.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_power_handle.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"
/* blend-operation values exposed via the CRTC blend property */
#define DPU_DRM_BLEND_OP_NOT_DEFINED	0
#define DPU_DRM_BLEND_OP_OPAQUE		1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED	2
#define DPU_DRM_BLEND_OP_COVERAGE	3
#define DPU_DRM_BLEND_OP_MAX		4
/* layer mixer index on dpu_crtc */
50 static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
51 struct drm_display_mode *mode)
53 return mode->hdisplay / cstate->num_mixers;
56 static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
58 struct msm_drm_private *priv = crtc->dev->dev_private;
60 return to_dpu_kms(priv->kms);
63 static void dpu_crtc_destroy(struct drm_crtc *crtc)
65 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
72 dpu_crtc->phandle = NULL;
74 drm_crtc_cleanup(crtc);
75 mutex_destroy(&dpu_crtc->crtc_lock);
79 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
80 struct dpu_plane_state *pstate, struct dpu_format *format)
82 struct dpu_hw_mixer *lm = mixer->hw_lm;
84 struct drm_format_name_buf format_name;
86 /* default to opaque blending */
87 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
88 DPU_BLEND_BG_ALPHA_BG_CONST;
90 if (format->alpha_enable) {
91 /* coverage blending */
92 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
93 DPU_BLEND_BG_ALPHA_FG_PIXEL |
94 DPU_BLEND_BG_INV_ALPHA;
97 lm->ops.setup_blend_config(lm, pstate->stage,
100 DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
101 drm_get_format_name(format->base.pixel_format, &format_name),
102 format->alpha_enable, blend_op);
105 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
107 struct dpu_crtc *dpu_crtc;
108 struct dpu_crtc_state *crtc_state;
109 int lm_idx, lm_horiz_position;
111 dpu_crtc = to_dpu_crtc(crtc);
112 crtc_state = to_dpu_crtc_state(crtc->state);
114 lm_horiz_position = 0;
115 for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
116 const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
117 struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
118 struct dpu_hw_mixer_cfg cfg;
120 if (!lm_roi || !drm_rect_visible(lm_roi))
123 cfg.out_width = drm_rect_width(lm_roi);
124 cfg.out_height = drm_rect_height(lm_roi);
125 cfg.right_mixer = lm_horiz_position++;
127 hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
131 static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
132 struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
134 struct drm_plane *plane;
135 struct drm_framebuffer *fb;
136 struct drm_plane_state *state;
137 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
138 struct dpu_plane_state *pstate = NULL;
139 struct dpu_format *format;
140 struct dpu_hw_ctl *ctl = mixer->lm_ctl;
141 struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
144 uint32_t stage_idx, lm_idx;
145 int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
146 bool bg_alpha_enable = false;
148 drm_atomic_crtc_for_each_plane(plane, crtc) {
149 state = plane->state;
153 pstate = to_dpu_plane_state(state);
156 dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
158 DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
162 dpu_plane_pipe(plane) - SSPP_VIG0,
163 state->fb ? state->fb->base.id : -1);
165 format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
167 if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
168 bg_alpha_enable = true;
170 stage_idx = zpos_cnt[pstate->stage]++;
171 stage_cfg->stage[pstate->stage][stage_idx] =
172 dpu_plane_pipe(plane);
173 stage_cfg->multirect_index[pstate->stage][stage_idx] =
174 pstate->multirect_index;
176 trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
177 state, pstate, stage_idx,
178 dpu_plane_pipe(plane) - SSPP_VIG0,
179 format->base.pixel_format,
180 fb ? fb->modifier : 0);
182 /* blend config update */
183 for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
184 _dpu_crtc_setup_blend_cfg(mixer + lm_idx,
187 mixer[lm_idx].flush_mask |= flush_mask;
189 if (bg_alpha_enable && !format->alpha_enable)
190 mixer[lm_idx].mixer_op_mode = 0;
192 mixer[lm_idx].mixer_op_mode |=
197 _dpu_crtc_program_lm_output_roi(crtc);
201 * _dpu_crtc_blend_setup - configure crtc mixers
202 * @crtc: Pointer to drm crtc structure
204 static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
206 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
207 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
208 struct dpu_crtc_mixer *mixer = cstate->mixers;
209 struct dpu_hw_ctl *ctl;
210 struct dpu_hw_mixer *lm;
213 DPU_DEBUG("%s\n", dpu_crtc->name);
215 for (i = 0; i < cstate->num_mixers; i++) {
216 if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
217 DPU_ERROR("invalid lm or ctl assigned to mixer\n");
220 mixer[i].mixer_op_mode = 0;
221 mixer[i].flush_mask = 0;
222 if (mixer[i].lm_ctl->ops.clear_all_blendstages)
223 mixer[i].lm_ctl->ops.clear_all_blendstages(
227 /* initialize stage cfg */
228 memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
230 _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
232 for (i = 0; i < cstate->num_mixers; i++) {
233 ctl = mixer[i].lm_ctl;
236 lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
238 mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
239 mixer[i].hw_lm->idx);
241 /* stage config flush mask */
242 ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
244 DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
245 mixer[i].hw_lm->idx - LM_0,
246 mixer[i].mixer_op_mode,
248 mixer[i].flush_mask);
250 ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
251 &dpu_crtc->stage_cfg);
256 * _dpu_crtc_complete_flip - signal pending page_flip events
257 * Any pending vblank events are added to the vblank_event_list
258 * so that the next vblank interrupt shall signal them.
259 * However PAGE_FLIP events are not handled through the vblank_event_list.
260 * This API signals any pending PAGE_FLIP events requested through
261 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
262 * @crtc: Pointer to drm crtc structure
264 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
266 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
267 struct drm_device *dev = crtc->dev;
270 spin_lock_irqsave(&dev->event_lock, flags);
271 if (dpu_crtc->event) {
272 DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
274 trace_dpu_crtc_complete_flip(DRMID(crtc));
275 drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
276 dpu_crtc->event = NULL;
278 spin_unlock_irqrestore(&dev->event_lock, flags);
281 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
283 struct drm_encoder *encoder;
285 if (!crtc || !crtc->dev) {
286 DPU_ERROR("invalid crtc\n");
287 return INTF_MODE_NONE;
290 drm_for_each_encoder(encoder, crtc->dev)
291 if (encoder->crtc == crtc)
292 return dpu_encoder_get_intf_mode(encoder);
294 return INTF_MODE_NONE;
297 static void dpu_crtc_vblank_cb(void *data)
299 struct drm_crtc *crtc = (struct drm_crtc *)data;
300 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
302 /* keep statistics on vblank callback - with auto reset via debugfs */
303 if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
304 dpu_crtc->vblank_cb_time = ktime_get();
306 dpu_crtc->vblank_cb_count++;
307 _dpu_crtc_complete_flip(crtc);
308 drm_crtc_handle_vblank(crtc);
309 trace_dpu_crtc_vblank_cb(DRMID(crtc));
312 static void dpu_crtc_frame_event_work(struct kthread_work *work)
314 struct dpu_crtc_frame_event *fevent = container_of(work,
315 struct dpu_crtc_frame_event, work);
316 struct drm_crtc *crtc = fevent->crtc;
317 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
319 bool frame_done = false;
321 DPU_ATRACE_BEGIN("crtc_frame_event");
323 DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
324 ktime_to_ns(fevent->ts));
326 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
327 | DPU_ENCODER_FRAME_EVENT_ERROR
328 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
330 if (atomic_read(&dpu_crtc->frame_pending) < 1) {
331 /* this should not happen */
332 DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
335 ktime_to_ns(fevent->ts),
336 atomic_read(&dpu_crtc->frame_pending));
337 } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
338 /* release bandwidth and other resources */
339 trace_dpu_crtc_frame_event_done(DRMID(crtc),
341 dpu_core_perf_crtc_release_bw(crtc);
343 trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
347 if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
348 dpu_core_perf_crtc_update(crtc, 0, false);
350 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
351 | DPU_ENCODER_FRAME_EVENT_ERROR))
355 if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
356 DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
357 crtc->base.id, ktime_to_ns(fevent->ts));
360 complete_all(&dpu_crtc->frame_done_comp);
362 spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
363 list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
364 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
365 DPU_ATRACE_END("crtc_frame_event");
369 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
370 * registers this API to encoder for all frame event callbacks like
371 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
372 * from different context - IRQ, user thread, commit_thread, etc. Each event
373 * should be carefully reviewed and should be processed in proper task context
374 * to avoid schedulin delay or properly manage the irq context's bottom half
377 static void dpu_crtc_frame_event_cb(void *data, u32 event)
379 struct drm_crtc *crtc = (struct drm_crtc *)data;
380 struct dpu_crtc *dpu_crtc;
381 struct msm_drm_private *priv;
382 struct dpu_crtc_frame_event *fevent;
386 /* Nothing to do on idle event */
387 if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
390 dpu_crtc = to_dpu_crtc(crtc);
391 priv = crtc->dev->dev_private;
392 crtc_id = drm_crtc_index(crtc);
394 trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
396 spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
397 fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
398 struct dpu_crtc_frame_event, list);
400 list_del_init(&fevent->list);
401 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
404 DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
408 fevent->event = event;
410 fevent->ts = ktime_get();
411 kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
414 void dpu_crtc_complete_commit(struct drm_crtc *crtc,
415 struct drm_crtc_state *old_state)
417 if (!crtc || !crtc->state) {
418 DPU_ERROR("invalid crtc\n");
421 trace_dpu_crtc_complete_commit(DRMID(crtc));
424 static void _dpu_crtc_setup_mixer_for_encoder(
425 struct drm_crtc *crtc,
426 struct drm_encoder *enc)
428 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
429 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
430 struct dpu_rm *rm = &dpu_kms->rm;
431 struct dpu_crtc_mixer *mixer;
432 struct dpu_hw_ctl *last_valid_ctl = NULL;
434 struct dpu_rm_hw_iter lm_iter, ctl_iter;
436 dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
437 dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
439 /* Set up all the mixers and ctls reserved by this encoder */
440 for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
441 mixer = &cstate->mixers[i];
443 if (!dpu_rm_get_hw(rm, &lm_iter))
445 mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
447 /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
448 if (!dpu_rm_get_hw(rm, &ctl_iter)) {
449 DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
450 mixer->hw_lm->idx - LM_0);
451 mixer->lm_ctl = last_valid_ctl;
453 mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
454 last_valid_ctl = mixer->lm_ctl;
457 /* Shouldn't happen, mixers are always >= ctls */
458 if (!mixer->lm_ctl) {
459 DPU_ERROR("no valid ctls found for lm %d\n",
460 mixer->hw_lm->idx - LM_0);
464 mixer->encoder = enc;
466 cstate->num_mixers++;
467 DPU_DEBUG("setup mixer %d: lm %d\n",
468 i, mixer->hw_lm->idx - LM_0);
469 DPU_DEBUG("setup mixer %d: ctl %d\n",
470 i, mixer->lm_ctl->idx - CTL_0);
474 static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
476 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
477 struct drm_encoder *enc;
479 mutex_lock(&dpu_crtc->crtc_lock);
480 /* Check for mixers on all encoders attached to this crtc */
481 list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
482 if (enc->crtc != crtc)
485 _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
488 mutex_unlock(&dpu_crtc->crtc_lock);
491 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
492 struct drm_crtc_state *state)
494 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
495 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
496 struct drm_display_mode *adj_mode = &state->adjusted_mode;
497 u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
500 for (i = 0; i < cstate->num_mixers; i++) {
501 struct drm_rect *r = &cstate->lm_bounds[i];
502 r->x1 = crtc_split_width * i;
504 r->x2 = r->x1 + crtc_split_width;
505 r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
507 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
510 drm_mode_debug_printmodeline(adj_mode);
513 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
514 struct drm_crtc_state *old_state)
516 struct dpu_crtc *dpu_crtc;
517 struct dpu_crtc_state *cstate;
518 struct drm_encoder *encoder;
519 struct drm_device *dev;
521 struct dpu_crtc_smmu_state_data *smmu_state;
524 DPU_ERROR("invalid crtc\n");
528 if (!crtc->state->enable) {
529 DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
530 crtc->base.id, crtc->state->enable);
534 DPU_DEBUG("crtc%d\n", crtc->base.id);
536 dpu_crtc = to_dpu_crtc(crtc);
537 cstate = to_dpu_crtc_state(crtc->state);
539 smmu_state = &dpu_crtc->smmu_state;
541 if (!cstate->num_mixers) {
542 _dpu_crtc_setup_mixers(crtc);
543 _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
546 if (dpu_crtc->event) {
547 WARN_ON(dpu_crtc->event);
549 spin_lock_irqsave(&dev->event_lock, flags);
550 dpu_crtc->event = crtc->state->event;
551 crtc->state->event = NULL;
552 spin_unlock_irqrestore(&dev->event_lock, flags);
555 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
556 if (encoder->crtc != crtc)
559 /* encoder will trigger pending mask now */
560 dpu_encoder_trigger_kickoff_pending(encoder);
564 * If no mixers have been allocated in dpu_crtc_atomic_check(),
565 * it means we are trying to flush a CRTC whose state is disabled:
566 * nothing else needs to be done.
568 if (unlikely(!cstate->num_mixers))
571 _dpu_crtc_blend_setup(crtc);
574 * PP_DONE irq is only used by command mode for now.
575 * It is better to request pending before FLUSH and START trigger
576 * to make sure no pp_done irq missed.
577 * This is safe because no pp_done will happen before SW trigger
582 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
583 struct drm_crtc_state *old_crtc_state)
585 struct dpu_crtc *dpu_crtc;
586 struct drm_device *dev;
587 struct drm_plane *plane;
588 struct msm_drm_private *priv;
589 struct msm_drm_thread *event_thread;
591 struct dpu_crtc_state *cstate;
593 if (!crtc->state->enable) {
594 DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
595 crtc->base.id, crtc->state->enable);
599 DPU_DEBUG("crtc%d\n", crtc->base.id);
601 dpu_crtc = to_dpu_crtc(crtc);
602 cstate = to_dpu_crtc_state(crtc->state);
604 priv = dev->dev_private;
606 if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
607 DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
611 event_thread = &priv->event_thread[crtc->index];
613 if (dpu_crtc->event) {
614 DPU_DEBUG("already received dpu_crtc->event\n");
616 spin_lock_irqsave(&dev->event_lock, flags);
617 dpu_crtc->event = crtc->state->event;
618 crtc->state->event = NULL;
619 spin_unlock_irqrestore(&dev->event_lock, flags);
623 * If no mixers has been allocated in dpu_crtc_atomic_check(),
624 * it means we are trying to flush a CRTC whose state is disabled:
625 * nothing else needs to be done.
627 if (unlikely(!cstate->num_mixers))
631 * For planes without commit update, drm framework will not add
632 * those planes to current state since hardware update is not
633 * required. However, if those planes were power collapsed since
634 * last commit cycle, driver has to restore the hardware state
635 * of those planes explicitly here prior to plane flush.
637 drm_atomic_crtc_for_each_plane(plane, crtc)
638 dpu_plane_restore(plane);
640 /* update performance setting before crtc kickoff */
641 dpu_core_perf_crtc_update(crtc, 1, false);
644 * Final plane updates: Give each plane a chance to complete all
645 * required writes/flushing before crtc's "flush
646 * everything" call below.
648 drm_atomic_crtc_for_each_plane(plane, crtc) {
649 if (dpu_crtc->smmu_state.transition_error)
650 dpu_plane_set_error(plane, true);
651 dpu_plane_flush(plane);
654 /* Kickoff will be scheduled by outer layer */
658 * dpu_crtc_destroy_state - state destroy hook
660 * @state: CRTC state object to release
662 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
663 struct drm_crtc_state *state)
665 struct dpu_crtc *dpu_crtc;
666 struct dpu_crtc_state *cstate;
668 if (!crtc || !state) {
669 DPU_ERROR("invalid argument(s)\n");
673 dpu_crtc = to_dpu_crtc(crtc);
674 cstate = to_dpu_crtc_state(state);
676 DPU_DEBUG("crtc%d\n", crtc->base.id);
678 __drm_atomic_helper_crtc_destroy_state(state);
683 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
685 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
688 if (!atomic_read(&dpu_crtc->frame_pending)) {
689 DPU_DEBUG("no frames pending\n");
693 DPU_ATRACE_BEGIN("frame done completion wait");
694 ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
695 msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
697 DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
700 DPU_ATRACE_END("frame done completion wait");
705 void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
707 struct drm_encoder *encoder;
708 struct drm_device *dev = crtc->dev;
709 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
710 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
711 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
715 * If no mixers has been allocated in dpu_crtc_atomic_check(),
716 * it means we are trying to start a CRTC whose state is disabled:
717 * nothing else needs to be done.
719 if (unlikely(!cstate->num_mixers))
722 DPU_ATRACE_BEGIN("crtc_commit");
724 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
725 struct dpu_encoder_kickoff_params params = { 0 };
727 if (encoder->crtc != crtc)
731 * Encoder will flush/start now, unless it has a tx pending.
732 * If so, it may delay and flush at an irq event (e.g. ppdone)
734 dpu_encoder_prepare_for_kickoff(encoder, ¶ms);
737 /* wait for frame_event_done completion */
738 DPU_ATRACE_BEGIN("wait_for_frame_done_event");
739 ret = _dpu_crtc_wait_for_frame_done(crtc);
740 DPU_ATRACE_END("wait_for_frame_done_event");
742 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
744 atomic_read(&dpu_crtc->frame_pending));
748 if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
749 /* acquire bandwidth and other resources */
750 DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
752 DPU_DEBUG("crtc%d commit\n", crtc->base.id);
754 dpu_crtc->play_count++;
756 dpu_vbif_clear_errors(dpu_kms);
758 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
759 if (encoder->crtc != crtc)
762 dpu_encoder_kickoff(encoder);
766 reinit_completion(&dpu_crtc->frame_done_comp);
767 DPU_ATRACE_END("crtc_commit");
771 * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
772 * @dpu_crtc: Pointer to dpu crtc structure
773 * @enable: Whether to enable/disable vblanks
775 static void _dpu_crtc_vblank_enable_no_lock(
776 struct dpu_crtc *dpu_crtc, bool enable)
778 struct drm_crtc *crtc = &dpu_crtc->base;
779 struct drm_device *dev = crtc->dev;
780 struct drm_encoder *enc;
783 /* drop lock since power crtc cb may try to re-acquire lock */
784 mutex_unlock(&dpu_crtc->crtc_lock);
785 pm_runtime_get_sync(dev->dev);
786 mutex_lock(&dpu_crtc->crtc_lock);
788 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
789 if (enc->crtc != crtc)
792 trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
796 dpu_encoder_register_vblank_callback(enc,
797 dpu_crtc_vblank_cb, (void *)crtc);
800 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
801 if (enc->crtc != crtc)
804 trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
808 dpu_encoder_register_vblank_callback(enc, NULL, NULL);
811 /* drop lock since power crtc cb may try to re-acquire lock */
812 mutex_unlock(&dpu_crtc->crtc_lock);
813 pm_runtime_put_sync(dev->dev);
814 mutex_lock(&dpu_crtc->crtc_lock);
819 * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
820 * @crtc: Pointer to drm crtc object
821 * @enable: true to enable suspend, false to indicate resume
823 static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
825 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
827 DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
829 mutex_lock(&dpu_crtc->crtc_lock);
832 * If the vblank is enabled, release a power reference on suspend
833 * and take it back during resume (if it is still enabled).
835 trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
836 if (dpu_crtc->suspend == enable)
837 DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
838 crtc->base.id, enable);
839 else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
840 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
843 dpu_crtc->suspend = enable;
844 mutex_unlock(&dpu_crtc->crtc_lock);
848 * dpu_crtc_duplicate_state - state duplicate hook
849 * @crtc: Pointer to drm crtc structure
850 * @Returns: Pointer to new drm_crtc_state structure
852 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
854 struct dpu_crtc *dpu_crtc;
855 struct dpu_crtc_state *cstate, *old_cstate;
857 if (!crtc || !crtc->state) {
858 DPU_ERROR("invalid argument(s)\n");
862 dpu_crtc = to_dpu_crtc(crtc);
863 old_cstate = to_dpu_crtc_state(crtc->state);
864 cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
866 DPU_ERROR("failed to allocate state\n");
870 /* duplicate base helper */
871 __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
873 return &cstate->base;
877 * dpu_crtc_reset - reset hook for CRTCs
878 * Resets the atomic state for @crtc by freeing the state pointer (which might
879 * be NULL, e.g. at driver load time) and allocating a new empty state object.
880 * @crtc: Pointer to drm crtc structure
882 static void dpu_crtc_reset(struct drm_crtc *crtc)
884 struct dpu_crtc *dpu_crtc;
885 struct dpu_crtc_state *cstate;
888 DPU_ERROR("invalid crtc\n");
892 /* revert suspend actions, if necessary */
893 if (dpu_kms_is_suspend_state(crtc->dev))
894 _dpu_crtc_set_suspend(crtc, false);
896 /* remove previous state, if present */
898 dpu_crtc_destroy_state(crtc, crtc->state);
902 dpu_crtc = to_dpu_crtc(crtc);
903 cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
905 DPU_ERROR("failed to allocate state\n");
909 cstate->base.crtc = crtc;
910 crtc->state = &cstate->base;
913 static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
915 struct drm_crtc *crtc = arg;
916 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
917 struct drm_encoder *encoder;
919 mutex_lock(&dpu_crtc->crtc_lock);
921 trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
923 /* restore encoder; crtc will be programmed during commit */
924 drm_for_each_encoder(encoder, crtc->dev) {
925 if (encoder->crtc != crtc)
928 dpu_encoder_virt_restore(encoder);
931 mutex_unlock(&dpu_crtc->crtc_lock);
934 static void dpu_crtc_disable(struct drm_crtc *crtc)
936 struct dpu_crtc *dpu_crtc;
937 struct dpu_crtc_state *cstate;
938 struct drm_display_mode *mode;
939 struct drm_encoder *encoder;
940 struct msm_drm_private *priv;
943 if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
944 DPU_ERROR("invalid crtc\n");
947 dpu_crtc = to_dpu_crtc(crtc);
948 cstate = to_dpu_crtc_state(crtc->state);
949 mode = &cstate->base.adjusted_mode;
950 priv = crtc->dev->dev_private;
952 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
954 if (dpu_kms_is_suspend_state(crtc->dev))
955 _dpu_crtc_set_suspend(crtc, true);
957 /* Disable/save vblank irq handling */
958 drm_crtc_vblank_off(crtc);
960 mutex_lock(&dpu_crtc->crtc_lock);
962 /* wait for frame_event_done completion */
963 if (_dpu_crtc_wait_for_frame_done(crtc))
964 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
966 atomic_read(&dpu_crtc->frame_pending));
968 trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
969 if (dpu_crtc->enabled && !dpu_crtc->suspend &&
970 dpu_crtc->vblank_requested) {
971 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
973 dpu_crtc->enabled = false;
975 if (atomic_read(&dpu_crtc->frame_pending)) {
976 trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
977 atomic_read(&dpu_crtc->frame_pending));
978 dpu_core_perf_crtc_release_bw(crtc);
979 atomic_set(&dpu_crtc->frame_pending, 0);
982 dpu_core_perf_crtc_update(crtc, 0, true);
984 drm_for_each_encoder(encoder, crtc->dev) {
985 if (encoder->crtc != crtc)
987 dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
990 if (dpu_crtc->power_event)
991 dpu_power_handle_unregister_event(dpu_crtc->phandle,
992 dpu_crtc->power_event);
994 memset(cstate->mixers, 0, sizeof(cstate->mixers));
995 cstate->num_mixers = 0;
997 /* disable clk & bw control until clk & bw properties are set */
998 cstate->bw_control = false;
999 cstate->bw_split_vote = false;
1001 mutex_unlock(&dpu_crtc->crtc_lock);
1003 if (crtc->state->event && !crtc->state->active) {
1004 spin_lock_irqsave(&crtc->dev->event_lock, flags);
1005 drm_crtc_send_vblank_event(crtc, crtc->state->event);
1006 crtc->state->event = NULL;
1007 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
1011 static void dpu_crtc_enable(struct drm_crtc *crtc,
1012 struct drm_crtc_state *old_crtc_state)
1014 struct dpu_crtc *dpu_crtc;
1015 struct drm_encoder *encoder;
1016 struct msm_drm_private *priv;
1018 if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
1019 DPU_ERROR("invalid crtc\n");
1022 priv = crtc->dev->dev_private;
1024 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
1025 dpu_crtc = to_dpu_crtc(crtc);
1027 drm_for_each_encoder(encoder, crtc->dev) {
1028 if (encoder->crtc != crtc)
1030 dpu_encoder_register_frame_event_callback(encoder,
1031 dpu_crtc_frame_event_cb, (void *)crtc);
1034 mutex_lock(&dpu_crtc->crtc_lock);
1035 trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
1036 if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
1037 dpu_crtc->vblank_requested) {
1038 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
1040 dpu_crtc->enabled = true;
1042 mutex_unlock(&dpu_crtc->crtc_lock);
1044 /* Enable/restore vblank irq handling */
1045 drm_crtc_vblank_on(crtc);
1047 dpu_crtc->power_event = dpu_power_handle_register_event(
1048 dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
1049 dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
1053 struct plane_state {
1054 struct dpu_plane_state *dpu_pstate;
1055 const struct drm_plane_state *drm_pstate;
1060 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
1061 struct drm_crtc_state *state)
1063 struct dpu_crtc *dpu_crtc;
1064 struct plane_state *pstates;
1065 struct dpu_crtc_state *cstate;
1067 const struct drm_plane_state *pstate;
1068 struct drm_plane *plane;
1069 struct drm_display_mode *mode;
1071 int cnt = 0, rc = 0, mixer_width, i, z_pos;
1073 struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
1074 int multirect_count = 0;
1075 const struct drm_plane_state *pipe_staged[SSPP_MAX];
1076 int left_zpos_cnt = 0, right_zpos_cnt = 0;
1077 struct drm_rect crtc_rect = { 0 };
1080 DPU_ERROR("invalid crtc\n");
1084 pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
1086 dpu_crtc = to_dpu_crtc(crtc);
1087 cstate = to_dpu_crtc_state(state);
1089 if (!state->enable || !state->active) {
1090 DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
1091 crtc->base.id, state->enable, state->active);
1095 mode = &state->adjusted_mode;
1096 DPU_DEBUG("%s: check", dpu_crtc->name);
1098 /* force a full mode set if active state changed */
1099 if (state->active_changed)
1100 state->mode_changed = true;
1102 memset(pipe_staged, 0, sizeof(pipe_staged));
1104 mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
1106 _dpu_crtc_setup_lm_bounds(crtc, state);
1108 crtc_rect.x2 = mode->hdisplay;
1109 crtc_rect.y2 = mode->vdisplay;
1111 /* get plane state for all drm planes associated with crtc state */
1112 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
1113 struct drm_rect dst, clip = crtc_rect;
1115 if (IS_ERR_OR_NULL(pstate)) {
1116 rc = PTR_ERR(pstate);
1117 DPU_ERROR("%s: failed to get plane%d state, %d\n",
1118 dpu_crtc->name, plane->base.id, rc);
1121 if (cnt >= DPU_STAGE_MAX * 4)
1124 pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
1125 pstates[cnt].drm_pstate = pstate;
1126 pstates[cnt].stage = pstate->normalized_zpos;
1127 pstates[cnt].pipe_id = dpu_plane_pipe(plane);
1129 if (pipe_staged[pstates[cnt].pipe_id]) {
1130 multirect_plane[multirect_count].r0 =
1131 pipe_staged[pstates[cnt].pipe_id];
1132 multirect_plane[multirect_count].r1 = pstate;
1135 pipe_staged[pstates[cnt].pipe_id] = NULL;
1137 pipe_staged[pstates[cnt].pipe_id] = pstate;
1142 dst = drm_plane_state_dest(pstate);
1143 if (!drm_rect_intersect(&clip, &dst)) {
1144 DPU_ERROR("invalid vertical/horizontal destination\n");
1145 DPU_ERROR("display: " DRM_RECT_FMT " plane: "
1146 DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
1147 DRM_RECT_ARG(&dst));
1153 for (i = 1; i < SSPP_MAX; i++) {
1154 if (pipe_staged[i]) {
1155 dpu_plane_clear_multirect(pipe_staged[i]);
1157 if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
1159 "r1 only virt plane:%d not supported\n",
1160 pipe_staged[i]->plane->base.id);
1168 for (i = 0; i < cnt; i++) {
1169 /* reset counts at every new blend stage */
1170 if (pstates[i].stage != z_pos) {
1173 z_pos = pstates[i].stage;
1176 /* verify z_pos setting before using it */
1177 if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
1178 DPU_ERROR("> %d plane stages assigned\n",
1179 DPU_STAGE_MAX - DPU_STAGE_0);
1182 } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
1183 if (left_zpos_cnt == 2) {
1184 DPU_ERROR("> 2 planes @ stage %d on left\n",
1192 if (right_zpos_cnt == 2) {
1193 DPU_ERROR("> 2 planes @ stage %d on right\n",
1201 pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
1202 DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
1205 for (i = 0; i < multirect_count; i++) {
1206 if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
1208 "multirect validation failed for planes (%d - %d)\n",
1209 multirect_plane[i].r0->plane->base.id,
1210 multirect_plane[i].r1->plane->base.id);
1216 rc = dpu_core_perf_crtc_check(crtc, state);
1218 DPU_ERROR("crtc%d failed performance check %d\n",
1223 /* validate source split:
1224 * use pstates sorted by stage to check planes on same stage
1225 * we assume that all pipes are in source split so its valid to compare
1226 * without taking into account left/right mixer placement
1228 for (i = 1; i < cnt; i++) {
1229 struct plane_state *prv_pstate, *cur_pstate;
1230 struct drm_rect left_rect, right_rect;
1231 int32_t left_pid, right_pid;
1234 prv_pstate = &pstates[i - 1];
1235 cur_pstate = &pstates[i];
1236 if (prv_pstate->stage != cur_pstate->stage)
1239 stage = cur_pstate->stage;
1241 left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
1242 left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
1244 right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
1245 right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
1247 if (right_rect.x1 < left_rect.x1) {
1248 swap(left_pid, right_pid);
1249 swap(left_rect, right_rect);
1253 * - planes are enumerated in pipe-priority order such that
1254 * planes with lower drm_id must be left-most in a shared
1255 * blend-stage when using source split.
1256 * - planes in source split must be contiguous in width
1257 * - planes in source split must have same dest yoff and height
1259 if (right_pid < left_pid) {
1261 "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
1262 stage, left_pid, right_pid);
1265 } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
1266 DPU_ERROR("non-contiguous coordinates for src split. "
1267 "stage: %d left: " DRM_RECT_FMT " right: "
1268 DRM_RECT_FMT "\n", stage,
1269 DRM_RECT_ARG(&left_rect),
1270 DRM_RECT_ARG(&right_rect));
1273 } else if (left_rect.y1 != right_rect.y1 ||
1274 drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
1275 DPU_ERROR("source split at stage: %d. invalid "
1276 "yoff/height: left: " DRM_RECT_FMT " right: "
1277 DRM_RECT_FMT "\n", stage,
1278 DRM_RECT_ARG(&left_rect),
1279 DRM_RECT_ARG(&right_rect));
1290 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
1292 struct dpu_crtc *dpu_crtc;
1295 DPU_ERROR("invalid crtc\n");
1298 dpu_crtc = to_dpu_crtc(crtc);
1300 mutex_lock(&dpu_crtc->crtc_lock);
1301 trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
1302 if (dpu_crtc->enabled && !dpu_crtc->suspend) {
1303 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
1305 dpu_crtc->vblank_requested = en;
1306 mutex_unlock(&dpu_crtc->crtc_lock);
1311 #ifdef CONFIG_DEBUG_FS
1312 static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
1314 struct dpu_crtc *dpu_crtc;
1315 struct dpu_plane_state *pstate = NULL;
1316 struct dpu_crtc_mixer *m;
1318 struct drm_crtc *crtc;
1319 struct drm_plane *plane;
1320 struct drm_display_mode *mode;
1321 struct drm_framebuffer *fb;
1322 struct drm_plane_state *state;
1323 struct dpu_crtc_state *cstate;
1327 if (!s || !s->private)
1330 dpu_crtc = s->private;
1331 crtc = &dpu_crtc->base;
1333 drm_modeset_lock_all(crtc->dev);
1334 cstate = to_dpu_crtc_state(crtc->state);
1336 mutex_lock(&dpu_crtc->crtc_lock);
1337 mode = &crtc->state->adjusted_mode;
1338 out_width = _dpu_crtc_get_mixer_width(cstate, mode);
1340 seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
1341 mode->hdisplay, mode->vdisplay);
1345 for (i = 0; i < cstate->num_mixers; ++i) {
1346 m = &cstate->mixers[i];
1348 seq_printf(s, "\tmixer[%d] has no lm\n", i);
1349 else if (!m->lm_ctl)
1350 seq_printf(s, "\tmixer[%d] has no ctl\n", i);
1352 seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
1353 m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
1354 out_width, mode->vdisplay);
1359 drm_atomic_crtc_for_each_plane(plane, crtc) {
1360 pstate = to_dpu_plane_state(plane->state);
1361 state = plane->state;
1363 if (!pstate || !state)
1366 seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
1369 if (plane->state->fb) {
1370 fb = plane->state->fb;
1372 seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
1373 fb->base.id, (char *) &fb->format->format,
1374 fb->width, fb->height);
1375 for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
1376 seq_printf(s, "cpp[%d]:%u ",
1377 i, fb->format->cpp[i]);
1378 seq_puts(s, "\n\t");
1380 seq_printf(s, "modifier:%8llu ", fb->modifier);
1384 for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
1385 seq_printf(s, "pitches[%d]:%8u ", i,
1390 for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
1391 seq_printf(s, "offsets[%d]:%8u ", i,
1396 seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
1397 state->src_x, state->src_y, state->src_w, state->src_h);
1399 seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
1400 state->crtc_x, state->crtc_y, state->crtc_w,
1402 seq_printf(s, "\tmultirect: mode: %d index: %d\n",
1403 pstate->multirect_mode, pstate->multirect_index);
1407 if (dpu_crtc->vblank_cb_count) {
1408 ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
1409 s64 diff_ms = ktime_to_ms(diff);
1410 s64 fps = diff_ms ? div_s64(
1411 dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
1414 "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
1415 fps, dpu_crtc->vblank_cb_count,
1416 ktime_to_ms(diff), dpu_crtc->play_count);
1418 /* reset time & count for next measurement */
1419 dpu_crtc->vblank_cb_count = 0;
1420 dpu_crtc->vblank_cb_time = ktime_set(0, 0);
1423 seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
1425 mutex_unlock(&dpu_crtc->crtc_lock);
1426 drm_modeset_unlock_all(crtc->dev);
1431 static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
1433 return single_open(file, _dpu_debugfs_status_show, inode->i_private);
1436 #define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
1437 static int __prefix ## _open(struct inode *inode, struct file *file) \
1439 return single_open(file, __prefix ## _show, inode->i_private); \
1441 static const struct file_operations __prefix ## _fops = { \
1442 .owner = THIS_MODULE, \
1443 .open = __prefix ## _open, \
1444 .release = single_release, \
1446 .llseek = seq_lseek, \
1449 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
1451 struct drm_crtc *crtc = (struct drm_crtc *) s->private;
1452 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1455 seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
1456 seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
1457 seq_printf(s, "core_clk_rate: %llu\n",
1458 dpu_crtc->cur_perf.core_clk_rate);
1459 for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
1460 i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
1461 seq_printf(s, "bw_ctl[%s]: %llu\n",
1462 dpu_power_handle_get_dbus_name(i),
1463 dpu_crtc->cur_perf.bw_ctl[i]);
1464 seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
1465 dpu_power_handle_get_dbus_name(i),
1466 dpu_crtc->cur_perf.max_per_pipe_ib[i]);
1471 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
1473 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1475 struct dpu_crtc *dpu_crtc;
1476 struct dpu_kms *dpu_kms;
1478 static const struct file_operations debugfs_status_fops = {
1479 .open = _dpu_debugfs_status_open,
1481 .llseek = seq_lseek,
1482 .release = single_release,
1487 dpu_crtc = to_dpu_crtc(crtc);
1489 dpu_kms = _dpu_crtc_get_kms(crtc);
1491 dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
1492 crtc->dev->primary->debugfs_root);
1493 if (!dpu_crtc->debugfs_root)
1496 /* don't error check these */
1497 debugfs_create_file("status", 0400,
1498 dpu_crtc->debugfs_root,
1499 dpu_crtc, &debugfs_status_fops);
1500 debugfs_create_file("state", 0600,
1501 dpu_crtc->debugfs_root,
1503 &dpu_crtc_debugfs_state_fops);
1508 static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
1510 struct dpu_crtc *dpu_crtc;
1514 dpu_crtc = to_dpu_crtc(crtc);
1515 debugfs_remove_recursive(dpu_crtc->debugfs_root);
1518 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1523 static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
1526 #endif /* CONFIG_DEBUG_FS */
1528 static int dpu_crtc_late_register(struct drm_crtc *crtc)
1530 return _dpu_crtc_init_debugfs(crtc);
1533 static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
1535 _dpu_crtc_destroy_debugfs(crtc);
1538 static const struct drm_crtc_funcs dpu_crtc_funcs = {
1539 .set_config = drm_atomic_helper_set_config,
1540 .destroy = dpu_crtc_destroy,
1541 .page_flip = drm_atomic_helper_page_flip,
1542 .reset = dpu_crtc_reset,
1543 .atomic_duplicate_state = dpu_crtc_duplicate_state,
1544 .atomic_destroy_state = dpu_crtc_destroy_state,
1545 .late_register = dpu_crtc_late_register,
1546 .early_unregister = dpu_crtc_early_unregister,
1549 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
1550 .disable = dpu_crtc_disable,
1551 .atomic_enable = dpu_crtc_enable,
1552 .atomic_check = dpu_crtc_atomic_check,
1553 .atomic_begin = dpu_crtc_atomic_begin,
1554 .atomic_flush = dpu_crtc_atomic_flush,
1557 /* initialize crtc */
1558 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
1559 struct drm_plane *cursor)
1561 struct drm_crtc *crtc = NULL;
1562 struct dpu_crtc *dpu_crtc = NULL;
1563 struct msm_drm_private *priv = NULL;
1564 struct dpu_kms *kms = NULL;
1567 priv = dev->dev_private;
1568 kms = to_dpu_kms(priv->kms);
1570 dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
1572 return ERR_PTR(-ENOMEM);
1574 crtc = &dpu_crtc->base;
1577 mutex_init(&dpu_crtc->crtc_lock);
1578 spin_lock_init(&dpu_crtc->spin_lock);
1579 atomic_set(&dpu_crtc->frame_pending, 0);
1581 init_completion(&dpu_crtc->frame_done_comp);
1583 INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
1585 for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
1586 INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
1587 list_add(&dpu_crtc->frame_events[i].list,
1588 &dpu_crtc->frame_event_list);
1589 kthread_init_work(&dpu_crtc->frame_events[i].work,
1590 dpu_crtc_frame_event_work);
1593 drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
1596 drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
1599 /* save user friendly CRTC name for later */
1600 snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
1602 /* initialize event handling */
1603 spin_lock_init(&dpu_crtc->event_lock);
1605 dpu_crtc->phandle = &kms->phandle;
1607 DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);