]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
msm:disp:dpu1: add scaler support on SC7180 display
[linux.git] / drivers / gpu / drm / msm / disp / dpu1 / dpu_crtc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
4  * Copyright (C) 2013 Red Hat
5  * Author: Rob Clark <robdclark@gmail.com>
6  */
7
8 #define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__
9 #include <linux/sort.h>
10 #include <linux/debugfs.h>
11 #include <linux/ktime.h>
12
13 #include <drm/drm_crtc.h>
14 #include <drm/drm_flip_work.h>
15 #include <drm/drm_mode.h>
16 #include <drm/drm_probe_helper.h>
17 #include <drm/drm_rect.h>
18 #include <drm/drm_vblank.h>
19
20 #include "dpu_kms.h"
21 #include "dpu_hw_lm.h"
22 #include "dpu_hw_ctl.h"
23 #include "dpu_crtc.h"
24 #include "dpu_plane.h"
25 #include "dpu_encoder.h"
26 #include "dpu_vbif.h"
27 #include "dpu_core_perf.h"
28 #include "dpu_trace.h"
29
30 #define DPU_DRM_BLEND_OP_NOT_DEFINED    0
31 #define DPU_DRM_BLEND_OP_OPAQUE         1
32 #define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
33 #define DPU_DRM_BLEND_OP_COVERAGE       3
34 #define DPU_DRM_BLEND_OP_MAX            4
35
36 /* layer mixer index on dpu_crtc */
37 #define LEFT_MIXER 0
38 #define RIGHT_MIXER 1
39
40 /* timeout in ms waiting for frame done */
41 #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS  60
42
43 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
44 {
45         struct msm_drm_private *priv = crtc->dev->dev_private;
46
47         return to_dpu_kms(priv->kms);
48 }
49
/*
 * dpu_crtc_destroy - destroy hook, frees the dpu_crtc wrapper
 * @crtc: Pointer to drm crtc structure (may be NULL)
 *
 * Fix: the original dereferenced crtc (via to_dpu_crtc() and the debug
 * print) before performing the NULL check, making the check dead code.
 * Validate the pointer before any use.
 */
static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);

	DPU_DEBUG("\n");

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}
62
63 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
64                 struct dpu_plane_state *pstate, struct dpu_format *format)
65 {
66         struct dpu_hw_mixer *lm = mixer->hw_lm;
67         uint32_t blend_op;
68         struct drm_format_name_buf format_name;
69
70         /* default to opaque blending */
71         blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
72                 DPU_BLEND_BG_ALPHA_BG_CONST;
73
74         if (format->alpha_enable) {
75                 /* coverage blending */
76                 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
77                         DPU_BLEND_BG_ALPHA_FG_PIXEL |
78                         DPU_BLEND_BG_INV_ALPHA;
79         }
80
81         lm->ops.setup_blend_config(lm, pstate->stage,
82                                 0xFF, 0, blend_op);
83
84         DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
85                 drm_get_format_name(format->base.pixel_format, &format_name),
86                 format->alpha_enable, blend_op);
87 }
88
89 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
90 {
91         struct dpu_crtc *dpu_crtc;
92         struct dpu_crtc_state *crtc_state;
93         int lm_idx, lm_horiz_position;
94
95         dpu_crtc = to_dpu_crtc(crtc);
96         crtc_state = to_dpu_crtc_state(crtc->state);
97
98         lm_horiz_position = 0;
99         for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
100                 const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
101                 struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
102                 struct dpu_hw_mixer_cfg cfg;
103
104                 if (!lm_roi || !drm_rect_visible(lm_roi))
105                         continue;
106
107                 cfg.out_width = drm_rect_width(lm_roi);
108                 cfg.out_height = drm_rect_height(lm_roi);
109                 cfg.right_mixer = lm_horiz_position++;
110                 cfg.flags = 0;
111                 hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
112         }
113 }
114
/*
 * _dpu_crtc_blend_setup_mixer - build the stage config and per-mixer blend
 * state from the planes currently attached to the crtc
 * @crtc: Pointer to drm crtc structure
 * @dpu_crtc: Pointer to dpu crtc structure (provides stage_cfg scratch)
 * @mixer: Array of cstate->num_mixers mixers to accumulate state into
 */
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	/* count of planes already placed on each blend stage */
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		/* gather this plane's pipe bits for the CTL flush mask */
		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		/* remember if the base stage carries per-pixel alpha; used
		 * below to zero mixer_op_mode for opaque overlay planes */
		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		/* slot index within the stage when several planes share it */
		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	/* finally program each mixer's output rectangle */
	_dpu_crtc_program_lm_output_roi(crtc);
}
183
/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 *
 * Resets each mixer's op-mode/flush state, rebuilds the stage
 * configuration from the attached planes, then programs alpha-out,
 * pending flush masks and blend stages on every mixer/CTL pair.
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	/* reset per-mixer software state before rebuilding it */
	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		/* clear_all_blendstages is an optional CTL op */
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	/* fill stage_cfg, op modes and flush masks from the planes */
	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}
238
239 /**
240  *  _dpu_crtc_complete_flip - signal pending page_flip events
241  * Any pending vblank events are added to the vblank_event_list
242  * so that the next vblank interrupt shall signal them.
243  * However PAGE_FLIP events are not handled through the vblank_event_list.
244  * This API signals any pending PAGE_FLIP events requested through
245  * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
246  * @crtc: Pointer to drm crtc structure
247  */
248 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
249 {
250         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
251         struct drm_device *dev = crtc->dev;
252         unsigned long flags;
253
254         spin_lock_irqsave(&dev->event_lock, flags);
255         if (dpu_crtc->event) {
256                 DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
257                               dpu_crtc->event);
258                 trace_dpu_crtc_complete_flip(DRMID(crtc));
259                 drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
260                 dpu_crtc->event = NULL;
261         }
262         spin_unlock_irqrestore(&dev->event_lock, flags);
263 }
264
265 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
266 {
267         struct drm_encoder *encoder;
268
269         if (!crtc) {
270                 DPU_ERROR("invalid crtc\n");
271                 return INTF_MODE_NONE;
272         }
273
274         /*
275          * TODO: This function is called from dpu debugfs and as part of atomic
276          * check. When called from debugfs, the crtc->mutex must be held to
277          * read crtc->state. However reading crtc->state from atomic check isn't
278          * allowed (unless you have a good reason, a big comment, and a deep
279          * understanding of how the atomic/modeset locks work (<- and this is
280          * probably not possible)). So we'll keep the WARN_ON here for now, but
281          * really we need to figure out a better way to track our operating mode
282          */
283         WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
284
285         /* TODO: Returns the first INTF_MODE, could there be multiple values? */
286         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
287                 return dpu_encoder_get_intf_mode(encoder);
288
289         return INTF_MODE_NONE;
290 }
291
292 void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
293 {
294         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
295
296         /* keep statistics on vblank callback - with auto reset via debugfs */
297         if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
298                 dpu_crtc->vblank_cb_time = ktime_get();
299         else
300                 dpu_crtc->vblank_cb_count++;
301         _dpu_crtc_complete_flip(crtc);
302         drm_crtc_handle_vblank(crtc);
303         trace_dpu_crtc_vblank_cb(DRMID(crtc));
304 }
305
/*
 * dpu_crtc_frame_event_work - deferred (kthread) handler for encoder frame
 * events queued by dpu_crtc_frame_event_cb(); drops frame_pending refs,
 * refreshes perf votes and recycles the event object.
 */
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		/* refresh the perf vote now that a frame has completed */
		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	/* wake up _dpu_crtc_wait_for_frame_done() waiters */
	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	/* return the event object to the free list for reuse */
	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}
356
/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
 * from different context - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in proper task context
 * to avoid scheduling delay or properly manage the irq context's bottom half
 * processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	/* grab a free pre-allocated event object; may run in irq context */
	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	/* all event objects in flight: drop the event rather than block */
	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	/* defer the real processing to dpu_crtc_frame_event_work() */
	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}
402
/*
 * dpu_crtc_complete_commit - post-commit hook for this crtc
 * @crtc: Pointer to drm crtc structure
 *
 * Currently only emits a tracepoint; kept as the designated place for
 * any future post-commit bookkeeping.
 */
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}
407
408 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
409                 struct drm_crtc_state *state)
410 {
411         struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
412         struct drm_display_mode *adj_mode = &state->adjusted_mode;
413         u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
414         int i;
415
416         for (i = 0; i < cstate->num_mixers; i++) {
417                 struct drm_rect *r = &cstate->lm_bounds[i];
418                 r->x1 = crtc_split_width * i;
419                 r->y1 = 0;
420                 r->x2 = r->x1 + crtc_split_width;
421                 r->y2 = adj_mode->vdisplay;
422
423                 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
424         }
425
426         drm_mode_debug_printmodeline(adj_mode);
427 }
428
/*
 * dpu_crtc_atomic_begin - atomic helper begin hook: cache the pending
 * vblank event, kick pending encoders and program the mixer blend setup.
 */
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	/* nothing to program for a disabled CRTC */
	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	smmu_state = &dpu_crtc->smmu_state;

	/* recompute the per-mixer output rectangles for this mode */
	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/*
	 * Cache the pending event; it is signalled from the vblank path
	 * in _dpu_crtc_complete_flip(). An event still cached from a
	 * previous commit indicates a bug, hence the WARN_ON.
	 */
	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}
490
/*
 * dpu_crtc_atomic_flush - atomic helper flush hook: restore and flush all
 * planes, update perf settings; the actual kickoff is scheduled by the
 * outer commit layer.
 */
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	/* nothing to flush for a disabled CRTC */
	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	/* cache the pending event unless atomic_begin already did */
	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers has been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		/* propagate smmu transition errors to the planes */
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}
565
566 /**
567  * dpu_crtc_destroy_state - state destroy hook
568  * @crtc: drm CRTC
569  * @state: CRTC state object to release
570  */
571 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
572                 struct drm_crtc_state *state)
573 {
574         struct dpu_crtc *dpu_crtc;
575         struct dpu_crtc_state *cstate;
576
577         if (!crtc || !state) {
578                 DPU_ERROR("invalid argument(s)\n");
579                 return;
580         }
581
582         dpu_crtc = to_dpu_crtc(crtc);
583         cstate = to_dpu_crtc_state(state);
584
585         DPU_DEBUG("crtc%d\n", crtc->base.id);
586
587         __drm_atomic_helper_crtc_destroy_state(state);
588
589         kfree(cstate);
590 }
591
592 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
593 {
594         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
595         int ret, rc = 0;
596
597         if (!atomic_read(&dpu_crtc->frame_pending)) {
598                 DPU_DEBUG("no frames pending\n");
599                 return 0;
600         }
601
602         DPU_ATRACE_BEGIN("frame done completion wait");
603         ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
604                         msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
605         if (!ret) {
606                 DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
607                 rc = -ETIMEDOUT;
608         }
609         DPU_ATRACE_END("frame done completion wait");
610
611         return rc;
612 }
613
/*
 * dpu_crtc_commit_kickoff - trigger hardware flush/start for the commit
 * @crtc: Pointer to drm crtc structure
 *
 * Prepares every attached encoder, bumps the pending-frame count,
 * clears VBIF errors and kicks off all encoders.
 */
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers has been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	/* frame_pending is dropped per frame in dpu_crtc_frame_event_work() */
	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	/* re-arm the frame-done completion for the next wait */
	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}
655
656 static void dpu_crtc_reset(struct drm_crtc *crtc)
657 {
658         struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
659
660         if (crtc->state)
661                 dpu_crtc_destroy_state(crtc, crtc->state);
662
663         __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
664 }
665
666 /**
667  * dpu_crtc_duplicate_state - state duplicate hook
668  * @crtc: Pointer to drm crtc structure
669  * @Returns: Pointer to new drm_crtc_state structure
670  */
671 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
672 {
673         struct dpu_crtc *dpu_crtc;
674         struct dpu_crtc_state *cstate, *old_cstate;
675
676         if (!crtc || !crtc->state) {
677                 DPU_ERROR("invalid argument(s)\n");
678                 return NULL;
679         }
680
681         dpu_crtc = to_dpu_crtc(crtc);
682         old_cstate = to_dpu_crtc_state(crtc->state);
683         cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
684         if (!cstate) {
685                 DPU_ERROR("failed to allocate state\n");
686                 return NULL;
687         }
688
689         /* duplicate base helper */
690         __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
691
692         return &cstate->base;
693 }
694
/*
 * dpu_crtc_disable - atomic helper disable hook: detach encoders, drain
 * pending frames, release bandwidth and clear the mixer allocation.
 */
static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	unsigned long flags;
	bool release_bandwidth = false;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	/* force-drop any frames still pending after the timed-out wait */
	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	/* unregister the frame event callbacks set up in dpu_crtc_enable() */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	/* flush any event left on an inactive state so userspace unblocks */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}
769
770 static void dpu_crtc_enable(struct drm_crtc *crtc,
771                 struct drm_crtc_state *old_crtc_state)
772 {
773         struct dpu_crtc *dpu_crtc;
774         struct drm_encoder *encoder;
775         struct msm_drm_private *priv;
776         bool request_bandwidth;
777
778         if (!crtc) {
779                 DPU_ERROR("invalid crtc\n");
780                 return;
781         }
782         priv = crtc->dev->dev_private;
783
784         pm_runtime_get_sync(crtc->dev->dev);
785
786         DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
787         dpu_crtc = to_dpu_crtc(crtc);
788
789         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
790                 /* in video mode, we hold an extra bandwidth reference
791                  * as we cannot drop bandwidth at frame-done if any
792                  * crtc is being used in video mode.
793                  */
794                 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
795                         request_bandwidth = true;
796                 dpu_encoder_register_frame_event_callback(encoder,
797                                 dpu_crtc_frame_event_cb, (void *)crtc);
798         }
799
800         if (request_bandwidth)
801                 atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
802
803         trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
804         dpu_crtc->enabled = true;
805
806         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
807                 dpu_encoder_assign_crtc(encoder, crtc);
808
809         /* Enable/restore vblank irq handling */
810         drm_crtc_vblank_on(crtc);
811 }
812
/*
 * struct plane_state - scratch per-plane bookkeeping used by atomic_check
 * @dpu_pstate: dpu-private plane state for the plane
 * @drm_pstate: generic drm plane state that @dpu_pstate wraps
 * @stage: normalized blend stage (z-order) assigned to the plane
 * @pipe_id: hw source pipe (SSPP) the plane is fetched through
 */
struct plane_state {
        struct dpu_plane_state *dpu_pstate;
        const struct drm_plane_state *drm_pstate;
        int stage;
        u32 pipe_id;
};
819
820 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
821                 struct drm_crtc_state *state)
822 {
823         struct dpu_crtc *dpu_crtc;
824         struct plane_state *pstates;
825         struct dpu_crtc_state *cstate;
826
827         const struct drm_plane_state *pstate;
828         struct drm_plane *plane;
829         struct drm_display_mode *mode;
830
831         int cnt = 0, rc = 0, mixer_width, i, z_pos;
832
833         struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
834         int multirect_count = 0;
835         const struct drm_plane_state *pipe_staged[SSPP_MAX];
836         int left_zpos_cnt = 0, right_zpos_cnt = 0;
837         struct drm_rect crtc_rect = { 0 };
838
839         if (!crtc) {
840                 DPU_ERROR("invalid crtc\n");
841                 return -EINVAL;
842         }
843
844         pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
845
846         dpu_crtc = to_dpu_crtc(crtc);
847         cstate = to_dpu_crtc_state(state);
848
849         if (!state->enable || !state->active) {
850                 DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
851                                 crtc->base.id, state->enable, state->active);
852                 goto end;
853         }
854
855         mode = &state->adjusted_mode;
856         DPU_DEBUG("%s: check", dpu_crtc->name);
857
858         /* force a full mode set if active state changed */
859         if (state->active_changed)
860                 state->mode_changed = true;
861
862         memset(pipe_staged, 0, sizeof(pipe_staged));
863
864         mixer_width = mode->hdisplay / cstate->num_mixers;
865
866         _dpu_crtc_setup_lm_bounds(crtc, state);
867
868         crtc_rect.x2 = mode->hdisplay;
869         crtc_rect.y2 = mode->vdisplay;
870
871          /* get plane state for all drm planes associated with crtc state */
872         drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
873                 struct drm_rect dst, clip = crtc_rect;
874
875                 if (IS_ERR_OR_NULL(pstate)) {
876                         rc = PTR_ERR(pstate);
877                         DPU_ERROR("%s: failed to get plane%d state, %d\n",
878                                         dpu_crtc->name, plane->base.id, rc);
879                         goto end;
880                 }
881                 if (cnt >= DPU_STAGE_MAX * 4)
882                         continue;
883
884                 pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
885                 pstates[cnt].drm_pstate = pstate;
886                 pstates[cnt].stage = pstate->normalized_zpos;
887                 pstates[cnt].pipe_id = dpu_plane_pipe(plane);
888
889                 if (pipe_staged[pstates[cnt].pipe_id]) {
890                         multirect_plane[multirect_count].r0 =
891                                 pipe_staged[pstates[cnt].pipe_id];
892                         multirect_plane[multirect_count].r1 = pstate;
893                         multirect_count++;
894
895                         pipe_staged[pstates[cnt].pipe_id] = NULL;
896                 } else {
897                         pipe_staged[pstates[cnt].pipe_id] = pstate;
898                 }
899
900                 cnt++;
901
902                 dst = drm_plane_state_dest(pstate);
903                 if (!drm_rect_intersect(&clip, &dst)) {
904                         DPU_ERROR("invalid vertical/horizontal destination\n");
905                         DPU_ERROR("display: " DRM_RECT_FMT " plane: "
906                                   DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
907                                   DRM_RECT_ARG(&dst));
908                         rc = -E2BIG;
909                         goto end;
910                 }
911         }
912
913         for (i = 1; i < SSPP_MAX; i++) {
914                 if (pipe_staged[i]) {
915                         dpu_plane_clear_multirect(pipe_staged[i]);
916
917                         if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
918                                 DPU_ERROR(
919                                         "r1 only virt plane:%d not supported\n",
920                                         pipe_staged[i]->plane->base.id);
921                                 rc  = -EINVAL;
922                                 goto end;
923                         }
924                 }
925         }
926
927         z_pos = -1;
928         for (i = 0; i < cnt; i++) {
929                 /* reset counts at every new blend stage */
930                 if (pstates[i].stage != z_pos) {
931                         left_zpos_cnt = 0;
932                         right_zpos_cnt = 0;
933                         z_pos = pstates[i].stage;
934                 }
935
936                 /* verify z_pos setting before using it */
937                 if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
938                         DPU_ERROR("> %d plane stages assigned\n",
939                                         DPU_STAGE_MAX - DPU_STAGE_0);
940                         rc = -EINVAL;
941                         goto end;
942                 } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
943                         if (left_zpos_cnt == 2) {
944                                 DPU_ERROR("> 2 planes @ stage %d on left\n",
945                                         z_pos);
946                                 rc = -EINVAL;
947                                 goto end;
948                         }
949                         left_zpos_cnt++;
950
951                 } else {
952                         if (right_zpos_cnt == 2) {
953                                 DPU_ERROR("> 2 planes @ stage %d on right\n",
954                                         z_pos);
955                                 rc = -EINVAL;
956                                 goto end;
957                         }
958                         right_zpos_cnt++;
959                 }
960
961                 pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
962                 DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
963         }
964
965         for (i = 0; i < multirect_count; i++) {
966                 if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
967                         DPU_ERROR(
968                         "multirect validation failed for planes (%d - %d)\n",
969                                         multirect_plane[i].r0->plane->base.id,
970                                         multirect_plane[i].r1->plane->base.id);
971                         rc = -EINVAL;
972                         goto end;
973                 }
974         }
975
976         atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
977
978         rc = dpu_core_perf_crtc_check(crtc, state);
979         if (rc) {
980                 DPU_ERROR("crtc%d failed performance check %d\n",
981                                 crtc->base.id, rc);
982                 goto end;
983         }
984
985         /* validate source split:
986          * use pstates sorted by stage to check planes on same stage
987          * we assume that all pipes are in source split so its valid to compare
988          * without taking into account left/right mixer placement
989          */
990         for (i = 1; i < cnt; i++) {
991                 struct plane_state *prv_pstate, *cur_pstate;
992                 struct drm_rect left_rect, right_rect;
993                 int32_t left_pid, right_pid;
994                 int32_t stage;
995
996                 prv_pstate = &pstates[i - 1];
997                 cur_pstate = &pstates[i];
998                 if (prv_pstate->stage != cur_pstate->stage)
999                         continue;
1000
1001                 stage = cur_pstate->stage;
1002
1003                 left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
1004                 left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
1005
1006                 right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
1007                 right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
1008
1009                 if (right_rect.x1 < left_rect.x1) {
1010                         swap(left_pid, right_pid);
1011                         swap(left_rect, right_rect);
1012                 }
1013
1014                 /**
1015                  * - planes are enumerated in pipe-priority order such that
1016                  *   planes with lower drm_id must be left-most in a shared
1017                  *   blend-stage when using source split.
1018                  * - planes in source split must be contiguous in width
1019                  * - planes in source split must have same dest yoff and height
1020                  */
1021                 if (right_pid < left_pid) {
1022                         DPU_ERROR(
1023                                 "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
1024                                 stage, left_pid, right_pid);
1025                         rc = -EINVAL;
1026                         goto end;
1027                 } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
1028                         DPU_ERROR("non-contiguous coordinates for src split. "
1029                                   "stage: %d left: " DRM_RECT_FMT " right: "
1030                                   DRM_RECT_FMT "\n", stage,
1031                                   DRM_RECT_ARG(&left_rect),
1032                                   DRM_RECT_ARG(&right_rect));
1033                         rc = -EINVAL;
1034                         goto end;
1035                 } else if (left_rect.y1 != right_rect.y1 ||
1036                            drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
1037                         DPU_ERROR("source split at stage: %d. invalid "
1038                                   "yoff/height: left: " DRM_RECT_FMT " right: "
1039                                   DRM_RECT_FMT "\n", stage,
1040                                   DRM_RECT_ARG(&left_rect),
1041                                   DRM_RECT_ARG(&right_rect));
1042                         rc = -EINVAL;
1043                         goto end;
1044                 }
1045         }
1046
1047 end:
1048         kfree(pstates);
1049         return rc;
1050 }
1051
1052 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
1053 {
1054         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1055         struct drm_encoder *enc;
1056
1057         trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
1058
1059         /*
1060          * Normally we would iterate through encoder_mask in crtc state to find
1061          * attached encoders. In this case, we might be disabling vblank _after_
1062          * encoder_mask has been cleared.
1063          *
1064          * Instead, we "assign" a crtc to the encoder in enable and clear it in
1065          * disable (which is also after encoder_mask is cleared). So instead of
1066          * using encoder mask, we'll ask the encoder to toggle itself iff it's
1067          * currently assigned to our crtc.
1068          *
1069          * Note also that this function cannot be called while crtc is disabled
1070          * since we use drm_crtc_vblank_on/off. So we don't need to worry
1071          * about the assigned crtcs being inconsistent with the current state
1072          * (which means no need to worry about modeset locks).
1073          */
1074         list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
1075                 trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
1076                                              dpu_crtc);
1077
1078                 dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
1079         }
1080
1081         return 0;
1082 }
1083
1084 #ifdef CONFIG_DEBUG_FS
/*
 * _dpu_debugfs_status_show - seq_file show callback for the "status" node
 * @s: seq_file to print into; s->private is the dpu_crtc set at open time
 * @data: unused
 *
 * Dumps the current mode, per-mixer lm/ctl assignment, per-plane
 * framebuffer and src/dst rectangle state, and a vblank-derived fps
 * estimate. All modeset locks are held while walking crtc/plane state so
 * the snapshot is consistent.
 *
 * Return: always 0.
 */
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
        struct dpu_crtc *dpu_crtc;
        struct dpu_plane_state *pstate = NULL;
        struct dpu_crtc_mixer *m;

        struct drm_crtc *crtc;
        struct drm_plane *plane;
        struct drm_display_mode *mode;
        struct drm_framebuffer *fb;
        struct drm_plane_state *state;
        struct dpu_crtc_state *cstate;

        int i, out_width;

        dpu_crtc = s->private;
        crtc = &dpu_crtc->base;

        drm_modeset_lock_all(crtc->dev);
        cstate = to_dpu_crtc_state(crtc->state);

        mode = &crtc->state->adjusted_mode;
        /* each mixer drives an equal horizontal slice of the display */
        out_width = mode->hdisplay / cstate->num_mixers;

        seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
                                mode->hdisplay, mode->vdisplay);

        seq_puts(s, "\n");

        for (i = 0; i < cstate->num_mixers; ++i) {
                m = &cstate->mixers[i];
                if (!m->hw_lm)
                        seq_printf(s, "\tmixer[%d] has no lm\n", i);
                else if (!m->lm_ctl)
                        seq_printf(s, "\tmixer[%d] has no ctl\n", i);
                else
                        seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
                                m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
                                out_width, mode->vdisplay);
        }

        seq_puts(s, "\n");

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                pstate = to_dpu_plane_state(plane->state);
                state = plane->state;

                if (!pstate || !state)
                        continue;

                seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
                        pstate->stage);

                if (plane->state->fb) {
                        fb = plane->state->fb;

                        /* the fourcc code is printed as its 4-char string */
                        seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
                                fb->base.id, (char *) &fb->format->format,
                                fb->width, fb->height);
                        for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
                                seq_printf(s, "cpp[%d]:%u ",
                                                i, fb->format->cpp[i]);
                        seq_puts(s, "\n\t");

                        seq_printf(s, "modifier:%8llu ", fb->modifier);
                        seq_puts(s, "\n");

                        seq_puts(s, "\t");
                        for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
                                seq_printf(s, "pitches[%d]:%8u ", i,
                                                        fb->pitches[i]);
                        seq_puts(s, "\n");

                        seq_puts(s, "\t");
                        for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
                                seq_printf(s, "offsets[%d]:%8u ", i,
                                                        fb->offsets[i]);
                        seq_puts(s, "\n");
                }

                seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
                        state->src_x, state->src_y, state->src_w, state->src_h);

                seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
                        state->crtc_x, state->crtc_y, state->crtc_w,
                        state->crtc_h);
                seq_printf(s, "\tmultirect: mode: %d index: %d\n",
                        pstate->multirect_mode, pstate->multirect_index);

                seq_puts(s, "\n");
        }
        if (dpu_crtc->vblank_cb_count) {
                /* fps averaged since the last read of this file */
                ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
                s64 diff_ms = ktime_to_ms(diff);
                s64 fps = diff_ms ? div_s64(
                                dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

                seq_printf(s,
                        "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
                                fps, dpu_crtc->vblank_cb_count,
                                ktime_to_ms(diff), dpu_crtc->play_count);

                /* reset time & count for next measurement */
                dpu_crtc->vblank_cb_count = 0;
                dpu_crtc->vblank_cb_time = ktime_set(0, 0);
        }

        drm_modeset_unlock_all(crtc->dev);

        return 0;
}
1196
1197 static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
1198 {
1199         return single_open(file, _dpu_debugfs_status_show, inode->i_private);
1200 }
1201
/*
 * DEFINE_DPU_DEBUGFS_SEQ_FOPS - generate single_open()-based
 * file_operations for a seq_file show function named <prefix>_show,
 * producing <prefix>_open and <prefix>_fops.
 */
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
static int __prefix ## _open(struct inode *inode, struct file *file)    \
{                                                                       \
        return single_open(file, __prefix ## _show, inode->i_private);  \
}                                                                       \
static const struct file_operations __prefix ## _fops = {               \
        .owner = THIS_MODULE,                                           \
        .open = __prefix ## _open,                                      \
        .release = single_release,                                      \
        .read = seq_read,                                               \
        .llseek = seq_lseek,                                            \
}
1214
1215 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
1216 {
1217         struct drm_crtc *crtc = (struct drm_crtc *) s->private;
1218         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1219
1220         seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
1221         seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
1222         seq_printf(s, "core_clk_rate: %llu\n",
1223                         dpu_crtc->cur_perf.core_clk_rate);
1224         seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
1225         seq_printf(s, "max_per_pipe_ib: %llu\n",
1226                                 dpu_crtc->cur_perf.max_per_pipe_ib);
1227
1228         return 0;
1229 }
1230 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
1231
1232 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1233 {
1234         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1235
1236         static const struct file_operations debugfs_status_fops = {
1237                 .open =         _dpu_debugfs_status_open,
1238                 .read =         seq_read,
1239                 .llseek =       seq_lseek,
1240                 .release =      single_release,
1241         };
1242
1243         dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
1244                         crtc->dev->primary->debugfs_root);
1245
1246         debugfs_create_file("status", 0400,
1247                         dpu_crtc->debugfs_root,
1248                         dpu_crtc, &debugfs_status_fops);
1249         debugfs_create_file("state", 0600,
1250                         dpu_crtc->debugfs_root,
1251                         &dpu_crtc->base,
1252                         &dpu_crtc_debugfs_state_fops);
1253
1254         return 0;
1255 }
1256 #else
/* CONFIG_DEBUG_FS disabled: debugfs setup is a no-op */
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
        return 0;
}
1261 #endif /* CONFIG_DEBUG_FS */
1262
/* drm late_register hook: create debugfs once the crtc is registered */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
        return _dpu_crtc_init_debugfs(crtc);
}
1267
/* drm early_unregister hook: tear down the debugfs tree created above */
static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

        /* recursive remove handles the "status" and "state" files too */
        debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
1274
/*
 * Core crtc vtable: legacy set_config/page_flip are routed through the
 * atomic helpers; state lifecycle and debugfs hooks are dpu-specific.
 */
static const struct drm_crtc_funcs dpu_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = dpu_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = dpu_crtc_reset,
        .atomic_duplicate_state = dpu_crtc_duplicate_state,
        .atomic_destroy_state = dpu_crtc_destroy_state,
        .late_register = dpu_crtc_late_register,
        .early_unregister = dpu_crtc_early_unregister,
};
1285
/* Atomic helper vtable: validation and commit-phase hooks for the crtc */
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
        .atomic_disable = dpu_crtc_disable,
        .atomic_enable = dpu_crtc_enable,
        .atomic_check = dpu_crtc_atomic_check,
        .atomic_begin = dpu_crtc_atomic_begin,
        .atomic_flush = dpu_crtc_atomic_flush,
};
1293
1294 /* initialize crtc */
1295 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
1296                                 struct drm_plane *cursor)
1297 {
1298         struct drm_crtc *crtc = NULL;
1299         struct dpu_crtc *dpu_crtc = NULL;
1300         int i;
1301
1302         dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
1303         if (!dpu_crtc)
1304                 return ERR_PTR(-ENOMEM);
1305
1306         crtc = &dpu_crtc->base;
1307         crtc->dev = dev;
1308
1309         spin_lock_init(&dpu_crtc->spin_lock);
1310         atomic_set(&dpu_crtc->frame_pending, 0);
1311
1312         init_completion(&dpu_crtc->frame_done_comp);
1313
1314         INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
1315
1316         for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
1317                 INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
1318                 list_add(&dpu_crtc->frame_events[i].list,
1319                                 &dpu_crtc->frame_event_list);
1320                 kthread_init_work(&dpu_crtc->frame_events[i].work,
1321                                 dpu_crtc_frame_event_work);
1322         }
1323
1324         drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
1325                                 NULL);
1326
1327         drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
1328
1329         /* save user friendly CRTC name for later */
1330         snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
1331
1332         /* initialize event handling */
1333         spin_lock_init(&dpu_crtc->event_lock);
1334
1335         DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
1336         return crtc;
1337 }