/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all display interfaces.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 */
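
/*
 * A rough usage sketch (hypothetical caller, not part of this file): a
 * CRTC/encoder typically requests a CTL, describes the data path, then
 * flushes the programmed registers:
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
 *	mdp5_ctl_set_pipeline(ctl, pipeline);
 *	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, stage_cnt, flags);
 *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_lm(lm), true);
 */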

#define CTL_STAT_BUSY		0x1
#define CTL_STAT_BOOKED		0x2
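
/*
 * A CTL's status combines these bits: BUSY is set while the CTL is handed
 * out by mdp5_ctlm_request(); BOOKED reserves a CTL for the dual-DSI
 * single-FLUSH pairing set up in mdp5_ctlm_init().
 */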

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;
	u32 id;
	/* CTL status bitmask */
	u32 status;
	bool encoder_enabled;
	/* pending flush_mask bits */
	u32 flush_mask;
	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;
	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;
	bool cursor_on;
	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;
	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].status) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}

static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}

static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	struct mdp5_interface *intf = pipeline->intf;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;
	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;
	default:
		break;
	}

	if (pipeline->r_mixer)
		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
			  MDP5_CTL_OP_PACK_3D(1);

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
	struct mdp5_interface *intf = pipeline->intf;

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, pipeline);

	return 0;
}

static bool start_signal_needed(struct mdp5_ctl *ctl,
				struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (!ctl->encoder_enabled)
		return false;

	switch (intf->type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}

/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to be
 * executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl: the CTL instance
 * @pipeline: the encoder's data path
 * @enabled: true when the encoder is ready for data streaming; false otherwise
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
			       struct mdp5_pipeline *pipeline,
			       bool enabled)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->encoder_enabled = enabled;
	DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return 0;
}

/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;

	if (unlikely(WARN_ON(!mixer))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
			ctl->id);
		return -EINVAL;
	}

	if (pipeline->r_mixer) {
		dev_err(ctl_mgr->dev->dev, "unsupported configuration");
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl->cursor_on = enable;
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}
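
/*
 * A minimal follow-up sketch (hypothetical caller): per the note above, the
 * cursor change only takes effect once the CTL registers are flushed:
 *
 *	mdp5_ctl_set_cursor(ctl, pipeline, 0, true);
 *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_cursor(0), true);
 */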

static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	default: return 0;
	}
}
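
/*
 * Pipes staged at STAGE6 or above, plus the cursor pipes, cannot be fully
 * described in the base LAYER register; their extra stage bits live in the
 * LAYER_EXT register instead (see the guard at the top of the function
 * below).
 */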
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
	case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
	default: return 0;
	}
}

static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

#define PIPE_LEFT	0
#define PIPE_RIGHT	1
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
		blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
			r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}
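
/*
 * A worked example (illustrative values): staging SSPP_VIG0 at STAGE0 and
 * SSPP_RGB0 at STAGE1 on the left mixer accumulates
 *
 *	blend_cfg = MDP5_CTL_LAYER_REG_VIG0(STAGE0) |
 *		    MDP5_CTL_LAYER_REG_RGB0(STAGE1);
 *
 * which is then written to REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm).
 */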

u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
	case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0: return MDP5_CTL_FLUSH_LM0;
	case 1: return MDP5_CTL_FLUSH_LM1;
	case 2: return MDP5_CTL_FLUSH_LM2;
	case 5: return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, the cursor bit is the same as the LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

	return sw_mask;
}
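
/*
 * For example (an assumed hardware config): on a target whose flush_hw_mask
 * lacks a dedicated MDP5_CTL_FLUSH_CURSOR_0 bit, a request to flush the
 * cursor is translated into a flush of the layer mixer that scans it out.
 */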

static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
		u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
				*flush_id);
		}
	}
}

/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate that several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask, bool start)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (!start) {
		ctl->flush_mask |= flush_mask;
		return curr_ctl_flush_mask;
	} else {
		flush_mask |= ctl->flush_mask;
		ctl->flush_mask = 0;
	}

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return curr_ctl_flush_mask;
}
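
/*
 * A minimal sketch of a two-phase commit (hypothetical caller): flush bits
 * can be staged with start=false (accumulated in ctl->flush_mask) and then
 * released together in a single FLUSH write:
 *
 *	mdp5_ctl_commit(ctl, pipeline,
 *			mdp_ctl_flush_mask_pipe(SSPP_VIG0), false);
 *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_lm(lm), true);
 */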

u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}

/*
 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 */
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing silently if hw doesn't support it */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
		   MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}
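
/*
 * A usage sketch (hypothetical caller, names assumed): the dual-DSI setup
 * path would pair the two booked CTLs once both have been requested:
 *
 *	mdp5_ctl_pair(ctl0, ctl1, true);
 */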

/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * @return fail if no CTL is available.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category first */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		"fall back to the other CTL category for INTF %d!\n", intf_num);

	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
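
/*
 * A usage sketch (hypothetical caller; error handling assumed): an encoder
 * driving INTF1 normally receives one of the booked CTLs:
 *
 *	struct mdp5_ctl *ctl = mdp5_ctlm_request(ctl_mgr, 1);
 *	if (!ctl)
 *		return -EINVAL;
 */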

void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
	 * DSI interfaces to support the single FLUSH feature (flush both CTL0
	 * and CTL1 with only one write, into CTL0's FLUSH register), keeping
	 * the two DSI pipes in sync. Single FLUSH is supported from hw rev
	 * v3.0 onward.
	 */
	if (rev >= 3) {
		ctl_mgr->single_flush_supported = true;
		/* Reserve CTL0/1 for INTF1/2 */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}