1 // SPDX-License-Identifier: GPL-2.0
/*
3 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
4 * Author: James.Qian.Wang <james.qian.wang@arm.com>
*/
8 #include <drm/drm_print.h>
10 #include "komeda_dev.h"
11 #include "komeda_kms.h"
12 #include "komeda_pipeline.h"
13 #include "komeda_framebuffer.h"
15 static inline bool is_switching_user(void *old, void *new)
23 static struct komeda_pipeline_state *
24 komeda_pipeline_get_state(struct komeda_pipeline *pipe,
25 struct drm_atomic_state *state)
27 struct drm_private_state *priv_st;
29 priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj);
31 return ERR_CAST(priv_st);
33 return priv_to_pipe_st(priv_st);
36 struct komeda_pipeline_state *
37 komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
38 struct drm_atomic_state *state)
40 struct drm_private_state *priv_st;
42 priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj);
44 return priv_to_pipe_st(priv_st);
48 static struct komeda_pipeline_state *
49 komeda_pipeline_get_new_state(struct komeda_pipeline *pipe,
50 struct drm_atomic_state *state)
52 struct drm_private_state *priv_st;
54 priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj);
56 return priv_to_pipe_st(priv_st);
60 /* Assign pipeline for crtc */
61 static struct komeda_pipeline_state *
62 komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe,
63 struct drm_atomic_state *state,
64 struct drm_crtc *crtc)
66 struct komeda_pipeline_state *st;
68 st = komeda_pipeline_get_state(pipe, state);
72 if (is_switching_user(crtc, st->crtc)) {
73 DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n",
74 drm_crtc_index(crtc), pipe->id);
75 return ERR_PTR(-EBUSY);
78 /* pipeline only can be disabled when the it is free or unused */
79 if (!crtc && st->active_comps) {
80 DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id);
81 return ERR_PTR(-EBUSY);
87 struct komeda_crtc_state *kcrtc_st;
89 kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state,
92 kcrtc_st->active_pipes |= BIT(pipe->id);
93 kcrtc_st->affected_pipes |= BIT(pipe->id);
98 static struct komeda_component_state *
99 komeda_component_get_state(struct komeda_component *c,
100 struct drm_atomic_state *state)
102 struct drm_private_state *priv_st;
104 WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));
106 priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
108 return ERR_CAST(priv_st);
110 return priv_to_comp_st(priv_st);
113 static struct komeda_component_state *
114 komeda_component_get_old_state(struct komeda_component *c,
115 struct drm_atomic_state *state)
117 struct drm_private_state *priv_st;
119 priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
121 return priv_to_comp_st(priv_st);
/**
126 * komeda_component_get_state_and_set_user()
 *
128 * @c: component to get state and set user
129 * @state: global atomic state
130 * @user: direct user, the binding user
131 * @crtc: the CRTC user, the big boss :)
 *
133 * This function accepts two users:
134 * - The direct user: can be a plane/crtc/wb_connector, depending on the component
135 * - The big boss (CRTC)
136 * CRTC is the big boss (the final user), because all component resources
137 * eventually will be assigned to a CRTC; e.g. a layer will be bound to a
138 * kms_plane, but the kms plane will in turn be bound to a CRTC.
 *
140 * The big boss (CRTC) is for pipeline assignment: since a &komeda_component isn't
141 * independent it cannot be assigned to a CRTC freely, but belongs to a specific
142 * pipeline; only a pipeline can be shared between CRTCs, and the pipeline as a whole
143 * (including all its internal components) is assigned to a specific CRTC.
 *
145 * So when setting a user on a komeda_component, first check the status of
146 * component->pipeline to see if the pipeline is available on this specific
147 * CRTC. If the pipeline is busy (assigned to another CRTC), even if the required
148 * component is free, the component still cannot be assigned to the direct user.
 */
150 static struct komeda_component_state *
151 komeda_component_get_state_and_set_user(struct komeda_component *c,
152 struct drm_atomic_state *state,
154 struct drm_crtc *crtc)
156 struct komeda_pipeline_state *pipe_st;
157 struct komeda_component_state *st;
159 /* First check if the pipeline is available */
160 pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
163 return ERR_CAST(pipe_st);
165 st = komeda_component_get_state(c, state);
169 /* check if the component has been occupied */
170 if (is_switching_user(user, st->binding_user)) {
171 DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
172 return ERR_PTR(-EBUSY);
175 st->binding_user = user;
176 /* mark the component as active if user is valid */
177 if (st->binding_user)
178 pipe_st->active_comps |= BIT(c->id);
184 komeda_component_add_input(struct komeda_component_state *state,
185 struct komeda_component_output *input,
188 struct komeda_component *c = state->component;
190 WARN_ON((idx < 0 || idx >= c->max_active_inputs));
192 /* since the inputs[i] is only valid when it is active. So if a input[i]
193 * is a newly enabled input which switches from disable to enable, then
194 * the old inputs[i] is undefined (NOT zeroed), we can not rely on
195 * memcmp, but directly mark it changed
197 if (!has_bit(idx, state->affected_inputs) ||
198 memcmp(&state->inputs[idx], input, sizeof(*input))) {
199 memcpy(&state->inputs[idx], input, sizeof(*input));
200 state->changed_active_inputs |= BIT(idx);
202 state->active_inputs |= BIT(idx);
203 state->affected_inputs |= BIT(idx);
207 komeda_component_check_input(struct komeda_component_state *state,
208 struct komeda_component_output *input,
211 struct komeda_component *c = state->component;
213 if ((idx < 0) || (idx >= c->max_active_inputs)) {
214 DRM_DEBUG_ATOMIC("%s invalid input id: %d.\n", c->name, idx);
218 if (has_bit(idx, state->active_inputs)) {
219 DRM_DEBUG_ATOMIC("%s required input_id: %d has been occupied already.\n",
228 komeda_component_set_output(struct komeda_component_output *output,
229 struct komeda_component *comp,
232 output->component = comp;
233 output->output_port = output_port;
237 komeda_component_validate_private(struct komeda_component *c,
238 struct komeda_component_state *st)
242 if (!c->funcs->validate)
245 err = c->funcs->validate(c, st);
247 DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);
252 /* Get current available scaler from the component->supported_outputs */
253 static struct komeda_scaler *
254 komeda_component_get_avail_scaler(struct komeda_component *c,
255 struct drm_atomic_state *state)
257 struct komeda_pipeline_state *pipe_st;
260 pipe_st = komeda_pipeline_get_state(c->pipeline, state);
264 avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
265 KOMEDA_PIPELINE_SCALERS;
267 c = komeda_component_pickup_output(c, avail_scalers);
273 komeda_layer_check_cfg(struct komeda_layer *layer,
274 struct komeda_fb *kfb,
275 struct komeda_data_flow_cfg *dflow)
277 u32 hsize_in, vsize_in;
279 if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
282 if (komeda_fb_check_src_coords(kfb, dflow->in_x, dflow->in_y,
283 dflow->in_w, dflow->in_h))
286 if (layer->base.id == KOMEDA_COMPONENT_WB_LAYER) {
287 hsize_in = dflow->out_w;
288 vsize_in = dflow->out_h;
290 hsize_in = dflow->in_w;
291 vsize_in = dflow->in_h;
294 if (!in_range(&layer->hsize_in, hsize_in)) {
295 DRM_DEBUG_ATOMIC("invalidate src_w %d.\n", hsize_in);
299 if (!in_range(&layer->vsize_in, vsize_in)) {
300 DRM_DEBUG_ATOMIC("invalidate src_h %d.\n", vsize_in);
308 komeda_layer_validate(struct komeda_layer *layer,
309 struct komeda_plane_state *kplane_st,
310 struct komeda_data_flow_cfg *dflow)
312 struct drm_plane_state *plane_st = &kplane_st->base;
313 struct drm_framebuffer *fb = plane_st->fb;
314 struct komeda_fb *kfb = to_kfb(fb);
315 struct komeda_component_state *c_st;
316 struct komeda_layer_state *st;
319 err = komeda_layer_check_cfg(layer, kfb, dflow);
323 c_st = komeda_component_get_state_and_set_user(&layer->base,
324 plane_st->state, plane_st->plane, plane_st->crtc);
326 return PTR_ERR(c_st);
328 st = to_layer_st(c_st);
330 st->rot = dflow->rot;
333 st->hsize = kfb->aligned_w;
334 st->vsize = kfb->aligned_h;
335 st->afbc_crop_l = dflow->in_x;
336 st->afbc_crop_r = kfb->aligned_w - dflow->in_x - dflow->in_w;
337 st->afbc_crop_t = dflow->in_y;
338 st->afbc_crop_b = kfb->aligned_h - dflow->in_y - dflow->in_h;
340 st->hsize = dflow->in_w;
341 st->vsize = dflow->in_h;
348 for (i = 0; i < fb->format->num_planes; i++)
349 st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
352 err = komeda_component_validate_private(&layer->base, c_st);
356 /* update the data flow for the next stage */
357 komeda_component_set_output(&dflow->input, &layer->base, 0);
360 * The rotation has been handled by layer, so adjusted the data flow for
363 if (drm_rotation_90_or_270(st->rot))
364 swap(dflow->in_h, dflow->in_w);
370 komeda_wb_layer_validate(struct komeda_layer *wb_layer,
371 struct drm_connector_state *conn_st,
372 struct komeda_data_flow_cfg *dflow)
374 struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
375 struct komeda_component_state *c_st;
376 struct komeda_layer_state *st;
379 err = komeda_layer_check_cfg(wb_layer, kfb, dflow);
383 c_st = komeda_component_get_state_and_set_user(&wb_layer->base,
384 conn_st->state, conn_st->connector, conn_st->crtc);
386 return PTR_ERR(c_st);
388 st = to_layer_st(c_st);
390 st->hsize = dflow->out_w;
391 st->vsize = dflow->out_h;
393 for (i = 0; i < kfb->base.format->num_planes; i++)
394 st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->out_x,
397 komeda_component_add_input(&st->base, &dflow->input, 0);
398 komeda_component_set_output(&dflow->input, &wb_layer->base, 0);
403 static bool scaling_ratio_valid(u32 size_in, u32 size_out,
404 u32 max_upscaling, u32 max_downscaling)
406 if (size_out > size_in * max_upscaling)
408 else if (size_in > size_out * max_downscaling)
414 komeda_scaler_check_cfg(struct komeda_scaler *scaler,
415 struct komeda_crtc_state *kcrtc_st,
416 struct komeda_data_flow_cfg *dflow)
418 u32 hsize_in, vsize_in, hsize_out, vsize_out;
421 hsize_in = dflow->in_w;
422 vsize_in = dflow->in_h;
423 hsize_out = dflow->out_w;
424 vsize_out = dflow->out_h;
426 if (!in_range(&scaler->hsize, hsize_in) ||
427 !in_range(&scaler->hsize, hsize_out)) {
428 DRM_DEBUG_ATOMIC("Invalid horizontal sizes");
432 if (!in_range(&scaler->vsize, vsize_in) ||
433 !in_range(&scaler->vsize, vsize_out)) {
434 DRM_DEBUG_ATOMIC("Invalid vertical sizes");
438 /* If input comes from compiz that means the scaling is for writeback
439 * and scaler can not do upscaling for writeback
441 if (has_bit(dflow->input.component->id, KOMEDA_PIPELINE_COMPIZS))
444 max_upscaling = scaler->max_upscaling;
446 if (!scaling_ratio_valid(hsize_in, hsize_out, max_upscaling,
447 scaler->max_downscaling)) {
448 DRM_DEBUG_ATOMIC("Invalid horizontal scaling ratio");
452 if (!scaling_ratio_valid(vsize_in, vsize_out, max_upscaling,
453 scaler->max_downscaling)) {
454 DRM_DEBUG_ATOMIC("Invalid vertical scaling ratio");
458 if (hsize_in > hsize_out || vsize_in > vsize_out) {
459 struct komeda_pipeline *pipe = scaler->base.pipeline;
462 err = pipe->funcs->downscaling_clk_check(pipe,
463 &kcrtc_st->base.adjusted_mode,
464 komeda_calc_aclk(kcrtc_st), dflow);
466 DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n");
475 komeda_scaler_validate(void *user,
476 struct komeda_crtc_state *kcrtc_st,
477 struct komeda_data_flow_cfg *dflow)
479 struct drm_atomic_state *drm_st = kcrtc_st->base.state;
480 struct komeda_component_state *c_st;
481 struct komeda_scaler_state *st;
482 struct komeda_scaler *scaler;
485 if (!(dflow->en_scaling || dflow->en_img_enhancement))
488 scaler = komeda_component_get_avail_scaler(dflow->input.component,
491 DRM_DEBUG_ATOMIC("No scaler available");
495 err = komeda_scaler_check_cfg(scaler, kcrtc_st, dflow);
499 c_st = komeda_component_get_state_and_set_user(&scaler->base,
500 drm_st, user, kcrtc_st->base.crtc);
502 return PTR_ERR(c_st);
504 st = to_scaler_st(c_st);
506 st->hsize_in = dflow->in_w;
507 st->vsize_in = dflow->in_h;
508 st->hsize_out = dflow->out_w;
509 st->vsize_out = dflow->out_h;
511 /* Enable alpha processing if the next stage needs the pixel alpha */
512 st->en_alpha = dflow->pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE;
513 st->en_scaling = dflow->en_scaling;
514 st->en_img_enhancement = dflow->en_img_enhancement;
516 komeda_component_add_input(&st->base, &dflow->input, 0);
517 komeda_component_set_output(&dflow->input, &scaler->base, 0);
521 void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
522 u16 *hsize, u16 *vsize)
524 struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;
527 *hsize = m->hdisplay;
529 *vsize = m->vdisplay;
533 komeda_compiz_set_input(struct komeda_compiz *compiz,
534 struct komeda_crtc_state *kcrtc_st,
535 struct komeda_data_flow_cfg *dflow)
537 struct drm_atomic_state *drm_st = kcrtc_st->base.state;
538 struct komeda_component_state *c_st, *old_st;
539 struct komeda_compiz_input_cfg *cin;
540 u16 compiz_w, compiz_h;
541 int idx = dflow->blending_zorder;
543 pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h);
544 /* check display rect */
545 if ((dflow->out_x + dflow->out_w > compiz_w) ||
546 (dflow->out_y + dflow->out_h > compiz_h) ||
547 dflow->out_w == 0 || dflow->out_h == 0) {
548 DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n",
549 dflow->out_x, dflow->out_y,
550 dflow->out_w, dflow->out_h);
554 c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st,
555 kcrtc_st->base.crtc, kcrtc_st->base.crtc);
557 return PTR_ERR(c_st);
559 if (komeda_component_check_input(c_st, &dflow->input, idx))
562 cin = &(to_compiz_st(c_st)->cins[idx]);
564 cin->hsize = dflow->out_w;
565 cin->vsize = dflow->out_h;
566 cin->hoffset = dflow->out_x;
567 cin->voffset = dflow->out_y;
568 cin->pixel_blend_mode = dflow->pixel_blend_mode;
569 cin->layer_alpha = dflow->layer_alpha;
571 old_st = komeda_component_get_old_state(&compiz->base, drm_st);
574 /* compare with old to check if this input has been changed */
575 if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
576 c_st->changed_active_inputs |= BIT(idx);
578 komeda_component_add_input(c_st, &dflow->input, idx);
584 komeda_compiz_validate(struct komeda_compiz *compiz,
585 struct komeda_crtc_state *state,
586 struct komeda_data_flow_cfg *dflow)
588 struct komeda_component_state *c_st;
589 struct komeda_compiz_state *st;
591 c_st = komeda_component_get_state_and_set_user(&compiz->base,
592 state->base.state, state->base.crtc, state->base.crtc);
594 return PTR_ERR(c_st);
596 st = to_compiz_st(c_st);
598 pipeline_composition_size(state, &st->hsize, &st->vsize);
600 komeda_component_set_output(&dflow->input, &compiz->base, 0);
602 /* compiz output dflow will be fed to the next pipeline stage, prepare
603 * the data flow configuration for the next stage
606 dflow->in_w = st->hsize;
607 dflow->in_h = st->vsize;
608 dflow->out_w = dflow->in_w;
609 dflow->out_h = dflow->in_h;
610 /* the output data of compiz doesn't have alpha, it only can be
611 * used as bottom layer when blend it with master layers
613 dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
614 dflow->layer_alpha = 0xFF;
615 dflow->blending_zorder = 0;
622 komeda_improc_validate(struct komeda_improc *improc,
623 struct komeda_crtc_state *kcrtc_st,
624 struct komeda_data_flow_cfg *dflow)
626 struct drm_crtc *crtc = kcrtc_st->base.crtc;
627 struct komeda_component_state *c_st;
628 struct komeda_improc_state *st;
630 c_st = komeda_component_get_state_and_set_user(&improc->base,
631 kcrtc_st->base.state, crtc, crtc);
633 return PTR_ERR(c_st);
635 st = to_improc_st(c_st);
637 st->hsize = dflow->in_w;
638 st->vsize = dflow->in_h;
640 komeda_component_add_input(&st->base, &dflow->input, 0);
641 komeda_component_set_output(&dflow->input, &improc->base, 0);
647 komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
648 struct komeda_crtc_state *kcrtc_st,
649 struct komeda_data_flow_cfg *dflow)
651 struct drm_crtc *crtc = kcrtc_st->base.crtc;
652 struct komeda_timing_ctrlr_state *st;
653 struct komeda_component_state *c_st;
655 c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
656 kcrtc_st->base.state, crtc, crtc);
658 return PTR_ERR(c_st);
660 st = to_ctrlr_st(c_st);
662 komeda_component_add_input(&st->base, &dflow->input, 0);
663 komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
668 void komeda_complete_data_flow_cfg(struct komeda_data_flow_cfg *dflow)
673 if (drm_rotation_90_or_270(dflow->rot))
676 dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
679 int komeda_build_layer_data_flow(struct komeda_layer *layer,
680 struct komeda_plane_state *kplane_st,
681 struct komeda_crtc_state *kcrtc_st,
682 struct komeda_data_flow_cfg *dflow)
684 struct drm_plane *plane = kplane_st->base.plane;
685 struct komeda_pipeline *pipe = layer->base.pipeline;
688 DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
689 layer->base.name, plane->base.id, plane->name,
690 dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
691 dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
693 err = komeda_layer_validate(layer, kplane_st, dflow);
697 err = komeda_scaler_validate(plane, kcrtc_st, dflow);
701 err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
706 /* writeback data path: compiz -> scaler -> wb_layer -> memory */
707 int komeda_build_wb_data_flow(struct komeda_layer *wb_layer,
708 struct drm_connector_state *conn_st,
709 struct komeda_crtc_state *kcrtc_st,
710 struct komeda_data_flow_cfg *dflow)
712 struct drm_connector *conn = conn_st->connector;
715 err = komeda_scaler_validate(conn, kcrtc_st, dflow);
719 return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
722 /* build display output data flow, the data path is:
723 * compiz -> improc -> timing_ctrlr
725 int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
726 struct komeda_crtc_state *kcrtc_st)
728 struct komeda_pipeline *master = kcrtc->master;
729 struct komeda_data_flow_cfg m_dflow; /* master data flow */
732 memset(&m_dflow, 0, sizeof(m_dflow));
734 err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
738 err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
742 err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
750 komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
751 struct komeda_pipeline_state *new)
753 struct drm_atomic_state *drm_st = new->obj.state;
754 struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
755 struct komeda_component_state *c_st;
756 struct komeda_component *c;
757 u32 disabling_comps, id;
761 disabling_comps = (~new->active_comps) & old->active_comps;
763 /* unbound all disabling component */
764 dp_for_each_set_bit(id, disabling_comps) {
765 c = komeda_pipeline_get_component(pipe, id);
766 c_st = komeda_component_get_state_and_set_user(c,
767 drm_st, NULL, new->crtc);
768 WARN_ON(IS_ERR(c_st));
772 /* release unclaimed pipeline resource */
773 int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
774 struct komeda_crtc_state *kcrtc_st)
776 struct drm_atomic_state *drm_st = kcrtc_st->base.state;
777 struct komeda_pipeline_state *st;
779 /* ignore the pipeline which is not affected */
780 if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
783 if (has_bit(pipe->id, kcrtc_st->active_pipes))
784 st = komeda_pipeline_get_new_state(pipe, drm_st);
786 st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
788 if (WARN_ON(IS_ERR_OR_NULL(st)))
791 komeda_pipeline_unbound_components(pipe, st);
796 void komeda_pipeline_disable(struct komeda_pipeline *pipe,
797 struct drm_atomic_state *old_state)
799 struct komeda_pipeline_state *old;
800 struct komeda_component *c;
801 struct komeda_component_state *c_st;
802 u32 id, disabling_comps = 0;
804 old = komeda_pipeline_get_old_state(pipe, old_state);
806 disabling_comps = old->active_comps;
807 DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
808 pipe->id, disabling_comps);
810 dp_for_each_set_bit(id, disabling_comps) {
811 c = komeda_pipeline_get_component(pipe, id);
812 c_st = priv_to_comp_st(c->obj.state);
815 * If we disabled a component then all active_inputs should be
816 * put in the list of changed_active_inputs, so they get
818 * This usually happens during a modeset when the pipeline is
819 * first disabled and then the actual state gets committed
822 c_st->changed_active_inputs |= c_st->active_inputs;
824 c->funcs->disable(c);
828 void komeda_pipeline_update(struct komeda_pipeline *pipe,
829 struct drm_atomic_state *old_state)
831 struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
832 struct komeda_pipeline_state *old;
833 struct komeda_component *c;
834 u32 id, changed_comps = 0;
836 old = komeda_pipeline_get_old_state(pipe, old_state);
838 changed_comps = new->active_comps | old->active_comps;
840 DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
841 pipe->id, new->active_comps, changed_comps);
843 dp_for_each_set_bit(id, changed_comps) {
844 c = komeda_pipeline_get_component(pipe, id);
846 if (new->active_comps & BIT(c->id))
847 c->funcs->update(c, priv_to_comp_st(c->obj.state));
849 c->funcs->disable(c);