/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "dm_services.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
/*******************************************************************************
 * Private functions
 ******************************************************************************/
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num, ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__, connectors_num, num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}

	return ret;
}
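
/*
 * Illustrative usage sketch (not part of this file): a DM layer enabling
 * variable refresh on a single stream would typically pass a one-element
 * stream array; vmin/vmax below are assumed to come from the display's
 * supported DRR range.
 *
 *	struct dc_stream_state *streams[1] = { stream };
 *
 *	dc_stream_adjust_vmin_vmax(dc, streams, 1, vmin, vmax);
 */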
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}

	return ret;
}
/**
 * dc_stream_configure_crc: Configure CRC capture for the given stream.
 * @dc: DC object.
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	dm_logger_write(dc->ctx->logger, LOG_WARNING, "CRC capture not supported.");
	return false;
}
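
/*
 * Illustrative usage sketch (assumptions flagged inline): a debugfs CRC hook
 * in the DM layer would enable continuous capture like this; "stream" is
 * assumed to be the stream backing the CRTC being monitored.
 *
 *	if (!dc_stream_configure_crc(dc, stream, true, true))
 *		return -EINVAL;	// hypothetical caller error policy
 */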
/**
 * dc_stream_get_crc: Get CRC values for the given stream.
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	dm_logger_write(dc->ctx->logger, LOG_WARNING, "CRC capture not supported.");
	return false;
}
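
/*
 * Illustrative usage sketch: pairs with dc_stream_configure_crc() above.
 * The locals and the consumer below are hypothetical.
 *
 *	uint32_t crc_r, crc_g, crc_b;
 *
 *	if (dc_stream_get_crc(dc, stream, &crc_r, &crc_g, &crc_b))
 *		report_crc(crc_r, crc_g, crc_b);	// hypothetical consumer
 */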
void dc_stream_set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
static void destruct(struct dc *dc)
{
	dc_release_state(dc->current_state);
	dc->current_state = NULL;

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	if (dc->ctx->logger)
		dal_logger_destroy(&dc->ctx->logger);

	kfree(dc->ctx);
	dc->ctx = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;
#endif
}
static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc->ctx = dc_ctx;

	dc->current_state = dc_create_state();

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	/* Create logger */
	logger = dal_logger_create(dc_ctx, init_params->log_mask);

	if (!logger) {
		/* can *not* call logger; call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto fail;
	}
	dc_ctx->logger = logger;
	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	destruct(dc);
	return false;
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state();
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
/*******************************************************************************
 * Public functions
 ******************************************************************************/
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	dc->config = init_params->flags;

	dm_logger_write(dc->ctx->logger, LOG_DC,
			"Display Core initialized\n");

	/* TODO: missing feature to be enabled */
	dc->debug.disable_dfs_bypass = true;

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}
void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
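
/*
 * Illustrative lifecycle sketch (not part of this file): the DM layer
 * creates one dc instance per adapter at init and tears it down at unload.
 * Population of init_data is elided; see struct dc_init_data.
 *
 *	struct dc *dc = dc_create(&init_data);
 *
 *	if (!dc)
 *		return -ENOMEM;	// hypothetical caller policy
 *	...
 *	dc_destroy(&dc);	// also NULLs the caller's pointer
 */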
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}
static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}
bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
					dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}
/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context); /* use new pipe config in new context */
	}

	/* Program hardware */
	dc->hwss.ready_shared_resources(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context */
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_sink *sink = context->streams[i]->sink;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
						context->streams[l] == pipe->stream &&
						dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_khz);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	dc->hwss.optimize_shared_resources(dc);

	return result;
}
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return DC_OK;

	dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(stream,
				dc->ctx->logger,
				LOG_DC);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
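
/*
 * Illustrative usage sketch: a mode-set path builds a new dc_state, commits
 * it, then drops its own reference. Per the comment on
 * dc_commit_state_no_check(), dc retains the context it installs as
 * current_state, so releasing the caller's reference is safe afterwards.
 *
 *	struct dc_state *context = dc_create_state();
 *
 *	// ... populate context via resource/stream helpers ...
 *	if (dc_commit_state(dc, context))
 *		dc_release_state(context);	// dc now holds its own reference
 */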
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
				context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	/* 3rd param should be true, temporary w/a for RV */
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
#else
	dc->hwss.set_bandwidth(dc, context, true);
#endif
	return true;
}
/*
 * TODO this whole function needs to go
 *
 * dc_surface_update is needlessly complex. See if we can just replace this
 * with a dc_plane_state and follow the atomic model a bit more closely here.
 */
bool dc_commit_planes_to_stream(
		struct dc *dc,
		struct dc_plane_state **plane_states,
		uint8_t new_plane_count,
		struct dc_stream_state *dc_stream,
		struct dc_state *state)
{
	/* no need to dynamically allocate this. it's pretty small */
	struct dc_surface_update updates[MAX_SURFACES];
	struct dc_flip_addrs *flip_addr;
	struct dc_plane_info *plane_info;
	struct dc_scaling_info *scaling_info;
	int i;
	struct dc_stream_update *stream_update =
			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);

	if (!stream_update) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
			    GFP_KERNEL);
	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
			     GFP_KERNEL);
	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
			       GFP_KERNEL);

	if (!flip_addr || !plane_info || !scaling_info) {
		kfree(flip_addr);
		kfree(plane_info);
		kfree(scaling_info);
		kfree(stream_update);
		return false;
	}

	memset(updates, 0, sizeof(updates));

	stream_update->src = dc_stream->src;
	stream_update->dst = dc_stream->dst;
	stream_update->out_transfer_func = dc_stream->out_transfer_func;

	for (i = 0; i < new_plane_count; i++) {
		updates[i].surface = plane_states[i];
		updates[i].gamma =
			(struct dc_gamma *)plane_states[i]->gamma_correction;
		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
		flip_addr[i].address = plane_states[i]->address;
		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
		plane_info[i].color_space = plane_states[i]->color_space;
		plane_info[i].input_tf = plane_states[i]->input_tf;
		plane_info[i].format = plane_states[i]->format;
		plane_info[i].plane_size = plane_states[i]->plane_size;
		plane_info[i].rotation = plane_states[i]->rotation;
		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
		plane_info[i].stereo_format = plane_states[i]->stereo_format;
		plane_info[i].tiling_info = plane_states[i]->tiling_info;
		plane_info[i].visible = plane_states[i]->visible;
		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
		plane_info[i].dcc = plane_states[i]->dcc;
		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
		scaling_info[i].src_rect = plane_states[i]->src_rect;
		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
		scaling_info[i].clip_rect = plane_states[i]->clip_rect;

		updates[i].flip_addr = &flip_addr[i];
		updates[i].plane_info = &plane_info[i];
		updates[i].scaling_info = &scaling_info[i];
	}

	dc_commit_updates_for_stream(
			dc,
			updates,
			new_plane_count,
			dc_stream, stream_update, plane_states, state);

	kfree(flip_addr);
	kfree(plane_info);
	kfree(scaling_info);
	kfree(stream_update);
	return true;
}
struct dc_state *dc_create_state(void)
{
	struct dc_state *context = kzalloc(sizeof(struct dc_state),
					   GFP_KERNEL);

	if (!context)
		return NULL;

	kref_init(&context->refcount);
	return context;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
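
/*
 * Illustrative refcount sketch: dc_state follows the usual kref pattern.
 * dc_create_state() returns a context with refcount 1; each additional
 * holder pairs dc_retain_state() with dc_release_state(), and the last
 * release triggers dc_state_free().
 *
 *	struct dc_state *ctx = dc_create_state();	// refcount == 1
 *
 *	dc_retain_state(ctx);	// second holder
 *	dc_release_state(ctx);	// drop second holder
 *	dc_release_state(ctx);	// final put -> destruct and kfree
 */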
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}
static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		return 12;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space)
		update_flags->bits.color_space_change = 1;

	if (u->plane_info->input_tf != u->surface->input_tf)
		update_flags->bits.input_tf_change = 1;

	if (u->plane_info->sdr_white_level != u->surface->sdr_white_level)
		update_flags->bits.output_tf_change = 1;

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
		update_flags->bits.horizontal_mirror_change = 1;

	if (u->plane_info->rotation != u->surface->rotation)
		update_flags->bits.rotation_change = 1;

	if (u->plane_info->format != u->surface->format)
		update_flags->bits.pixel_format_change = 1;

	if (u->plane_info->stereo_format != u->surface->stereo_format)
		update_flags->bits.stereo_format_change = 1;

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
		update_flags->bits.per_pixel_alpha_change = 1;

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
			|| u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
		update_flags->bits.dcc_change = 1;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format))
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;

	if (u->gamma && dce_use_lut(u->plane_info->format))
		update_flags->bits.gamma_change = 1;

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		/* todo: below is HW dependent; we should add a hook to
		 * DCE/N resource and validate there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
			/* swizzled mode requires RQ to be set up properly,
			 * thus we need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
	}

	if (update_flags->bits.rotation_change
			|| update_flags->bits.stereo_format_change
			|| update_flags->bits.pixel_format_change
			|| update_flags->bits.gamma_change
			|| update_flags->bits.bpp_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.output_tf_change)
		return UPDATE_TYPE_FULL;

	return UPDATE_TYPE_MED;
}
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
				|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
			|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	update_flags->raw = 0; /* Reset all flags */

	if (!is_surface_in_context(context, u->surface)) {
		update_flags->bits.new_plane = 1;
		return UPDATE_TYPE_FULL;
	}

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (update_flags->bits.in_transfer_func_change
			|| update_flags->bits.input_csc_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update)
		return UPDATE_TYPE_FULL;

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL)
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.bits.full_update = 1;

	return type;
}
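
/*
 * Illustrative usage sketch: callers classify an update before committing,
 * so that fast flips can skip full re-validation. srf_updates and
 * surface_count below are assumed to have been prepared by the caller.
 *
 *	enum surface_update_type type = dc_check_update_surfaces_for_stream(
 *			dc, srf_updates, surface_count, NULL,
 *			dc_stream_get_status(stream));
 *
 *	if (type == UPDATE_TYPE_FAST)
 *		;	// flip only: addresses update under the pipe lock
 */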
static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (update_type == UPDATE_TYPE_FULL) {
		dc->hwss.set_bandwidth(dc, context, false);
		context_clock_trace(dc, context);
	}

	if (surface_count == 0) {
		/*
		 * In case of turning off screen, no need to program the front
		 * end a second time; just return after programming it.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
		return;
	}

	/* Full front-end update */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
				pipe_ctx->stream &&
				pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			top_pipe_to_program = pipe_ctx;

			if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
				continue;

			stream_status =
					stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}

	if (update_type == UPDATE_TYPE_FULL)
		context_timing_trace(dc, &context->res_ctx);

	/* Lock the top pipe while updating plane addrs, since freesync requires
	 * plane addr update event triggers to be synchronized.
	 * top_pipe_to_program is expected to never be NULL.
	 */
	if (update_type == UPDATE_TYPE_FAST) {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

		/* Perform requested updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;

				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;

			if (stream_update->hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}
		}
}
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_plane_state **plane_states,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state();
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		/* TODO: On flip we don't build the state, so it still has the
		 * old address, which is why we update the address here.
		 */
		if (srf_updates[i].flip_addr) {
			surface->address = srf_updates[i].flip_addr->address;
			surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
		}

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);

	/* update current_state */
	if (dc->current_state != context) {
		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);
	}

	/* use current_state to update watermarks etc. */
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);
		break;

	default:
		dc->hwss.power_down(dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;

		break;
	}
}
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}
bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
		ddc->ctx->i2caux,
		ddc->ddc_pin,
		cmd);
}
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	if (edid_status != EDID_OK)
		goto fail;

	return dc_sink;
fail:
	dc_link_remove_remote_sink(link, dc_sink);
fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i + 1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}