1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32 #include "dce/dce_hwseq.h"
33
34 #include "resource.h"
35
36 #include "clock_source.h"
37 #include "dc_bios_types.h"
38
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "dpp.h"
43 #include "timing_generator.h"
44 #include "virtual/virtual_link_encoder.h"
45
46 #include "link_hwss.h"
47 #include "link_encoder.h"
48
49 #include "dc_link_ddc.h"
50 #include "dm_helpers.h"
51 #include "mem_input.h"
52 #include "hubp.h"
53
54
55 /*******************************************************************************
56  * Private functions
57  ******************************************************************************/
58
59 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
60 {
61         if (new > *original)
62                 *original = new;
63 }
64
65 static void destroy_links(struct dc *dc)
66 {
67         uint32_t i;
68
69         for (i = 0; i < dc->link_count; i++) {
70                 if (dc->links[i])
71                         link_destroy(&dc->links[i]);
72         }
73 }
74
75 static bool create_links(
76                 struct dc *dc,
77                 uint32_t num_virtual_links)
78 {
79         int i;
80         int connectors_num;
81         struct dc_bios *bios = dc->ctx->dc_bios;
82
83         dc->link_count = 0;
84
85         connectors_num = bios->funcs->get_connectors_number(bios);
86
87         if (connectors_num > ENUM_ID_COUNT) {
88                 dm_error(
89                         "DC: Number of connectors %d exceeds maximum of %d!\n",
90                         connectors_num,
91                         ENUM_ID_COUNT);
92                 return false;
93         }
94
95         if (connectors_num == 0 && num_virtual_links == 0) {
96                 dm_error("DC: Number of connectors is zero!\n");
97         }
98
99         dm_output_to_console(
100                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
101                 __func__,
102                 connectors_num,
103                 num_virtual_links);
104
105         for (i = 0; i < connectors_num; i++) {
106                 struct link_init_data link_init_params = {0};
107                 struct dc_link *link;
108
109                 link_init_params.ctx = dc->ctx;
110                 /* next BIOS object table connector */
111                 link_init_params.connector_index = i;
112                 link_init_params.link_index = dc->link_count;
113                 link_init_params.dc = dc;
114                 link = link_create(&link_init_params);
115
116                 if (link) {
117                         dc->links[dc->link_count] = link;
118                         link->dc = dc;
119                         ++dc->link_count;
120                 }
121         }
122
123         for (i = 0; i < num_virtual_links; i++) {
124                 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
125                 struct encoder_init_data enc_init = {0};
126
127                 if (link == NULL) {
128                         BREAK_TO_DEBUGGER();
129                         goto failed_alloc;
130                 }
131
132                 link->link_index = dc->link_count;
133                 dc->links[dc->link_count] = link;
134                 dc->link_count++;
135
136                 link->ctx = dc->ctx;
137                 link->dc = dc;
138                 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
139                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
140                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
141                 link->link_id.enum_id = ENUM_ID_1;
142                 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
143
144                 if (!link->link_enc) {
145                         BREAK_TO_DEBUGGER();
146                         goto failed_alloc;
147                 }
148
149                 link->link_status.dpcd_caps = &link->dpcd_caps;
150
151                 enc_init.ctx = dc->ctx;
152                 enc_init.channel = CHANNEL_ID_UNKNOWN;
153                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
154                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
155                 enc_init.connector = link->link_id;
156                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
157                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
158                 enc_init.encoder.enum_id = ENUM_ID_1;
159                 virtual_link_encoder_construct(link->link_enc, &enc_init);
160         }
161
162         return true;
163
164 failed_alloc:
165         return false;
166 }
167
168 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
169                 struct dc_stream_state **streams, int num_streams,
170                 int vmin, int vmax)
171 {
172         /* TODO: Support multiple streams */
173         struct dc_stream_state *stream = streams[0];
174         int i = 0;
175         bool ret = false;
176
177         for (i = 0; i < MAX_PIPES; i++) {
178                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
179
180                 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
181                         dc->hwss.set_drr(&pipe, 1, vmin, vmax);
182
183                         /* build and update the info frame */
184                         resource_build_info_frame(pipe);
185                         dc->hwss.update_info_frame(pipe);
186
187                         ret = true;
188                 }
189         }
190         return ret;
191 }
192
193 bool dc_stream_get_crtc_position(struct dc *dc,
194                 struct dc_stream_state **streams, int num_streams,
195                 unsigned int *v_pos, unsigned int *nom_v_pos)
196 {
197         /* TODO: Support multiple streams */
198         struct dc_stream_state *stream = streams[0];
199         int i = 0;
200         bool ret = false;
201         struct crtc_position position;
202
203         for (i = 0; i < MAX_PIPES; i++) {
204                 struct pipe_ctx *pipe =
205                                 &dc->current_state->res_ctx.pipe_ctx[i];
206
207                 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
208                         dc->hwss.get_position(&pipe, 1, &position);
209
210                         *v_pos = position.vertical_count;
211                         *nom_v_pos = position.nominal_vcount;
212                         ret = true;
213                 }
214         }
215         return ret;
216 }
217
218 /**
219  * dc_stream_configure_crc() - Configure CRC capture for the given stream.
220  * @dc: DC Object
221  * @stream: The stream to configure CRC on.
222  * @enable: Enable CRC if true, disable otherwise.
223  * @continuous: Capture CRC on every frame if true. Otherwise, only capture
224  *              once.
225  *
226  * By default, only CRC0 is configured, and the entire frame is used to
227  * calculate the crc.
228  * calculate the CRC.
229 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
230                              bool enable, bool continuous)
231 {
232         int i;
233         struct pipe_ctx *pipe;
234         struct crc_params param;
235         struct timing_generator *tg;
236
237         for (i = 0; i < MAX_PIPES; i++) {
238                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
239                 if (pipe->stream == stream)
240                         break;
241         }
242         /* Stream not found */
243         if (i == MAX_PIPES)
244                 return false;
245
246         /* Always capture the full frame */
247         param.windowa_x_start = 0;
248         param.windowa_y_start = 0;
249         param.windowa_x_end = pipe->stream->timing.h_addressable;
250         param.windowa_y_end = pipe->stream->timing.v_addressable;
251         param.windowb_x_start = 0;
252         param.windowb_y_start = 0;
253         param.windowb_x_end = pipe->stream->timing.h_addressable;
254         param.windowb_y_end = pipe->stream->timing.v_addressable;
255
256         /* Default to the union of both windows */
257         param.selection = UNION_WINDOW_A_B;
258         param.continuous_mode = continuous;
259         param.enable = enable;
260
261         tg = pipe->stream_res.tg;
262
263         /* Only call if supported */
264         if (tg->funcs->configure_crc)
265                 return tg->funcs->configure_crc(tg, &param);
266         dm_logger_write(dc->ctx->logger, LOG_WARNING, "CRC capture not supported.");
267         return false;
268 }
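
/*
 * Illustrative example (not part of the driver): a DM-level caller that
 * already holds a valid dc and dc_stream_state could enable continuous CRC
 * capture roughly like this; the error print is only a placeholder for
 * whatever handling the caller prefers.
 *
 *	if (!dc_stream_configure_crc(dc, stream, true, true))
 *		dm_error("CRC capture could not be enabled\n");
 */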
269
270 /**
271  * dc_stream_get_crc() - Get CRC values for the given stream.
272  * @dc: DC object
273  * @stream: The DC stream state of the stream to get CRCs from.
274  * @r_cr, @g_y, @b_cb: CRC values for the three color channels are stored here.
275  *
276  * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
277  * Return: false if the stream is not found, or if CRCs are not enabled.
278  */
279 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
280                        uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
281 {
282         int i;
283         struct pipe_ctx *pipe;
284         struct timing_generator *tg;
285
286         for (i = 0; i < MAX_PIPES; i++) {
287                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
288                 if (pipe->stream == stream)
289                         break;
290         }
291         /* Stream not found */
292         if (i == MAX_PIPES)
293                 return false;
294
295         tg = pipe->stream_res.tg;
296
297         if (tg->funcs->get_crc)
298                 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
299         dm_logger_write(dc->ctx->logger, LOG_WARNING, "CRC capture not supported.");
300         return false;
301 }
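
/*
 * Illustrative example (not part of the driver): once capture has been
 * enabled with dc_stream_configure_crc(), the per-channel CRCs can be read
 * back, e.g. once per frame; crc_r, crc_g and crc_b are assumed to be local
 * uint32_t variables in the caller.
 *
 *	uint32_t crc_r, crc_g, crc_b;
 *
 *	if (dc_stream_get_crc(dc, stream, &crc_r, &crc_g, &crc_b))
 *		dm_output_to_console("CRC: R/Cr=%08x G/Y=%08x B/Cb=%08x\n",
 *				crc_r, crc_g, crc_b);
 */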
302
303 void dc_stream_set_static_screen_events(struct dc *dc,
304                 struct dc_stream_state **streams,
305                 int num_streams,
306                 const struct dc_static_screen_events *events)
307 {
308         int i = 0;
309         int j = 0;
310         struct pipe_ctx *pipes_affected[MAX_PIPES];
311         int num_pipes_affected = 0;
312
313         for (i = 0; i < num_streams; i++) {
314                 struct dc_stream_state *stream = streams[i];
315
316                 for (j = 0; j < MAX_PIPES; j++) {
317                         if (dc->current_state->res_ctx.pipe_ctx[j].stream
318                                         == stream) {
319                                 pipes_affected[num_pipes_affected++] =
320                                                 &dc->current_state->res_ctx.pipe_ctx[j];
321                         }
322                 }
323         }
324
325         dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
326 }
327
328 static void destruct(struct dc *dc)
329 {
330         dc_release_state(dc->current_state);
331         dc->current_state = NULL;
332
333         destroy_links(dc);
334
335         dc_destroy_resource_pool(dc);
336
337         if (dc->ctx->gpio_service)
338                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
339
340         if (dc->ctx->i2caux)
341                 dal_i2caux_destroy(&dc->ctx->i2caux);
342
343         if (dc->ctx->created_bios)
344                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
345
346         if (dc->ctx->logger)
347                 dal_logger_destroy(&dc->ctx->logger);
348
349         kfree(dc->ctx);
350         dc->ctx = NULL;
351
352         kfree(dc->bw_vbios);
353         dc->bw_vbios = NULL;
354
355         kfree(dc->bw_dceip);
356         dc->bw_dceip = NULL;
357
358 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
359         kfree(dc->dcn_soc);
360         dc->dcn_soc = NULL;
361
362         kfree(dc->dcn_ip);
363         dc->dcn_ip = NULL;
364
365 #endif
366 }
367
368 static bool construct(struct dc *dc,
369                 const struct dc_init_data *init_params)
370 {
371         struct dal_logger *logger;
372         struct dc_context *dc_ctx;
373         struct bw_calcs_dceip *dc_dceip;
374         struct bw_calcs_vbios *dc_vbios;
375 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
376         struct dcn_soc_bounding_box *dcn_soc;
377         struct dcn_ip_params *dcn_ip;
378 #endif
379
380         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
381
382         dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
383         if (!dc_dceip) {
384                 dm_error("%s: failed to create dceip\n", __func__);
385                 goto fail;
386         }
387
388         dc->bw_dceip = dc_dceip;
389
390         dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
391         if (!dc_vbios) {
392                 dm_error("%s: failed to create vbios\n", __func__);
393                 goto fail;
394         }
395
396         dc->bw_vbios = dc_vbios;
397 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
398         dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
399         if (!dcn_soc) {
400                 dm_error("%s: failed to create dcn_soc\n", __func__);
401                 goto fail;
402         }
403
404         dc->dcn_soc = dcn_soc;
405
406         dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
407         if (!dcn_ip) {
408                 dm_error("%s: failed to create dcn_ip\n", __func__);
409                 goto fail;
410         }
411
412         dc->dcn_ip = dcn_ip;
413 #endif
414
415         dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
416         if (!dc_ctx) {
417                 dm_error("%s: failed to create ctx\n", __func__);
418                 goto fail;
419         }
420
421         dc_ctx->cgs_device = init_params->cgs_device;
422         dc_ctx->driver_context = init_params->driver;
423         dc_ctx->dc = dc;
424         dc_ctx->asic_id = init_params->asic_id;
425         dc->ctx = dc_ctx;
426
427         dc->current_state = dc_create_state();
428
429         if (!dc->current_state) {
430                 dm_error("%s: failed to create validate ctx\n", __func__);
431                 goto fail;
432         }
433
434         /* Create logger */
435         logger = dal_logger_create(dc_ctx, init_params->log_mask);
436
437         if (!logger) {
438                 /* cannot use the logger yet; fall back to the base driver's error print */
439                 dm_error("%s: failed to create Logger!\n", __func__);
440                 goto fail;
441         }
442         dc_ctx->logger = logger;
443         dc_ctx->dce_environment = init_params->dce_environment;
444
445         dc_version = resource_parse_asic_id(init_params->asic_id);
446         dc_ctx->dce_version = dc_version;
447
448         /* Resource should construct all asic specific resources.
449          * This should be the only place where we need to parse the asic id
450          */
451         if (init_params->vbios_override) {
452                 dc_ctx->dc_bios = init_params->vbios_override;
453         } else {
454                 /* Create BIOS parser */
455                 struct bp_init_data bp_init_data;
456
457                 bp_init_data.ctx = dc_ctx;
458                 bp_init_data.bios = init_params->asic_id.atombios_base_address;
459
460                 dc_ctx->dc_bios = dal_bios_parser_create(
461                                 &bp_init_data, dc_version);
462
463                 if (!dc_ctx->dc_bios) {
464                         ASSERT_CRITICAL(false);
465                         goto fail;
466                 }
467
468                 dc_ctx->created_bios = true;
469         }
470
471         /* Create I2C AUX */
472         dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
473
474         if (!dc_ctx->i2caux) {
475                 ASSERT_CRITICAL(false);
476                 goto fail;
477         }
478
479         /* Create GPIO service */
480         dc_ctx->gpio_service = dal_gpio_service_create(
481                         dc_version,
482                         dc_ctx->dce_environment,
483                         dc_ctx);
484
485         if (!dc_ctx->gpio_service) {
486                 ASSERT_CRITICAL(false);
487                 goto fail;
488         }
489
490         dc->res_pool = dc_create_resource_pool(
491                         dc,
492                         init_params->num_virtual_links,
493                         dc_version,
494                         init_params->asic_id);
495         if (!dc->res_pool)
496                 goto fail;
497
498         dc_resource_state_construct(dc, dc->current_state);
499
500         if (!create_links(dc, init_params->num_virtual_links))
501                 goto fail;
502
503         return true;
504
505 fail:
506
507         destruct(dc);
508         return false;
509 }
510
511 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
512 {
513         int i, j;
514         struct dc_state *dangling_context = dc_create_state();
515         struct dc_state *current_ctx;
516
517         if (dangling_context == NULL)
518                 return;
519
520         dc_resource_state_copy_construct(dc->current_state, dangling_context);
521
522         for (i = 0; i < dc->res_pool->pipe_count; i++) {
523                 struct dc_stream_state *old_stream =
524                                 dc->current_state->res_ctx.pipe_ctx[i].stream;
525                 bool should_disable = true;
526
527                 for (j = 0; j < context->stream_count; j++) {
528                         if (old_stream == context->streams[j]) {
529                                 should_disable = false;
530                                 break;
531                         }
532                 }
533                 if (should_disable && old_stream) {
534                         dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
535                         dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
536                 }
537         }
538
539         current_ctx = dc->current_state;
540         dc->current_state = dangling_context;
541         dc_release_state(current_ctx);
542 }
543
544 /*******************************************************************************
545  * Public functions
546  ******************************************************************************/
547
548 struct dc *dc_create(const struct dc_init_data *init_params)
549 {
550         struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
551         unsigned int full_pipe_count;
552
553         if (!dc)
554                 goto alloc_fail;
555
556         if (!construct(dc, init_params))
557                 goto construct_fail;
558
559         /* TODO: separate HW and SW initialization */
560         dc->hwss.init_hw(dc);
561
562         full_pipe_count = dc->res_pool->pipe_count;
563         if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
564                 full_pipe_count--;
565         dc->caps.max_streams = min(
566                         full_pipe_count,
567                         dc->res_pool->stream_enc_count);
568
569         dc->caps.max_links = dc->link_count;
570         dc->caps.max_audios = dc->res_pool->audio_count;
571         dc->caps.linear_pitch_alignment = 64;
572
573         dc->config = init_params->flags;
574
575         dm_logger_write(dc->ctx->logger, LOG_DC,
576                         "Display Core initialized\n");
577
578
579         /* TODO: missing feature to be enabled */
580         dc->debug.disable_dfs_bypass = true;
581
582         return dc;
583
584 construct_fail:
585         kfree(dc);
586
587 alloc_fail:
588         return NULL;
589 }
590
591 void dc_destroy(struct dc **dc)
592 {
593         destruct(*dc);
594         kfree(*dc);
595         *dc = NULL;
596 }
597
598 static void enable_timing_multisync(
599                 struct dc *dc,
600                 struct dc_state *ctx)
601 {
602         int i = 0, multisync_count = 0;
603         int pipe_count = dc->res_pool->pipe_count;
604         struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
605
606         for (i = 0; i < pipe_count; i++) {
607                 if (!ctx->res_ctx.pipe_ctx[i].stream ||
608                                 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
609                         continue;
610                 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
611                         continue;
612                 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
613                 multisync_count++;
614         }
615
616         if (multisync_count > 0) {
617                 dc->hwss.enable_per_frame_crtc_position_reset(
618                         dc, multisync_count, multisync_pipes);
619         }
620 }
621
622 static void program_timing_sync(
623                 struct dc *dc,
624                 struct dc_state *ctx)
625 {
626         int i, j;
627         int group_index = 0;
628         int pipe_count = dc->res_pool->pipe_count;
629         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
630
631         for (i = 0; i < pipe_count; i++) {
632                 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
633                         continue;
634
635                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
636         }
637
638         for (i = 0; i < pipe_count; i++) {
639                 int group_size = 1;
640                 struct pipe_ctx *pipe_set[MAX_PIPES];
641
642                 if (!unsynced_pipes[i])
643                         continue;
644
645                 pipe_set[0] = unsynced_pipes[i];
646                 unsynced_pipes[i] = NULL;
647
648                 /* Add tg to the set, search the rest of the tgs for ones with
649                  * the same timing, and add all tgs with the same timing to the group
650                  */
651                 for (j = i + 1; j < pipe_count; j++) {
652                         if (!unsynced_pipes[j])
653                                 continue;
654
655                         if (resource_are_streams_timing_synchronizable(
656                                         unsynced_pipes[j]->stream,
657                                         pipe_set[0]->stream)) {
658                                 pipe_set[group_size] = unsynced_pipes[j];
659                                 unsynced_pipes[j] = NULL;
660                                 group_size++;
661                         }
662                 }
663
664                 /* set first unblanked pipe as master */
665                 for (j = 0; j < group_size; j++) {
666                         struct pipe_ctx *temp;
667
668                         if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
669                                 if (j == 0)
670                                         break;
671
672                                 temp = pipe_set[0];
673                                 pipe_set[0] = pipe_set[j];
674                                 pipe_set[j] = temp;
675                                 break;
676                         }
677                 }
678
679                 /* remove any other unblanked pipes as they have already been synced */
680                 for (j = j + 1; j < group_size; j++) {
681                         if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
682                                 group_size--;
683                                 pipe_set[j] = pipe_set[group_size];
684                                 j--;
685                         }
686                 }
687
688                 if (group_size > 1) {
689                         dc->hwss.enable_timing_synchronization(
690                                 dc, group_index, group_size, pipe_set);
691                         group_index++;
692                 }
693         }
694 }
695
696 static bool context_changed(
697                 struct dc *dc,
698                 struct dc_state *context)
699 {
700         uint8_t i;
701
702         if (context->stream_count != dc->current_state->stream_count)
703                 return true;
704
705         for (i = 0; i < dc->current_state->stream_count; i++) {
706                 if (dc->current_state->streams[i] != context->streams[i])
707                         return true;
708         }
709
710         return false;
711 }
712
713 bool dc_enable_stereo(
714         struct dc *dc,
715         struct dc_state *context,
716         struct dc_stream_state *streams[],
717         uint8_t stream_count)
718 {
719         bool ret = true;
720         int i, j;
721         struct pipe_ctx *pipe;
722
723         for (i = 0; i < MAX_PIPES; i++) {
724                 if (context != NULL)
725                         pipe = &context->res_ctx.pipe_ctx[i];
726                 else
727                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
728                 for (j = 0 ; pipe && j < stream_count; j++)  {
729                         if (streams[j] && streams[j] == pipe->stream &&
730                                 dc->hwss.setup_stereo)
731                                 dc->hwss.setup_stereo(pipe, dc);
732                 }
733         }
734
735         return ret;
736 }
737
738 /*
739  * Applies given context to HW and copy it into current context.
740  * It's up to the user to release the src context afterwards.
741  */
742 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
743 {
744         struct dc_bios *dcb = dc->ctx->dc_bios;
745         enum dc_status result = DC_ERROR_UNEXPECTED;
746         struct pipe_ctx *pipe;
747         int i, k, l;
748         struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
749
750         disable_dangling_plane(dc, context);
751
752         for (i = 0; i < context->stream_count; i++)
753                 dc_streams[i] =  context->streams[i];
754
755         if (!dcb->funcs->is_accelerated_mode(dcb))
756                 dc->hwss.enable_accelerated_mode(dc, context);
757
758         /* re-program planes for existing streams, in case we need to
759          * free up plane resources for later use
760          */
761         for (i = 0; i < context->stream_count; i++) {
762                 if (context->streams[i]->mode_changed)
763                         continue;
764
765                 dc->hwss.apply_ctx_for_surface(
766                         dc, context->streams[i],
767                         context->stream_status[i].plane_count,
768                         context); /* use new pipe config in new context */
769         }
770
771         /* Program hardware */
772         dc->hwss.ready_shared_resources(dc, context);
773
774         for (i = 0; i < dc->res_pool->pipe_count; i++) {
775                 pipe = &context->res_ctx.pipe_ctx[i];
776                 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
777         }
778
779         result = dc->hwss.apply_ctx_to_hw(dc, context);
780
781         if (result != DC_OK)
782                 return result;
783
784         if (context->stream_count > 1) {
785                 enable_timing_multisync(dc, context);
786                 program_timing_sync(dc, context);
787         }
788
789         /* Program all planes within new context*/
790         for (i = 0; i < context->stream_count; i++) {
791                 const struct dc_sink *sink = context->streams[i]->sink;
792
793                 if (!context->streams[i]->mode_changed)
794                         continue;
795
796                 dc->hwss.apply_ctx_for_surface(
797                                 dc, context->streams[i],
798                                 context->stream_status[i].plane_count,
799                                 context);
800
801                 /*
802                  * enable stereo
803                  * TODO rework dc_enable_stereo call to work with validation sets?
804                  */
805                 for (k = 0; k < MAX_PIPES; k++) {
806                         pipe = &context->res_ctx.pipe_ctx[k];
807
808                         for (l = 0 ; pipe && l < context->stream_count; l++)  {
809                                 if (context->streams[l] &&
810                                         context->streams[l] == pipe->stream &&
811                                         dc->hwss.setup_stereo)
812                                         dc->hwss.setup_stereo(pipe, dc);
813                         }
814                 }
815
816                 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
817                                 context->streams[i]->timing.h_addressable,
818                                 context->streams[i]->timing.v_addressable,
819                                 context->streams[i]->timing.h_total,
820                                 context->streams[i]->timing.v_total,
821                                 context->streams[i]->timing.pix_clk_khz);
822         }
823
824         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
825
826         dc_release_state(dc->current_state);
827
828         dc->current_state = context;
829
830         dc_retain_state(dc->current_state);
831
832         dc->hwss.optimize_shared_resources(dc);
833
834         return result;
835 }
836
837 bool dc_commit_state(struct dc *dc, struct dc_state *context)
838 {
839         enum dc_status result = DC_ERROR_UNEXPECTED;
840         int i;
841
842         if (!context_changed(dc, context))
843                 return true;
844
845         dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
846                                 __func__, context->stream_count);
847
848         for (i = 0; i < context->stream_count; i++) {
849                 struct dc_stream_state *stream = context->streams[i];
850
851                 dc_stream_log(stream,
852                                 dc->ctx->logger,
853                                 LOG_DC);
854         }
855
856         result = dc_commit_state_no_check(dc, context);
857
858         return (result == DC_OK);
859 }
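
/*
 * Illustrative example (not part of the driver): dc_commit_state() installs
 * the given context as dc->current_state but does not drop the caller's
 * reference, so a typical sequence built on top of the current state could
 * look like the sketch below (stream/plane manipulation elided).
 *
 *	struct dc_state *context = dc_create_state();
 *
 *	if (context) {
 *		dc_resource_state_copy_construct(dc->current_state, context);
 *		... add or remove streams and planes on context ...
 *		dc_commit_state(dc, context);
 *		dc_release_state(context);
 *	}
 */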
860
861 bool dc_post_update_surfaces_to_stream(struct dc *dc)
862 {
863         int i;
864         struct dc_state *context = dc->current_state;
865
866         post_surface_trace(dc);
867
868         for (i = 0; i < dc->res_pool->pipe_count; i++)
869                 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
870                     context->res_ctx.pipe_ctx[i].plane_state == NULL) {
871                         context->res_ctx.pipe_ctx[i].pipe_idx = i;
872                         dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
873                 }
874
875         dc->optimized_required = false;
876
877         /* 3rd param should be true, temporary workaround for Raven (RV) */
878 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
879         dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
880 #else
881         dc->hwss.set_bandwidth(dc, context, true);
882 #endif
883         return true;
884 }
885
886 /*
887  * TODO this whole function needs to go
888  *
889  * dc_surface_update is needlessly complex. See if we can just replace this
890  * with a dc_plane_state and follow the atomic model a bit more closely here.
891  */
892 bool dc_commit_planes_to_stream(
893                 struct dc *dc,
894                 struct dc_plane_state **plane_states,
895                 uint8_t new_plane_count,
896                 struct dc_stream_state *dc_stream,
897                 struct dc_state *state)
898 {
899         /* no need to dynamically allocate this. it's pretty small */
900         struct dc_surface_update updates[MAX_SURFACES];
901         struct dc_flip_addrs *flip_addr;
902         struct dc_plane_info *plane_info;
903         struct dc_scaling_info *scaling_info;
904         int i;
905         struct dc_stream_update *stream_update =
906                         kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
907
908         if (!stream_update) {
909                 BREAK_TO_DEBUGGER();
910                 return false;
911         }
912
913         flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
914                             GFP_KERNEL);
915         plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
916                              GFP_KERNEL);
917         scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
918                                GFP_KERNEL);
919
920         if (!flip_addr || !plane_info || !scaling_info) {
921                 kfree(flip_addr);
922                 kfree(plane_info);
923                 kfree(scaling_info);
924                 kfree(stream_update);
925                 return false;
926         }
927
928         memset(updates, 0, sizeof(updates));
929
930         stream_update->src = dc_stream->src;
931         stream_update->dst = dc_stream->dst;
932         stream_update->out_transfer_func = dc_stream->out_transfer_func;
933
934         for (i = 0; i < new_plane_count; i++) {
935                 updates[i].surface = plane_states[i];
936                 updates[i].gamma =
937                         (struct dc_gamma *)plane_states[i]->gamma_correction;
938                 updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
939                 flip_addr[i].address = plane_states[i]->address;
940                 flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
941                 plane_info[i].color_space = plane_states[i]->color_space;
942                 plane_info[i].input_tf = plane_states[i]->input_tf;
943                 plane_info[i].format = plane_states[i]->format;
944                 plane_info[i].plane_size = plane_states[i]->plane_size;
945                 plane_info[i].rotation = plane_states[i]->rotation;
946                 plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
947                 plane_info[i].stereo_format = plane_states[i]->stereo_format;
948                 plane_info[i].tiling_info = plane_states[i]->tiling_info;
949                 plane_info[i].visible = plane_states[i]->visible;
950                 plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
951                 plane_info[i].dcc = plane_states[i]->dcc;
952                 scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
953                 scaling_info[i].src_rect = plane_states[i]->src_rect;
954                 scaling_info[i].dst_rect = plane_states[i]->dst_rect;
955                 scaling_info[i].clip_rect = plane_states[i]->clip_rect;
956
957                 updates[i].flip_addr = &flip_addr[i];
958                 updates[i].plane_info = &plane_info[i];
959                 updates[i].scaling_info = &scaling_info[i];
960         }
961
962         dc_commit_updates_for_stream(
963                         dc,
964                         updates,
965                         new_plane_count,
966                         dc_stream, stream_update, plane_states, state);
967
968         kfree(flip_addr);
969         kfree(plane_info);
970         kfree(scaling_info);
971         kfree(stream_update);
972         return true;
973 }
974
975 struct dc_state *dc_create_state(void)
976 {
977         struct dc_state *context = kzalloc(sizeof(struct dc_state),
978                                            GFP_KERNEL);
979
980         if (!context)
981                 return NULL;
982
983         kref_init(&context->refcount);
984         return context;
985 }
986
987 void dc_retain_state(struct dc_state *context)
988 {
989         kref_get(&context->refcount);
990 }
991
992 static void dc_state_free(struct kref *kref)
993 {
994         struct dc_state *context = container_of(kref, struct dc_state, refcount);
995         dc_resource_state_destruct(context);
996         kfree(context);
997 }
998
999 void dc_release_state(struct dc_state *context)
1000 {
1001         kref_put(&context->refcount, dc_state_free);
1002 }
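
/*
 * Illustrative example (not part of the driver): dc_state objects are
 * reference counted, so every dc_create_state() or dc_retain_state() must be
 * balanced by a dc_release_state(); the last release frees the state via
 * dc_state_free().
 *
 *	struct dc_state *state = dc_create_state();	refcount == 1
 *	dc_retain_state(state);				refcount == 2
 *	dc_release_state(state);			refcount == 1
 *	dc_release_state(state);			state is freed
 */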
1003
1004 static bool is_surface_in_context(
1005                 const struct dc_state *context,
1006                 const struct dc_plane_state *plane_state)
1007 {
1008         int j;
1009
1010         for (j = 0; j < MAX_PIPES; j++) {
1011                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1012
1013                 if (plane_state == pipe_ctx->plane_state) {
1014                         return true;
1015                 }
1016         }
1017
1018         return false;
1019 }
1020
1021 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
1022 {
1023         switch (format) {
1024         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
1025         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
1026                 return 12;
1027         case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1028         case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1029         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
1030         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
1031                 return 16;
1032         case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1033         case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1034         case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1035         case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1036                 return 32;
1037         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1038         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1039         case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1040                 return 64;
1041         default:
1042                 ASSERT_CRITICAL(false);
1043                 return -1;
1044         }
1045 }
1046
1047 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1048 {
1049         union surface_update_flags *update_flags = &u->surface->update_flags;
1050
1051         if (!u->plane_info)
1052                 return UPDATE_TYPE_FAST;
1053
1054         if (u->plane_info->color_space != u->surface->color_space)
1055                 update_flags->bits.color_space_change = 1;
1056
1057         if (u->plane_info->input_tf != u->surface->input_tf)
1058                 update_flags->bits.input_tf_change = 1;
1059
1060         if (u->plane_info->sdr_white_level != u->surface->sdr_white_level)
1061                 update_flags->bits.output_tf_change = 1;
1062
1063         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
1064                 update_flags->bits.horizontal_mirror_change = 1;
1065
1066         if (u->plane_info->rotation != u->surface->rotation)
1067                 update_flags->bits.rotation_change = 1;
1068
1069         if (u->plane_info->format != u->surface->format)
1070                 update_flags->bits.pixel_format_change = 1;
1071
1072         if (u->plane_info->stereo_format != u->surface->stereo_format)
1073                 update_flags->bits.stereo_format_change = 1;
1074
1075         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
1076                 update_flags->bits.per_pixel_alpha_change = 1;
1077
1078         if (u->plane_info->dcc.enable != u->surface->dcc.enable
1079                         || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
1080                         || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
1081                 update_flags->bits.dcc_change = 1;
1082
1083         if (pixel_format_to_bpp(u->plane_info->format) !=
1084                         pixel_format_to_bpp(u->surface->format))
1085                 /* different bytes per element will require full bandwidth
1086                  * and DML calculation
1087                  */
1088                 update_flags->bits.bpp_change = 1;
1089
1090         if (u->gamma && dce_use_lut(u->plane_info->format))
1091                 update_flags->bits.gamma_change = 1;
1092
1093         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1094                         sizeof(union dc_tiling_info)) != 0) {
1095                 update_flags->bits.swizzle_change = 1;
1096                 /* todo: below are HW dependent, we should add a hook to
1097                  * DCE/N resource and validate there.
1098                  */
1099                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
1100                         /* swizzled mode requires RQ to be set up properly,
1101                          * thus we need to run DML to calculate RQ settings
1102                          */
1103                         update_flags->bits.bandwidth_change = 1;
1104         }
1105
1106         if (update_flags->bits.rotation_change
1107                         || update_flags->bits.stereo_format_change
1108                         || update_flags->bits.pixel_format_change
1109                         || update_flags->bits.gamma_change
1110                         || update_flags->bits.bpp_change
1111                         || update_flags->bits.bandwidth_change
1112                         || update_flags->bits.output_tf_change)
1113                 return UPDATE_TYPE_FULL;
1114
1115         return UPDATE_TYPE_MED;
1116 }
1117
1118 static enum surface_update_type get_scaling_info_update_type(
1119                 const struct dc_surface_update *u)
1120 {
1121         union surface_update_flags *update_flags = &u->surface->update_flags;
1122
1123         if (!u->scaling_info)
1124                 return UPDATE_TYPE_FAST;
1125
1126         if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1127                         || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1128                         || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1129                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
1130                 update_flags->bits.scaling_change = 1;
1131
1132                 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
1133                         || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
1134                                 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
1135                                         || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
1136                         /* Making dst rect smaller requires a bandwidth change */
1137                         update_flags->bits.bandwidth_change = 1;
1138         }
1139
1140         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
1141                 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
1142
1143                 update_flags->bits.scaling_change = 1;
1144                 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
1145                                 && u->scaling_info->src_rect.height > u->surface->src_rect.height)
1146                         /* Making src rect bigger requires a clock change */
1147                         update_flags->bits.clock_change = 1;
1148         }
1149
1150         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1151                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
1152                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
1153                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
1154                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
1155                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
1156                 update_flags->bits.position_change = 1;
1157
1158         if (update_flags->bits.clock_change
1159                         || update_flags->bits.bandwidth_change)
1160                 return UPDATE_TYPE_FULL;
1161
1162         if (update_flags->bits.scaling_change
1163                         || update_flags->bits.position_change)
1164                 return UPDATE_TYPE_MED;
1165
1166         return UPDATE_TYPE_FAST;
1167 }
1168
1169 static enum surface_update_type det_surface_update(const struct dc *dc,
1170                 const struct dc_surface_update *u)
1171 {
1172         const struct dc_state *context = dc->current_state;
1173         enum surface_update_type type;
1174         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1175         union surface_update_flags *update_flags = &u->surface->update_flags;
1176
1177         update_flags->raw = 0; // Reset all flags
1178
1179         if (!is_surface_in_context(context, u->surface)) {
1180                 update_flags->bits.new_plane = 1;
1181                 return UPDATE_TYPE_FULL;
1182         }
1183
1184         type = get_plane_info_update_type(u);
1185         elevate_update_type(&overall_type, type);
1186
1187         type = get_scaling_info_update_type(u);
1188         elevate_update_type(&overall_type, type);
1189
1190         if (u->in_transfer_func)
1191                 update_flags->bits.in_transfer_func_change = 1;
1192
1193         if (u->input_csc_color_matrix)
1194                 update_flags->bits.input_csc_change = 1;
1195
1196         if (update_flags->bits.in_transfer_func_change
1197                         || update_flags->bits.input_csc_change) {
1198                 type = UPDATE_TYPE_MED;
1199                 elevate_update_type(&overall_type, type);
1200         }
1201
1202         return overall_type;
1203 }
1204
1205 static enum surface_update_type check_update_surfaces_for_stream(
1206                 struct dc *dc,
1207                 struct dc_surface_update *updates,
1208                 int surface_count,
1209                 struct dc_stream_update *stream_update,
1210                 const struct dc_stream_status *stream_status)
1211 {
1212         int i;
1213         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1214
1215         if (stream_status == NULL || stream_status->plane_count != surface_count)
1216                 return UPDATE_TYPE_FULL;
1217
1218         if (stream_update)
1219                 return UPDATE_TYPE_FULL;
1220
1221         for (i = 0 ; i < surface_count; i++) {
1222                 enum surface_update_type type =
1223                                 det_surface_update(dc, &updates[i]);
1224
1225                 if (type == UPDATE_TYPE_FULL)
1226                         return type;
1227
1228                 elevate_update_type(&overall_type, type);
1229         }
1230
1231         return overall_type;
1232 }
1233
1234 enum surface_update_type dc_check_update_surfaces_for_stream(
1235                 struct dc *dc,
1236                 struct dc_surface_update *updates,
1237                 int surface_count,
1238                 struct dc_stream_update *stream_update,
1239                 const struct dc_stream_status *stream_status)
1240 {
1241         int i;
1242         enum surface_update_type type;
1243
1244         for (i = 0; i < surface_count; i++)
1245                 updates[i].surface->update_flags.raw = 0;
1246
1247         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
1248         if (type == UPDATE_TYPE_FULL)
1249                 for (i = 0; i < surface_count; i++)
1250                         updates[i].surface->update_flags.bits.full_update = 1;
1251
1252         return type;
1253 }
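
/*
 * Illustrative example (not part of the driver): a caller can use the
 * returned update type to decide how much reprogramming a commit will need;
 * updates, surface_count and stream_update are assumed to have been built by
 * the caller.
 *
 *	enum surface_update_type type = dc_check_update_surfaces_for_stream(
 *			dc, updates, surface_count, stream_update,
 *			dc_stream_get_status(stream));
 *
 *	if (type == UPDATE_TYPE_FULL)
 *		... full validation and bandwidth reprogramming needed ...
 *	else if (type == UPDATE_TYPE_FAST)
 *		... only flip/plane address updates needed ...
 */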
1254
1255 static struct dc_stream_status *stream_get_status(
1256         struct dc_state *ctx,
1257         struct dc_stream_state *stream)
1258 {
1259         uint8_t i;
1260
1261         for (i = 0; i < ctx->stream_count; i++) {
1262                 if (stream == ctx->streams[i]) {
1263                         return &ctx->stream_status[i];
1264                 }
1265         }
1266
1267         return NULL;
1268 }
1269
1270 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1271
1272
1273 static void commit_planes_for_stream(struct dc *dc,
1274                 struct dc_surface_update *srf_updates,
1275                 int surface_count,
1276                 struct dc_stream_state *stream,
1277                 struct dc_stream_update *stream_update,
1278                 enum surface_update_type update_type,
1279                 struct dc_state *context)
1280 {
1281         int i, j;
1282         struct pipe_ctx *top_pipe_to_program = NULL;
1283
1284         if (update_type == UPDATE_TYPE_FULL) {
1285                 dc->hwss.set_bandwidth(dc, context, false);
1286                 context_clock_trace(dc, context);
1287         }
1288
1289         if (surface_count == 0) {
1290                 /*
1291                  * In case of turning off the screen, there is no need to program the
1292                  * front end a second time; just return after programming the front end.
1293                  */
1294                 dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
1295                 return;
1296         }
1297
1298         /* Full fe update*/
1299         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1300                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1301
1302                 if (!pipe_ctx->top_pipe &&
1303                         pipe_ctx->stream &&
1304                         pipe_ctx->stream == stream) {
1305                         struct dc_stream_status *stream_status = NULL;
1306
1307                         top_pipe_to_program = pipe_ctx;
1308
1309                         if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
1310                                 continue;
1311
1312                         stream_status =
1313                                         stream_get_status(context, pipe_ctx->stream);
1314
1315                         dc->hwss.apply_ctx_for_surface(
1316                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
1317                 }
1318         }
1319
1320         if (update_type == UPDATE_TYPE_FULL)
1321                 context_timing_trace(dc, &context->res_ctx);
1322
1323         /* Lock the top pipe while updating plane addrs, since freesync requires
1324          *  plane addr update event triggers to be synchronized.
1325          *  top_pipe_to_program is expected to never be NULL
1326          */
1327         if (update_type == UPDATE_TYPE_FAST) {
1328                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
1329
1330                 /* Perform requested Updates */
1331                 for (i = 0; i < surface_count; i++) {
1332                         struct dc_plane_state *plane_state = srf_updates[i].surface;
1333
1334                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1335                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1336
1337                                 if (pipe_ctx->stream != stream)
1338                                         continue;
1339
1340                                 if (pipe_ctx->plane_state != plane_state)
1341                                         continue;
1342
1343                                 if (srf_updates[i].flip_addr)
1344                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
1345                         }
1346                 }
1347
1348                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
1349         }
1350
1351         if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
1352                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
1353                         struct pipe_ctx *pipe_ctx =
1354                                         &context->res_ctx.pipe_ctx[j];
1355
1356                         if (pipe_ctx->stream != stream)
1357                                 continue;
1358
1359                         if (stream_update->hdr_static_metadata) {
1360                                 resource_build_info_frame(pipe_ctx);
1361                                 dc->hwss.update_info_frame(pipe_ctx);
1362                         }
1363                 }
1364 }
1365
1366 void dc_commit_updates_for_stream(struct dc *dc,
1367                 struct dc_surface_update *srf_updates,
1368                 int surface_count,
1369                 struct dc_stream_state *stream,
1370                 struct dc_stream_update *stream_update,
1371                 struct dc_plane_state **plane_states,
1372                 struct dc_state *state)
1373 {
1374         const struct dc_stream_status *stream_status;
1375         enum surface_update_type update_type;
1376         struct dc_state *context;
1377         struct dc_context *dc_ctx = dc->ctx;
1378         int i, j;
1379
1380         stream_status = dc_stream_get_status(stream);
1381         context = dc->current_state;
1382
1383         update_type = dc_check_update_surfaces_for_stream(
1384                                 dc, srf_updates, surface_count, stream_update, stream_status);
1385
1386         if (update_type >= update_surface_trace_level)
1387                 update_surface_trace(dc, srf_updates, surface_count);
1388
1389
1390         if (update_type >= UPDATE_TYPE_FULL) {
1391
1392                 /* initialize scratch memory for building context */
1393                 context = dc_create_state();
1394                 if (context == NULL) {
1395                         DC_ERROR("Failed to allocate new validate context!\n");
1396                         return;
1397                 }
1398
1399                 dc_resource_state_copy_construct(state, context);
1400         }
1401
1402
1403         for (i = 0; i < surface_count; i++) {
1404                 struct dc_plane_state *surface = srf_updates[i].surface;
1405
1406                 /* TODO: On flip we don't build the state, so it still has the
1407                  * old address, which is why we are updating the address here.
1408                  */
1409                 if (srf_updates[i].flip_addr) {
1410                         surface->address = srf_updates[i].flip_addr->address;
1411                         surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
1412
1413                 }
1414
1415                 if (update_type >= UPDATE_TYPE_MED) {
1416                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1417                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1418
1419                                 if (pipe_ctx->plane_state != surface)
1420                                         continue;
1421
1422                                 resource_build_scaling_params(pipe_ctx);
1423                         }
1424                 }
1425         }
1426
1427         commit_planes_for_stream(
1428                                 dc,
1429                                 srf_updates,
1430                                 surface_count,
1431                                 stream,
1432                                 stream_update,
1433                                 update_type,
1434                                 context);
1435         /* update current_state */
1436         if (dc->current_state != context) {
1437
1438                 struct dc_state *old = dc->current_state;
1439
1440                 dc->current_state = context;
1441                 dc_release_state(old);
1442
1443         }
1444         /* let's use current_state to update watermarks, etc. */
1445         if (update_type >= UPDATE_TYPE_FULL)
1446                 dc_post_update_surfaces_to_stream(dc);
1447
1450 }
1451
1452 uint8_t dc_get_current_stream_count(struct dc *dc)
1453 {
1454         return dc->current_state->stream_count;
1455 }
1456
1457 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
1458 {
1459         if (i < dc->current_state->stream_count)
1460                 return dc->current_state->streams[i];
1461         return NULL;
1462 }
1463
1464 enum dc_irq_source dc_interrupt_to_irq_source(
1465                 struct dc *dc,
1466                 uint32_t src_id,
1467                 uint32_t ext_id)
1468 {
1469         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
1470 }
1471
1472 void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
1473 {
1474
1475         if (dc == NULL)
1476                 return;
1477
1478         dal_irq_service_set(dc->res_pool->irqs, src, enable);
1479 }
1480
1481 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1482 {
1483         dal_irq_service_ack(dc->res_pool->irqs, src);
1484 }
1485
1486 void dc_set_power_state(
1487         struct dc *dc,
1488         enum dc_acpi_cm_power_state power_state)
1489 {
1490         struct kref refcount;
1491
1492         switch (power_state) {
1493         case DC_ACPI_CM_POWER_STATE_D0:
1494                 dc_resource_state_construct(dc, dc->current_state);
1495
1496                 dc->hwss.init_hw(dc);
1497                 break;
1498         default:
1499
1500                 dc->hwss.power_down(dc);
1501
1502                 /* Zero out the current context so that on resume we start with
1503                  * clean state, and dc hw programming optimizations will not
1504                  * cause any trouble.
1505                  */
1506
1507                 /* Preserve refcount */
1508                 refcount = dc->current_state->refcount;
1509                 dc_resource_state_destruct(dc->current_state);
1510                 memset(dc->current_state, 0,
1511                                 sizeof(*dc->current_state));
1512
1513                 dc->current_state->refcount = refcount;
1514
1515                 break;
1516         }
1517
1518 }
1519
1520 void dc_resume(struct dc *dc)
1521 {
1522
1523         uint32_t i;
1524
1525         for (i = 0; i < dc->link_count; i++)
1526                 core_link_resume(dc->links[i]);
1527 }
1528
1529 bool dc_submit_i2c(
1530                 struct dc *dc,
1531                 uint32_t link_index,
1532                 struct i2c_command *cmd)
1533 {
1534
1535         struct dc_link *link = dc->links[link_index];
1536         struct ddc_service *ddc = link->ddc;
1537
1538         return dal_i2caux_submit_i2c_command(
1539                 ddc->ctx->i2caux,
1540                 ddc->ddc_pin,
1541                 cmd);
1542 }
1543
1544 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
1545 {
1546         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1547                 BREAK_TO_DEBUGGER();
1548                 return false;
1549         }
1550
1551         dc_sink_retain(sink);
1552
1553         dc_link->remote_sinks[dc_link->sink_count] = sink;
1554         dc_link->sink_count++;
1555
1556         return true;
1557 }
1558
1559 struct dc_sink *dc_link_add_remote_sink(
1560                 struct dc_link *link,
1561                 const uint8_t *edid,
1562                 int len,
1563                 struct dc_sink_init_data *init_data)
1564 {
1565         struct dc_sink *dc_sink;
1566         enum dc_edid_status edid_status;
1567
1568         if (len > MAX_EDID_BUFFER_SIZE) {
1569                 dm_error("Max EDID buffer size breached!\n");
1570                 return NULL;
1571         }
1572
1573         if (!init_data) {
1574                 BREAK_TO_DEBUGGER();
1575                 return NULL;
1576         }
1577
1578         if (!init_data->link) {
1579                 BREAK_TO_DEBUGGER();
1580                 return NULL;
1581         }
1582
1583         dc_sink = dc_sink_create(init_data);
1584
1585         if (!dc_sink)
1586                 return NULL;
1587
1588         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1589         dc_sink->dc_edid.length = len;
1590
1591         if (!link_add_remote_sink_helper(
1592                         link,
1593                         dc_sink))
1594                 goto fail_add_sink;
1595
1596         edid_status = dm_helpers_parse_edid_caps(
1597                         link->ctx,
1598                         &dc_sink->dc_edid,
1599                         &dc_sink->edid_caps);
1600
1601         if (edid_status != EDID_OK)
1602                 goto fail;
1603
1604         return dc_sink;
1605 fail:
1606         dc_link_remove_remote_sink(link, dc_sink);
1607 fail_add_sink:
1608         dc_sink_release(dc_sink);
1609         return NULL;
1610 }
1611
1612 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
1613 {
1614         int i;
1615
1616         if (!link->sink_count) {
1617                 BREAK_TO_DEBUGGER();
1618                 return;
1619         }
1620
1621         for (i = 0; i < link->sink_count; i++) {
1622                 if (link->remote_sinks[i] == sink) {
1623                         dc_sink_release(sink);
1624                         link->remote_sinks[i] = NULL;
1625
1626                         /* shrink array to remove the empty slot */
1627                         while (i < link->sink_count - 1) {
1628                                 link->remote_sinks[i] = link->remote_sinks[i+1];
1629                                 i++;
1630                         }
1631                         link->remote_sinks[i] = NULL;
1632                         link->sink_count--;
1633                         return;
1634                 }
1635         }
1636 }