]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/amd/display/dc/core/dc.c
Merge tag 'kvm-4.16-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[linux.git] / drivers / gpu / drm / amd / display / dc / core / dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32
33 #include "resource.h"
34
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
37
38 #include "bios_parser_interface.h"
39 #include "include/irq_service_interface.h"
40 #include "transform.h"
41 #include "dpp.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
44
45 #include "link_hwss.h"
46 #include "link_encoder.h"
47
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
51 #include "hubp.h"
52
53
54 /*******************************************************************************
55  * Private functions
56  ******************************************************************************/
57
58 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
59 {
60         if (new > *original)
61                 *original = new;
62 }
63
64 static void destroy_links(struct dc *dc)
65 {
66         uint32_t i;
67
68         for (i = 0; i < dc->link_count; i++) {
69                 if (NULL != dc->links[i])
70                         link_destroy(&dc->links[i]);
71         }
72 }
73
/*
 * Enumerate physical connectors from the VBIOS and create a dc_link for
 * each, then append num_virtual_links virtual links (virtual connector +
 * virtual encoder, e.g. for headless paths).
 *
 * Returns true on success, false if the connector count exceeds
 * ENUM_ID_COUNT or a virtual-link allocation fails. On the failure path,
 * links already stored in dc->links are left for destroy_links() (invoked
 * via destruct()) to clean up.
 */
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	/* Zero connectors is suspicious but deliberately non-fatal. */
	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		/* link_create() failure is tolerated: that connector is
		 * skipped and link_count is not advanced. */
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		/* Store in dc->links before further init so destroy_links()
		 * can find it even if the encoder allocation below fails. */
		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		/* Virtual encoder: all routing fields are "unknown". */
		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}
166
/*
 * Program a new DRR (dynamic refresh rate) vmin/vmax range on every pipe
 * currently driving streams[0] and refresh its info frame.
 *
 * TODO: Support multiple streams — only streams[0] is honored today;
 * num_streams is effectively unused.
 *
 * Returns true if at least one matching pipe with a stream encoder was
 * reprogrammed, false otherwise.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			/* set_drr takes an array of pipe pointers; pass one */
			dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}
191
/*
 * Read the current and nominal CRTC vertical position for streams[0].
 *
 * TODO: Support multiple streams — only streams[0] is honored today;
 * num_streams is effectively unused. If several pipes drive the stream,
 * the outputs reflect whichever pipe was queried last.
 *
 * Returns true if a matching pipe with a stream encoder was found and
 * *v_pos / *nom_v_pos were written, false otherwise.
 */
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			/* get_position takes an array of pipe pointers */
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}
216
217 void dc_stream_set_static_screen_events(struct dc *dc,
218                 struct dc_stream_state **streams,
219                 int num_streams,
220                 const struct dc_static_screen_events *events)
221 {
222         int i = 0;
223         int j = 0;
224         struct pipe_ctx *pipes_affected[MAX_PIPES];
225         int num_pipes_affected = 0;
226
227         for (i = 0; i < num_streams; i++) {
228                 struct dc_stream_state *stream = streams[i];
229
230                 for (j = 0; j < MAX_PIPES; j++) {
231                         if (dc->current_state->res_ctx.pipe_ctx[j].stream
232                                         == stream) {
233                                 pipes_affected[num_pipes_affected++] =
234                                                 &dc->current_state->res_ctx.pipe_ctx[j];
235                         }
236                 }
237         }
238
239         dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
240 }
241
242 static void destruct(struct dc *dc)
243 {
244         dc_release_state(dc->current_state);
245         dc->current_state = NULL;
246
247         destroy_links(dc);
248
249         dc_destroy_resource_pool(dc);
250
251         if (dc->ctx->gpio_service)
252                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
253
254         if (dc->ctx->i2caux)
255                 dal_i2caux_destroy(&dc->ctx->i2caux);
256
257         if (dc->ctx->created_bios)
258                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
259
260         if (dc->ctx->logger)
261                 dal_logger_destroy(&dc->ctx->logger);
262
263         kfree(dc->ctx);
264         dc->ctx = NULL;
265
266         kfree(dc->bw_vbios);
267         dc->bw_vbios = NULL;
268
269         kfree(dc->bw_dceip);
270         dc->bw_dceip = NULL;
271
272 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
273         kfree(dc->dcn_soc);
274         dc->dcn_soc = NULL;
275
276         kfree(dc->dcn_ip);
277         dc->dcn_ip = NULL;
278
279 #endif
280 }
281
/*
 * Build a dc instance: bandwidth-calc tables, context, logger, BIOS
 * parser, I2C/AUX, GPIO service, resource pool and links — in dependency
 * order. On any failure, jumps to 'fail' which calls destruct() to unwind
 * whatever was already created.
 *
 * Returns true on success, false on failure.
 */
static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	/* Bandwidth-calculation IP parameters */
	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	/* Bandwidth-calculation VBIOS parameters */
	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	/* DCN1.0 additionally needs SoC bounding box and IP parameters */
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc->ctx = dc_ctx;

	dc->current_state = dc_create_state();

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	/* Create logger */
	logger = dal_logger_create(dc_ctx, init_params->log_mask);

	if (!logger) {
		/* can *not* call logger. call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto fail;
	}
	dc_ctx->logger = logger;
	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

#if defined(CONFIG_DRM_AMD_DC_FBC)
	dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
#endif
	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		/* remember we own the parser so destruct() destroys it */
		dc_ctx->created_bios = true;
		}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:

	destruct(dc);
	return false;
}
427
/*
 * Disable planes on pipes whose current stream does not survive into the
 * incoming context ("dangling" streams). Works on a copy of the current
 * state so the plane removal can be applied to HW before the real context
 * switch, then installs that copy as the new current state.
 */
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state();
	struct dc_state *current_ctx;

	/* best effort: silently skip if the temp state can't be allocated */
	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		/* keep the stream if it also appears in the new context */
		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			/* plane_count 0: program the stream with no planes */
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	/* swap the plane-stripped copy in as the current state */
	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
460
461 /*******************************************************************************
462  * Public functions
463  ******************************************************************************/
464
465 struct dc *dc_create(const struct dc_init_data *init_params)
466  {
467         struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
468         unsigned int full_pipe_count;
469
470         if (NULL == dc)
471                 goto alloc_fail;
472
473         if (false == construct(dc, init_params))
474                 goto construct_fail;
475
476         /*TODO: separate HW and SW initialization*/
477         dc->hwss.init_hw(dc);
478
479         full_pipe_count = dc->res_pool->pipe_count;
480         if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
481                 full_pipe_count--;
482         dc->caps.max_streams = min(
483                         full_pipe_count,
484                         dc->res_pool->stream_enc_count);
485
486         dc->caps.max_links = dc->link_count;
487         dc->caps.max_audios = dc->res_pool->audio_count;
488         dc->caps.linear_pitch_alignment = 64;
489
490         dc->config = init_params->flags;
491
492         dm_logger_write(dc->ctx->logger, LOG_DC,
493                         "Display Core initialized\n");
494
495
496         /* TODO: missing feature to be enabled */
497         dc->debug.disable_dfs_bypass = true;
498
499         return dc;
500
501 construct_fail:
502         kfree(dc);
503
504 alloc_fail:
505         return NULL;
506 }
507
/*
 * Destroy a DC instance created by dc_create() and NULL the caller's
 * pointer. NOTE(review): *dc is dereferenced unconditionally — callers
 * must not pass a pointer to NULL.
 */
void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
514
515 static void enable_timing_multisync(
516                 struct dc *dc,
517                 struct dc_state *ctx)
518 {
519         int i = 0, multisync_count = 0;
520         int pipe_count = dc->res_pool->pipe_count;
521         struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
522
523         for (i = 0; i < pipe_count; i++) {
524                 if (!ctx->res_ctx.pipe_ctx[i].stream ||
525                                 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
526                         continue;
527                 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
528                 multisync_count++;
529         }
530
531         if (multisync_count > 1) {
532                 dc->hwss.enable_per_frame_crtc_position_reset(
533                         dc, multisync_count, multisync_pipes);
534         }
535 }
536
/*
 * Group top-level pipes with synchronizable stream timings and program
 * timing synchronization for each group of two or more.
 *
 * Within each group, an unblanked pipe is promoted to slot 0 to act as the
 * sync master, and any other already-unblanked pipes are dropped from the
 * group (they are considered already synced).
 */
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	/* Consider only top pipes with an active stream. */
	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced
		 * (note: j deliberately continues from the master found above,
		 * so only pipes after the master are examined) */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				group_size--;
				/* swap-remove: pull the last entry into slot j */
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}
610
611 static bool context_changed(
612                 struct dc *dc,
613                 struct dc_state *context)
614 {
615         uint8_t i;
616
617         if (context->stream_count != dc->current_state->stream_count)
618                 return true;
619
620         for (i = 0; i < dc->current_state->stream_count; i++) {
621                 if (dc->current_state->streams[i] != context->streams[i])
622                         return true;
623         }
624
625         return false;
626 }
627
628 bool dc_enable_stereo(
629         struct dc *dc,
630         struct dc_state *context,
631         struct dc_stream_state *streams[],
632         uint8_t stream_count)
633 {
634         bool ret = true;
635         int i, j;
636         struct pipe_ctx *pipe;
637
638         for (i = 0; i < MAX_PIPES; i++) {
639                 if (context != NULL)
640                         pipe = &context->res_ctx.pipe_ctx[i];
641                 else
642                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
643                 for (j = 0 ; pipe && j < stream_count; j++)  {
644                         if (streams[j] && streams[j] == pipe->stream &&
645                                 dc->hwss.setup_stereo)
646                                 dc->hwss.setup_stereo(pipe, dc);
647                 }
648         }
649
650         return ret;
651 }
652
653
654 /*
655  * Applies given context to HW and copy it into current context.
656  * It's up to the user to release the src context afterwards.
657  */
/*
 * Applies given context to HW and copy it into current context.
 * It's up to the user to release the src context afterwards.
 *
 * Sequence: disable dangling planes, enter accelerated mode if needed,
 * re-apply surfaces for unchanged streams (to free plane resources),
 * apply the context to HW, sync timings, then program planes for changed
 * streams and enable stereo. On success the context becomes the new
 * current state (retained; the previous one is released).
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] =  context->streams[i];

	/* leave VGA/legacy mode on first commit after boot */
	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context); /* use new pipe config in new context */
	}

	/* Program hardware */
	dc->hwss.ready_shared_resources(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	/* NOTE(review): early return here skips the current_state swap and
	 * optimize_shared_resources below — caller still owns 'context'. */
	if (result != DC_OK)
		return result;

	/* timing sync only matters with two or more streams */
	if (context->stream_count > 1) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context*/
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_sink *sink = context->streams[i]->sink;

		/* unchanged streams were already handled above */
		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++)  {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_khz);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	/* make the committed context current (ref swap) */
	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	dc->hwss.optimize_shared_resources(dc);

	return result;
}
752
753 bool dc_commit_state(struct dc *dc, struct dc_state *context)
754 {
755         enum dc_status result = DC_ERROR_UNEXPECTED;
756         int i;
757
758         if (false == context_changed(dc, context))
759                 return DC_OK;
760
761         dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
762                                 __func__, context->stream_count);
763
764         for (i = 0; i < context->stream_count; i++) {
765                 struct dc_stream_state *stream = context->streams[i];
766
767                 dc_stream_log(stream,
768                                 dc->ctx->logger,
769                                 LOG_DC);
770         }
771
772         result = dc_commit_state_no_check(dc, context);
773
774         return (result == DC_OK);
775 }
776
/*
 * Post-flip cleanup: disable planes on pipes that lost their stream or
 * plane state, clear the optimized_required flag and reprogram bandwidth.
 *
 * Always returns true.
 */
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	/* 3rd param should be true, temp w/a for RV*/
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
#else
	dc->hwss.set_bandwidth(dc, context, true);
#endif
	return true;
}
801
802 /*
803  * TODO this whole function needs to go
804  *
805  * dc_surface_update is needlessly complex. See if we can just replace this
806  * with a dc_plane_state and follow the atomic model a bit more closely here.
807  */
/*
 * TODO this whole function needs to go
 *
 * dc_surface_update is needlessly complex. See if we can just replace this
 * with a dc_plane_state and follow the atomic model a bit more closely here.
 *
 * Translates an array of plane states into dc_surface_update structures
 * (copying flip address, plane info and scaling info per plane) plus a
 * stream update, then forwards everything to dc_commit_updates_for_stream.
 *
 * Returns false only on allocation failure, true otherwise.
 */
bool dc_commit_planes_to_stream(
		struct dc *dc,
		struct dc_plane_state **plane_states,
		uint8_t new_plane_count,
		struct dc_stream_state *dc_stream,
		struct dc_state *state)
{
	/* no need to dynamically allocate this. it's pretty small */
	struct dc_surface_update updates[MAX_SURFACES];
	struct dc_flip_addrs *flip_addr;
	struct dc_plane_info *plane_info;
	struct dc_scaling_info *scaling_info;
	int i;
	struct dc_stream_update *stream_update =
			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);

	if (!stream_update) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	/* heap-allocate the per-plane side arrays; updates[] points into them */
	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
			    GFP_KERNEL);
	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
			     GFP_KERNEL);
	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
			       GFP_KERNEL);

	/* kfree(NULL) is a no-op, so one combined cleanup path suffices */
	if (!flip_addr || !plane_info || !scaling_info) {
		kfree(flip_addr);
		kfree(plane_info);
		kfree(scaling_info);
		kfree(stream_update);
		return false;
	}

	memset(updates, 0, sizeof(updates));

	stream_update->src = dc_stream->src;
	stream_update->dst = dc_stream->dst;
	stream_update->out_transfer_func = dc_stream->out_transfer_func;

	/* fan each plane state out into the update sub-structures */
	for (i = 0; i < new_plane_count; i++) {
		updates[i].surface = plane_states[i];
		updates[i].gamma =
			(struct dc_gamma *)plane_states[i]->gamma_correction;
		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
		flip_addr[i].address = plane_states[i]->address;
		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
		plane_info[i].color_space = plane_states[i]->color_space;
		plane_info[i].input_tf = plane_states[i]->input_tf;
		plane_info[i].format = plane_states[i]->format;
		plane_info[i].plane_size = plane_states[i]->plane_size;
		plane_info[i].rotation = plane_states[i]->rotation;
		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
		plane_info[i].stereo_format = plane_states[i]->stereo_format;
		plane_info[i].tiling_info = plane_states[i]->tiling_info;
		plane_info[i].visible = plane_states[i]->visible;
		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
		plane_info[i].dcc = plane_states[i]->dcc;
		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
		scaling_info[i].src_rect = plane_states[i]->src_rect;
		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
		scaling_info[i].clip_rect = plane_states[i]->clip_rect;

		updates[i].flip_addr = &flip_addr[i];
		updates[i].plane_info = &plane_info[i];
		updates[i].scaling_info = &scaling_info[i];
	}

	dc_commit_updates_for_stream(
			dc,
			updates,
			new_plane_count,
			dc_stream, stream_update, plane_states, state);

	kfree(flip_addr);
	kfree(plane_info);
	kfree(scaling_info);
	kfree(stream_update);
	return true;
}
890
891 struct dc_state *dc_create_state(void)
892 {
893         struct dc_state *context = kzalloc(sizeof(struct dc_state),
894                                            GFP_KERNEL);
895
896         if (!context)
897                 return NULL;
898
899         kref_init(&context->refcount);
900         return context;
901 }
902
903 void dc_retain_state(struct dc_state *context)
904 {
905         kref_get(&context->refcount);
906 }
907
908 static void dc_state_free(struct kref *kref)
909 {
910         struct dc_state *context = container_of(kref, struct dc_state, refcount);
911         dc_resource_state_destruct(context);
912         kfree(context);
913 }
914
915 void dc_release_state(struct dc_state *context)
916 {
917         kref_put(&context->refcount, dc_state_free);
918 }
919
920 static bool is_surface_in_context(
921                 const struct dc_state *context,
922                 const struct dc_plane_state *plane_state)
923 {
924         int j;
925
926         for (j = 0; j < MAX_PIPES; j++) {
927                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
928
929                 if (plane_state == pipe_ctx->plane_state) {
930                         return true;
931                 }
932         }
933
934         return false;
935 }
936
937 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
938 {
939         switch (format) {
940         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
941         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
942                 return 12;
943         case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
944         case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
945         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
946         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
947                 return 16;
948         case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
949         case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
950         case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
951         case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
952                 return 32;
953         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
954         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
955         case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
956                 return 64;
957         default:
958                 ASSERT_CRITICAL(false);
959                 return -1;
960         }
961 }
962
/*
 * Classify the plane_info portion of a surface update.
 *
 * Compares each field of u->plane_info against the surface's current value
 * and records every difference in u->surface->update_flags (flags are ORed
 * in; the caller is responsible for clearing them beforehand).
 *
 * Returns UPDATE_TYPE_FAST when no plane_info is attached, UPDATE_TYPE_FULL
 * when a change requires bandwidth/DML revalidation, UPDATE_TYPE_MED
 * otherwise.
 */
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
        union surface_update_flags *update_flags = &u->surface->update_flags;

        /* No plane_info in this update: nothing to reprogram here. */
        if (!u->plane_info)
                return UPDATE_TYPE_FAST;

        if (u->plane_info->color_space != u->surface->color_space)
                update_flags->bits.color_space_change = 1;

        if (u->plane_info->input_tf != u->surface->input_tf)
                update_flags->bits.input_tf_change = 1;

        if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
                update_flags->bits.horizontal_mirror_change = 1;

        if (u->plane_info->rotation != u->surface->rotation)
                update_flags->bits.rotation_change = 1;

        if (u->plane_info->stereo_format != u->surface->stereo_format)
                update_flags->bits.stereo_format_change = 1;

        if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
                update_flags->bits.per_pixel_alpha_change = 1;

        /* Any of the three DCC parameters changing counts as a DCC change. */
        if (u->plane_info->dcc.enable != u->surface->dcc.enable
                        || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
                        || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
                update_flags->bits.dcc_change = 1;

        if (pixel_format_to_bpp(u->plane_info->format) !=
                        pixel_format_to_bpp(u->surface->format))
                /* different bytes per element will require full bandwidth
                 * and DML calculation
                 */
                update_flags->bits.bpp_change = 1;

        if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
                        sizeof(union dc_tiling_info)) != 0) {
                update_flags->bits.swizzle_change = 1;
                /* todo: below are HW dependent, we should add a hook to
                 * DCE/N resource and validated there.
                 */
                if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
                        /* swizzled mode requires RQ to be setup properly,
                         * thus need to run DML to calculate RQ settings
                         */
                        update_flags->bits.bandwidth_change = 1;
        }

        /* These four flags force a revalidation pass, hence FULL. */
        if (update_flags->bits.rotation_change
                        || update_flags->bits.stereo_format_change
                        || update_flags->bits.bpp_change
                        || update_flags->bits.bandwidth_change)
                return UPDATE_TYPE_FULL;

        return UPDATE_TYPE_MED;
}
1021
/*
 * Classify the scaling_info portion of a surface update.
 *
 * Records clip/src/dst rect differences in u->surface->update_flags and
 * returns: FULL when a clock or bandwidth recalculation is needed, MED for
 * scaling or position changes, FAST when scaling_info is absent or
 * unchanged.
 */
static enum surface_update_type get_scaling_info_update_type(
                const struct dc_surface_update *u)
{
        union surface_update_flags *update_flags = &u->surface->update_flags;

        /* No scaling_info in this update: nothing to reprogram here. */
        if (!u->scaling_info)
                return UPDATE_TYPE_FAST;

        if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
                        || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
                        || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
                        || u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
                update_flags->bits.scaling_change = 1;

                if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
                        || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
                                && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
                                        || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
                        /* Making dst rect smaller requires a bandwidth change */
                        update_flags->bits.bandwidth_change = 1;
        }

        if (u->scaling_info->src_rect.width != u->surface->src_rect.width
                || u->scaling_info->src_rect.height != u->surface->src_rect.height) {

                update_flags->bits.scaling_change = 1;
                if (u->scaling_info->src_rect.width > u->surface->src_rect.width
                                && u->scaling_info->src_rect.height > u->surface->src_rect.height)
                        /* Making src rect bigger requires a clock change
                         * (clock_change is set here, not bandwidth_change)
                         */
                        update_flags->bits.clock_change = 1;
        }

        /* Pure repositioning of any rect without a size change. */
        if (u->scaling_info->src_rect.x != u->surface->src_rect.x
                        || u->scaling_info->src_rect.y != u->surface->src_rect.y
                        || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
                        || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
                        || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
                        || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
                update_flags->bits.position_change = 1;

        if (update_flags->bits.clock_change
                        || update_flags->bits.bandwidth_change)
                return UPDATE_TYPE_FULL;

        if (update_flags->bits.scaling_change
                        || update_flags->bits.position_change)
                return UPDATE_TYPE_MED;

        return UPDATE_TYPE_FAST;
}
1072
1073 static enum surface_update_type det_surface_update(const struct dc *dc,
1074                 const struct dc_surface_update *u)
1075 {
1076         const struct dc_state *context = dc->current_state;
1077         enum surface_update_type type;
1078         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1079         union surface_update_flags *update_flags = &u->surface->update_flags;
1080
1081         update_flags->raw = 0; // Reset all flags
1082
1083         if (!is_surface_in_context(context, u->surface)) {
1084                 update_flags->bits.new_plane = 1;
1085                 return UPDATE_TYPE_FULL;
1086         }
1087
1088         type = get_plane_info_update_type(u);
1089         elevate_update_type(&overall_type, type);
1090
1091         type = get_scaling_info_update_type(u);
1092         elevate_update_type(&overall_type, type);
1093
1094         if (u->in_transfer_func)
1095                 update_flags->bits.in_transfer_func = 1;
1096
1097         if (u->input_csc_color_matrix)
1098                 update_flags->bits.input_csc_change = 1;
1099
1100         if (update_flags->bits.in_transfer_func
1101                         || update_flags->bits.input_csc_change) {
1102                 type = UPDATE_TYPE_MED;
1103                 elevate_update_type(&overall_type, type);
1104         }
1105
1106         return overall_type;
1107 }
1108
1109 static enum surface_update_type check_update_surfaces_for_stream(
1110                 struct dc *dc,
1111                 struct dc_surface_update *updates,
1112                 int surface_count,
1113                 struct dc_stream_update *stream_update,
1114                 const struct dc_stream_status *stream_status)
1115 {
1116         int i;
1117         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1118
1119         if (stream_status == NULL || stream_status->plane_count != surface_count)
1120                 return UPDATE_TYPE_FULL;
1121
1122         if (stream_update)
1123                 return UPDATE_TYPE_FULL;
1124
1125         for (i = 0 ; i < surface_count; i++) {
1126                 enum surface_update_type type =
1127                                 det_surface_update(dc, &updates[i]);
1128
1129                 if (type == UPDATE_TYPE_FULL)
1130                         return type;
1131
1132                 elevate_update_type(&overall_type, type);
1133         }
1134
1135         return overall_type;
1136 }
1137
1138 enum surface_update_type dc_check_update_surfaces_for_stream(
1139                 struct dc *dc,
1140                 struct dc_surface_update *updates,
1141                 int surface_count,
1142                 struct dc_stream_update *stream_update,
1143                 const struct dc_stream_status *stream_status)
1144 {
1145         int i;
1146         enum surface_update_type type;
1147
1148         for (i = 0; i < surface_count; i++)
1149                 updates[i].surface->update_flags.raw = 0;
1150
1151         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
1152         if (type == UPDATE_TYPE_FULL)
1153                 for (i = 0; i < surface_count; i++)
1154                         updates[i].surface->update_flags.bits.full_update = 1;
1155
1156         return type;
1157 }
1158
1159 static struct dc_stream_status *stream_get_status(
1160         struct dc_state *ctx,
1161         struct dc_stream_state *stream)
1162 {
1163         uint8_t i;
1164
1165         for (i = 0; i < ctx->stream_count; i++) {
1166                 if (stream == ctx->streams[i]) {
1167                         return &ctx->stream_status[i];
1168                 }
1169         }
1170
1171         return NULL;
1172 }
1173
/* Minimum update severity at which update_surface_trace() is emitted. */
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1175
1176
/*
 * Program the hardware for one stream's surface updates.
 *
 * Sequence (order matters):
 *  1. On FULL updates, reprogram bandwidth/clocks first.
 *  2. surface_count == 0 means the screen is being turned off: program the
 *     front end once and return.
 *  3. For non-FAST updates, re-apply the context for every top pipe that
 *     drives @stream.
 *  4. Program flip addresses for FAST updates.
 *  5. For stream-level updates above FAST, rebuild and send info frames.
 */
static void commit_planes_for_stream(struct dc *dc,
                struct dc_surface_update *srf_updates,
                int surface_count,
                struct dc_stream_state *stream,
                struct dc_stream_update *stream_update,
                enum surface_update_type update_type,
                struct dc_state *context)
{
        int i, j;

        if (update_type == UPDATE_TYPE_FULL) {
                dc->hwss.set_bandwidth(dc, context, false);
                context_clock_trace(dc, context);
        }

        if (surface_count == 0) {
                /*
                 * In case of turning off screen, no need to program front end a second time.
                 * just return after program front end.
                 */
                dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
                return;
        }

        /* Full fe update*/
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

                /* FAST updates skip front-end reprogramming entirely. */
                if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
                        continue;

                /* Only top pipes of this stream drive apply_ctx_for_surface. */
                if (!pipe_ctx->top_pipe &&
                    pipe_ctx->stream &&
                    pipe_ctx->stream == stream) {
                        struct dc_stream_status *stream_status =
                                        stream_get_status(context, pipe_ctx->stream);

                        dc->hwss.apply_ctx_for_surface(
                                        dc, pipe_ctx->stream, stream_status->plane_count, context);
                }
        }

        if (update_type == UPDATE_TYPE_FULL)
                context_timing_trace(dc, &context->res_ctx);

        /* Perform requested Updates */
        for (i = 0; i < surface_count; i++) {
                struct dc_plane_state *plane_state = srf_updates[i].surface;

                for (j = 0; j < dc->res_pool->pipe_count; j++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

                        if (pipe_ctx->stream != stream)
                                continue;

                        if (pipe_ctx->plane_state != plane_state)
                                continue;

                        /* FAST path: only the flip address is reprogrammed. */
                        if (update_type == UPDATE_TYPE_FAST && srf_updates[i].flip_addr)
                                        dc->hwss.update_plane_addr(dc, pipe_ctx);
                }
        }

        /* Stream-level updates (e.g. HDR metadata) for MED/FULL updates. */
        if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
                for (j = 0; j < dc->res_pool->pipe_count; j++) {
                        struct pipe_ctx *pipe_ctx =
                                        &context->res_ctx.pipe_ctx[j];

                        if (pipe_ctx->stream != stream)
                                continue;

                        if (stream_update->hdr_static_metadata) {
                                resource_build_info_frame(pipe_ctx);
                                dc->hwss.update_info_frame(pipe_ctx);
                        }
                }
}
1254
/*
 * Apply a batch of surface/stream updates to @stream and commit them to HW.
 *
 * Classifies the update; for FULL updates a fresh dc_state is built from
 * @state, otherwise the current state is programmed in place. After
 * commit_planes_for_stream() the new context (if any) replaces
 * dc->current_state and the old one is released.
 */
void dc_commit_updates_for_stream(struct dc *dc,
                struct dc_surface_update *srf_updates,
                int surface_count,
                struct dc_stream_state *stream,
                struct dc_stream_update *stream_update,
                struct dc_plane_state **plane_states,
                struct dc_state *state)
{
        const struct dc_stream_status *stream_status;
        enum surface_update_type update_type;
        struct dc_state *context;
        /* dc_ctx is consumed implicitly by the DC_ERROR() macro below. */
        struct dc_context *dc_ctx = dc->ctx;
        int i, j;

        stream_status = dc_stream_get_status(stream);
        context = dc->current_state;

        update_type = dc_check_update_surfaces_for_stream(
                                dc, srf_updates, surface_count, stream_update, stream_status);

        if (update_type >= update_surface_trace_level)
                update_surface_trace(dc, srf_updates, surface_count);


        if (update_type >= UPDATE_TYPE_FULL) {

                /* initialize scratch memory for building context */
                context = dc_create_state();
                if (context == NULL) {
                        DC_ERROR("Failed to allocate new validate context!\n");
                        return;
                }

                /* Seed the new context from the caller-provided state. */
                dc_resource_state_copy_construct(state, context);
        }


        for (i = 0; i < surface_count; i++) {
                struct dc_plane_state *surface = srf_updates[i].surface;

                /* TODO: On flip we don't build the state, so it still has the
                 * old address. Which is why we are updating the address here
                 */
                if (srf_updates[i].flip_addr) {
                        surface->address = srf_updates[i].flip_addr->address;
                        surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;

                }

                /* MED and above may have changed scaling; rebuild per pipe. */
                if (update_type >= UPDATE_TYPE_MED) {
                        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

                                if (pipe_ctx->plane_state != surface)
                                        continue;

                                resource_build_scaling_params(pipe_ctx);
                        }
                }
        }

        commit_planes_for_stream(
                                dc,
                                srf_updates,
                                surface_count,
                                stream,
                                stream_update,
                                update_type,
                                context);
        /*update current_State*/
        if (dc->current_state != context) {

                /* Swap in the newly built context and drop the old one. */
                struct dc_state *old = dc->current_state;

                dc->current_state = context;
                dc_release_state(old);

        }
        /*let's use current_state to update watermark etc*/
        if (update_type >= UPDATE_TYPE_FULL)
                dc_post_update_surfaces_to_stream(dc);

        return;

}
1340
1341 uint8_t dc_get_current_stream_count(struct dc *dc)
1342 {
1343         return dc->current_state->stream_count;
1344 }
1345
1346 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
1347 {
1348         if (i < dc->current_state->stream_count)
1349                 return dc->current_state->streams[i];
1350         return NULL;
1351 }
1352
1353 enum dc_irq_source dc_interrupt_to_irq_source(
1354                 struct dc *dc,
1355                 uint32_t src_id,
1356                 uint32_t ext_id)
1357 {
1358         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
1359 }
1360
1361 void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
1362 {
1363
1364         if (dc == NULL)
1365                 return;
1366
1367         dal_irq_service_set(dc->res_pool->irqs, src, enable);
1368 }
1369
1370 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1371 {
1372         dal_irq_service_ack(dc->res_pool->irqs, src);
1373 }
1374
/*
 * Transition DC between ACPI power states.
 *
 * D0: rebuild the resource state and reinitialize the hardware.
 * Any other state: power down and wipe the current context so resume
 * starts clean — but the kref must survive the memset, since external
 * holders may still reference dc->current_state.
 */
void dc_set_power_state(
        struct dc *dc,
        enum dc_acpi_cm_power_state power_state)
{
        struct kref refcount;

        switch (power_state) {
        case DC_ACPI_CM_POWER_STATE_D0:
                dc_resource_state_construct(dc, dc->current_state);

                dc->hwss.init_hw(dc);
                break;
        default:

                dc->hwss.power_down(dc);

                /* Zero out the current context so that on resume we start with
                 * clean state, and dc hw programming optimizations will not
                 * cause any trouble.
                 */

                /* Preserve refcount */
                refcount = dc->current_state->refcount;
                dc_resource_state_destruct(dc->current_state);
                memset(dc->current_state, 0,
                                sizeof(*dc->current_state));

                /* Restore the saved refcount after the wipe. */
                dc->current_state->refcount = refcount;

                break;
        }

}
1408
1409 void dc_resume(struct dc *dc)
1410 {
1411
1412         uint32_t i;
1413
1414         for (i = 0; i < dc->link_count; i++)
1415                 core_link_resume(dc->links[i]);
1416 }
1417
1418 bool dc_submit_i2c(
1419                 struct dc *dc,
1420                 uint32_t link_index,
1421                 struct i2c_command *cmd)
1422 {
1423
1424         struct dc_link *link = dc->links[link_index];
1425         struct ddc_service *ddc = link->ddc;
1426
1427         return dal_i2caux_submit_i2c_command(
1428                 ddc->ctx->i2caux,
1429                 ddc->ddc_pin,
1430                 cmd);
1431 }
1432
1433 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
1434 {
1435         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1436                 BREAK_TO_DEBUGGER();
1437                 return false;
1438         }
1439
1440         dc_sink_retain(sink);
1441
1442         dc_link->remote_sinks[dc_link->sink_count] = sink;
1443         dc_link->sink_count++;
1444
1445         return true;
1446 }
1447
1448 struct dc_sink *dc_link_add_remote_sink(
1449                 struct dc_link *link,
1450                 const uint8_t *edid,
1451                 int len,
1452                 struct dc_sink_init_data *init_data)
1453 {
1454         struct dc_sink *dc_sink;
1455         enum dc_edid_status edid_status;
1456
1457         if (len > MAX_EDID_BUFFER_SIZE) {
1458                 dm_error("Max EDID buffer size breached!\n");
1459                 return NULL;
1460         }
1461
1462         if (!init_data) {
1463                 BREAK_TO_DEBUGGER();
1464                 return NULL;
1465         }
1466
1467         if (!init_data->link) {
1468                 BREAK_TO_DEBUGGER();
1469                 return NULL;
1470         }
1471
1472         dc_sink = dc_sink_create(init_data);
1473
1474         if (!dc_sink)
1475                 return NULL;
1476
1477         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1478         dc_sink->dc_edid.length = len;
1479
1480         if (!link_add_remote_sink_helper(
1481                         link,
1482                         dc_sink))
1483                 goto fail_add_sink;
1484
1485         edid_status = dm_helpers_parse_edid_caps(
1486                         link->ctx,
1487                         &dc_sink->dc_edid,
1488                         &dc_sink->edid_caps);
1489
1490         if (edid_status != EDID_OK)
1491                 goto fail;
1492
1493         return dc_sink;
1494 fail:
1495         dc_link_remove_remote_sink(link, dc_sink);
1496 fail_add_sink:
1497         dc_sink_release(dc_sink);
1498         return NULL;
1499 }
1500
1501 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
1502 {
1503         int i;
1504
1505         if (!link->sink_count) {
1506                 BREAK_TO_DEBUGGER();
1507                 return;
1508         }
1509
1510         for (i = 0; i < link->sink_count; i++) {
1511                 if (link->remote_sinks[i] == sink) {
1512                         dc_sink_release(sink);
1513                         link->remote_sinks[i] = NULL;
1514
1515                         /* shrink array to remove empty place */
1516                         while (i < link->sink_count - 1) {
1517                                 link->remote_sinks[i] = link->remote_sinks[i+1];
1518                                 i++;
1519                         }
1520                         link->remote_sinks[i] = NULL;
1521                         link->sink_count--;
1522                         return;
1523                 }
1524         }
1525 }