/*
 * drivers/gpu/drm/amd/display/dc/core/dc.c
 * (stray web-export navigation text removed; it was not valid C source)
 */
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include <linux/slab.h>
26
27 #include "dm_services.h"
28
29 #include "dc.h"
30
31 #include "core_status.h"
32 #include "core_types.h"
33 #include "hw_sequencer.h"
34 #include "dce/dce_hwseq.h"
35
36 #include "resource.h"
37
38 #include "clk_mgr.h"
39 #include "clock_source.h"
40 #include "dc_bios_types.h"
41
42 #include "bios_parser_interface.h"
43 #include "include/irq_service_interface.h"
44 #include "transform.h"
45 #include "dmcu.h"
46 #include "dpp.h"
47 #include "timing_generator.h"
48 #include "abm.h"
49 #include "virtual/virtual_link_encoder.h"
50
51 #include "link_hwss.h"
52 #include "link_encoder.h"
53
54 #include "dc_link_ddc.h"
55 #include "dm_helpers.h"
56 #include "mem_input.h"
57 #include "hubp.h"
58
59 #include "dc_link_dp.h"
60
61 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
62 #include "dsc.h"
63 #endif
64
65 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
66 #include "vm_helper.h"
67 #endif
68
69 #include "dce/dce_i2c.h"
70
/* Logger expression expanded by the DC_LOG_* macros used in this file;
 * requires a local variable named 'dc' to be in scope at the call site.
 */
#define DC_LOGGER \
	dc->ctx->logger
73
/* Build identifier published through dc->build_id in dc_create().
 * Note: 'static const' (not 'const static') per kernel coding style.
 */
static const char DC_BUILD_ID[] = "production-build";
75
76 /**
77  * DOC: Overview
78  *
79  * DC is the OS-agnostic component of the amdgpu DC driver.
80  *
81  * DC maintains and validates a set of structs representing the state of the
82  * driver and writes that state to AMD hardware
83  *
84  * Main DC HW structs:
85  *
86  * struct dc - The central struct.  One per driver.  Created on driver load,
87  * destroyed on driver unload.
88  *
89  * struct dc_context - One per driver.
90  * Used as a backpointer by most other structs in dc.
91  *
92  * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
93  * plugpoints).  Created on driver load, destroyed on driver unload.
94  *
95  * struct dc_sink - One per display.  Created on boot or hotplug.
96  * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
97  * (the display directly attached).  It may also have one or more remote
98  * sinks (in the Multi-Stream Transport case)
99  *
100  * struct resource_pool - One per driver.  Represents the hw blocks not in the
101  * main pipeline.  Not directly accessible by dm.
102  *
103  * Main dc state structs:
104  *
105  * These structs can be created and destroyed as needed.  There is a full set of
106  * these structs in dc->current_state representing the currently programmed state.
107  *
108  * struct dc_state - The global DC state to track global state information,
109  * such as bandwidth values.
110  *
111  * struct dc_stream_state - Represents the hw configuration for the pipeline from
112  * a framebuffer to a display.  Maps one-to-one with dc_sink.
113  *
114  * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
115  * and may have more in the Multi-Plane Overlay case.
116  *
117  * struct resource_context - Represents the programmable state of everything in
118  * the resource_pool.  Not directly accessible by dm.
119  *
120  * struct pipe_ctx - A member of struct resource_context.  Represents the
121  * internal hardware pipeline components.  Each dc_plane_state has either
122  * one or two (in the pipe-split case).
123  */
124
125 /*******************************************************************************
126  * Private functions
127  ******************************************************************************/
128
129 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
130 {
131         if (new > *original)
132                 *original = new;
133 }
134
135 static void destroy_links(struct dc *dc)
136 {
137         uint32_t i;
138
139         for (i = 0; i < dc->link_count; i++) {
140                 if (NULL != dc->links[i])
141                         link_destroy(&dc->links[i]);
142         }
143 }
144
145 static bool create_links(
146                 struct dc *dc,
147                 uint32_t num_virtual_links)
148 {
149         int i;
150         int connectors_num;
151         struct dc_bios *bios = dc->ctx->dc_bios;
152
153         dc->link_count = 0;
154
155         connectors_num = bios->funcs->get_connectors_number(bios);
156
157         if (connectors_num > ENUM_ID_COUNT) {
158                 dm_error(
159                         "DC: Number of connectors %d exceeds maximum of %d!\n",
160                         connectors_num,
161                         ENUM_ID_COUNT);
162                 return false;
163         }
164
165         dm_output_to_console(
166                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
167                 __func__,
168                 connectors_num,
169                 num_virtual_links);
170
171         for (i = 0; i < connectors_num; i++) {
172                 struct link_init_data link_init_params = {0};
173                 struct dc_link *link;
174
175                 link_init_params.ctx = dc->ctx;
176                 /* next BIOS object table connector */
177                 link_init_params.connector_index = i;
178                 link_init_params.link_index = dc->link_count;
179                 link_init_params.dc = dc;
180                 link = link_create(&link_init_params);
181
182                 if (link) {
183                         bool should_destory_link = false;
184
185                         if (link->connector_signal == SIGNAL_TYPE_EDP) {
186                                 if (dc->config.edp_not_connected)
187                                         should_destory_link = true;
188                                 else if (dc->debug.remove_disconnect_edp) {
189                                         enum dc_connection_type type;
190                                         dc_link_detect_sink(link, &type);
191                                         if (type == dc_connection_none)
192                                                 should_destory_link = true;
193                                 }
194                         }
195
196                         if (!should_destory_link) {
197                                 dc->links[dc->link_count] = link;
198                                 link->dc = dc;
199                                 ++dc->link_count;
200                         } else {
201                                 link_destroy(&link);
202                         }
203                 }
204         }
205
206         for (i = 0; i < num_virtual_links; i++) {
207                 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
208                 struct encoder_init_data enc_init = {0};
209
210                 if (link == NULL) {
211                         BREAK_TO_DEBUGGER();
212                         goto failed_alloc;
213                 }
214
215                 link->link_index = dc->link_count;
216                 dc->links[dc->link_count] = link;
217                 dc->link_count++;
218
219                 link->ctx = dc->ctx;
220                 link->dc = dc;
221                 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
222                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
223                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
224                 link->link_id.enum_id = ENUM_ID_1;
225                 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
226
227                 if (!link->link_enc) {
228                         BREAK_TO_DEBUGGER();
229                         goto failed_alloc;
230                 }
231
232                 link->link_status.dpcd_caps = &link->dpcd_caps;
233
234                 enc_init.ctx = dc->ctx;
235                 enc_init.channel = CHANNEL_ID_UNKNOWN;
236                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
237                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
238                 enc_init.connector = link->link_id;
239                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
240                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
241                 enc_init.encoder.enum_id = ENUM_ID_1;
242                 virtual_link_encoder_construct(link->link_enc, &enc_init);
243         }
244
245         return true;
246
247 failed_alloc:
248         return false;
249 }
250
251 static struct dc_perf_trace *dc_perf_trace_create(void)
252 {
253         return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
254 }
255
256 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
257 {
258         kfree(*perf_trace);
259         *perf_trace = NULL;
260 }
261
/**
 * dc_stream_adjust_vmin_vmax - Update DRR vertical total limits for a stream.
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: true if at least one pipe driving @stream was reprogrammed.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Cache the new limits on the stream, then program the
			 * timing generator via the hw sequencer.
			 */
			pipe->stream->adjust = *adjust;
			dc->hwss.set_drr(&pipe,
					1,
					adjust->v_total_min,
					adjust->v_total_max);

			ret = true;
		}
	}
	return ret;
}
300
301 bool dc_stream_get_crtc_position(struct dc *dc,
302                 struct dc_stream_state **streams, int num_streams,
303                 unsigned int *v_pos, unsigned int *nom_v_pos)
304 {
305         /* TODO: Support multiple streams */
306         const struct dc_stream_state *stream = streams[0];
307         int i = 0;
308         bool ret = false;
309         struct crtc_position position;
310
311         for (i = 0; i < MAX_PIPES; i++) {
312                 struct pipe_ctx *pipe =
313                                 &dc->current_state->res_ctx.pipe_ctx[i];
314
315                 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
316                         dc->hwss.get_position(&pipe, 1, &position);
317
318                         *v_pos = position.vertical_count;
319                         *nom_v_pos = position.nominal_vcount;
320                         ret = true;
321                 }
322         }
323         return ret;
324 }
325
326 /**
327  * dc_stream_configure_crc() - Configure CRC capture for the given stream.
328  * @dc: DC Object
329  * @stream: The stream to configure CRC on.
330  * @enable: Enable CRC if true, disable otherwise.
331  * @continuous: Capture CRC on every frame if true. Otherwise, only capture
332  *              once.
333  *
334  * By default, only CRC0 is configured, and the entire frame is used to
335  * calculate the crc.
336  */
337 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
338                              bool enable, bool continuous)
339 {
340         int i;
341         struct pipe_ctx *pipe;
342         struct crc_params param;
343         struct timing_generator *tg;
344
345         for (i = 0; i < MAX_PIPES; i++) {
346                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
347                 if (pipe->stream == stream)
348                         break;
349         }
350         /* Stream not found */
351         if (i == MAX_PIPES)
352                 return false;
353
354         /* Always capture the full frame */
355         param.windowa_x_start = 0;
356         param.windowa_y_start = 0;
357         param.windowa_x_end = pipe->stream->timing.h_addressable;
358         param.windowa_y_end = pipe->stream->timing.v_addressable;
359         param.windowb_x_start = 0;
360         param.windowb_y_start = 0;
361         param.windowb_x_end = pipe->stream->timing.h_addressable;
362         param.windowb_y_end = pipe->stream->timing.v_addressable;
363
364         /* Default to the union of both windows */
365         param.selection = UNION_WINDOW_A_B;
366         param.continuous_mode = continuous;
367         param.enable = enable;
368
369         tg = pipe->stream_res.tg;
370
371         /* Only call if supported */
372         if (tg->funcs->configure_crc)
373                 return tg->funcs->configure_crc(tg, &param);
374         DC_LOG_WARNING("CRC capture not supported.");
375         return false;
376 }
377
378 /**
379  * dc_stream_get_crc() - Get CRC values for the given stream.
380  * @dc: DC object
381  * @stream: The DC stream state of the stream to get CRCs from.
382  * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
383  *
384  * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
385  * Return false if stream is not found, or if CRCs are not enabled.
386  */
387 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
388                        uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
389 {
390         int i;
391         struct pipe_ctx *pipe;
392         struct timing_generator *tg;
393
394         for (i = 0; i < MAX_PIPES; i++) {
395                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
396                 if (pipe->stream == stream)
397                         break;
398         }
399         /* Stream not found */
400         if (i == MAX_PIPES)
401                 return false;
402
403         tg = pipe->stream_res.tg;
404
405         if (tg->funcs->get_crc)
406                 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
407         DC_LOG_WARNING("CRC capture not supported.");
408         return false;
409 }
410
411 void dc_stream_set_dither_option(struct dc_stream_state *stream,
412                 enum dc_dither_option option)
413 {
414         struct bit_depth_reduction_params params;
415         struct dc_link *link = stream->link;
416         struct pipe_ctx *pipes = NULL;
417         int i;
418
419         for (i = 0; i < MAX_PIPES; i++) {
420                 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
421                                 stream) {
422                         pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
423                         break;
424                 }
425         }
426
427         if (!pipes)
428                 return;
429         if (option > DITHER_OPTION_MAX)
430                 return;
431
432         stream->dither_option = option;
433
434         memset(&params, 0, sizeof(params));
435         resource_build_bit_depth_reduction_params(stream, &params);
436         stream->bit_depth_params = params;
437
438         if (pipes->plane_res.xfm &&
439             pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
440                 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
441                         pipes->plane_res.xfm,
442                         pipes->plane_res.scl_data.lb_params.depth,
443                         &stream->bit_depth_params);
444         }
445
446         pipes->stream_res.opp->funcs->
447                 opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
448 }
449
450 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
451 {
452         int i = 0;
453         bool ret = false;
454         struct pipe_ctx *pipes;
455
456         for (i = 0; i < MAX_PIPES; i++) {
457                 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
458                         pipes = &dc->current_state->res_ctx.pipe_ctx[i];
459                         dc->hwss.program_gamut_remap(pipes);
460                         ret = true;
461                 }
462         }
463
464         return ret;
465 }
466
467 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
468 {
469         int i = 0;
470         bool ret = false;
471         struct pipe_ctx *pipes;
472
473         for (i = 0; i < MAX_PIPES; i++) {
474                 if (dc->current_state->res_ctx.pipe_ctx[i].stream
475                                 == stream) {
476
477                         pipes = &dc->current_state->res_ctx.pipe_ctx[i];
478                         dc->hwss.program_output_csc(dc,
479                                         pipes,
480                                         stream->output_color_space,
481                                         stream->csc_color_matrix.matrix,
482                                         pipes->stream_res.opp->inst);
483                         ret = true;
484                 }
485         }
486
487         return ret;
488 }
489
490 void dc_stream_set_static_screen_events(struct dc *dc,
491                 struct dc_stream_state **streams,
492                 int num_streams,
493                 const struct dc_static_screen_events *events)
494 {
495         int i = 0;
496         int j = 0;
497         struct pipe_ctx *pipes_affected[MAX_PIPES];
498         int num_pipes_affected = 0;
499
500         for (i = 0; i < num_streams; i++) {
501                 struct dc_stream_state *stream = streams[i];
502
503                 for (j = 0; j < MAX_PIPES; j++) {
504                         if (dc->current_state->res_ctx.pipe_ctx[j].stream
505                                         == stream) {
506                                 pipes_affected[num_pipes_affected++] =
507                                                 &dc->current_state->res_ctx.pipe_ctx[j];
508                         }
509                 }
510         }
511
512         dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
513 }
514
/* Tear down everything construct() built.  Safe on a partially-constructed
 * dc — construct() calls this on its failure path — because each step checks
 * for NULL before releasing (and kfree(NULL) is a no-op).
 */
static void destruct(struct dc *dc)
{
	/* Drop the current state first, while the links and resource pool it
	 * references are still alive.
	 */
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	/* Only destroy the BIOS parser if construct() created it, i.e. no
	 * vbios_override was supplied by the caller.
	 */
	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;

#endif
}
562
/* Software-side construction of a dc instance: bandwidth-calc tables, the
 * dc_context, BIOS parser, perf trace, GPIO service, resource pool, clock
 * manager, the initial current_state, and the links.  Every failure path
 * funnels through destruct(), which tolerates partially-initialized state.
 */
static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
	dc->config = init_params->flags;

#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	// Allocate memory for the vm_helper
	/* NOTE(review): allocation result is not checked here, unlike the
	 * allocations below — confirm whether a NULL vm_helper is tolerated.
	 */
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);

#endif
	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	/* Bandwidth-calculation tables: IP capabilities and vbios-derived
	 * parameters, freed again in destruct().
	 */
	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc->ctx = dc_ctx;

	/* Create logger */

	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		/* destruct() only destroys a parser we created here, never a
		 * caller-supplied vbios_override.
		 */
		dc_ctx->created_bios = true;
	}

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:

	destruct(dc);
	return false;
}
713
714 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
715 static bool disable_all_writeback_pipes_for_stream(
716                 const struct dc *dc,
717                 struct dc_stream_state *stream,
718                 struct dc_state *context)
719 {
720         int i;
721
722         for (i = 0; i < stream->num_wb_info; i++)
723                 stream->writeback_info[i].wb_enabled = false;
724
725         return true;
726 }
727 #endif
728
/* Disable planes (and, on DCN2, writeback pipes) for any stream present in
 * the current state but absent from the incoming @context, so hardware does
 * not keep scanning out surfaces the new state no longer describes.  The
 * resulting plane-less copy becomes the new current_state.
 */
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	/* Best effort: bail silently if a scratch state can't be allocated. */
	if (dangling_context == NULL)
		return;

	/* Work on a copy of the current state so it can be applied to hw. */
	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		/* Keep the stream if the incoming context still carries it. */
		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	/* Swap the plane-less copy in as current and release the old state. */
	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
764
765 /*******************************************************************************
766  * Public functions
767  ******************************************************************************/
768
/* Allocate and initialize a dc instance: software construction via
 * construct(), hardware init, then capability/version fields derived from
 * the resource pool.  Returns NULL on allocation or construct failure.
 */
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	/* An underlay pipe, when present, does not count as a full pipe. */
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	/* Stream count is bounded by both full pipes and stream encoders. */
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");



	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}
814
/* NOTE(review): intentionally empty stub — no callbacks are wired up at
 * this point; presumably kept so dm can call it unconditionally.  Confirm
 * before removing.
 */
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
}
819
820 void dc_destroy(struct dc **dc)
821 {
822         destruct(*dc);
823         kfree(*dc);
824         *dc = NULL;
825 }
826
827 static void enable_timing_multisync(
828                 struct dc *dc,
829                 struct dc_state *ctx)
830 {
831         int i = 0, multisync_count = 0;
832         int pipe_count = dc->res_pool->pipe_count;
833         struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
834
835         for (i = 0; i < pipe_count; i++) {
836                 if (!ctx->res_ctx.pipe_ctx[i].stream ||
837                                 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
838                         continue;
839                 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
840                         continue;
841                 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
842                 multisync_count++;
843         }
844
845         if (multisync_count > 0) {
846                 dc->hwss.enable_per_frame_crtc_position_reset(
847                         dc, multisync_count, multisync_pipes);
848         }
849 }
850
/* Partition the top-level pipes of @ctx into groups with synchronizable
 * stream timings and ask the HW sequencer to start each group's timing
 * generators in lock-step.  Only groups of two or more pipes are
 * programmed; every stream still gets its timing_sync_info filled in.
 */
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;	/* index among groups actually synced (size > 1) */
	int num_group = 0;	/* index among all groups, used as group_id */
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	/* Collect candidates: pipes with a stream that are not bottom pipes
	 * of an MPC combine (those share the top pipe's timing generator).
	 */
	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first pipe with plane as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->plane_state) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* Record group id/size and mark slot 0 (the pipe chosen above)
		 * as the sync master in each stream's status.
		 */
		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;

		}
		/* remove any other pipes with plane as they have already been synced */
		/* NOTE: deliberately reuses j from the master-search loop above,
		 * so scanning starts just past the chosen master; removal swaps
		 * the tail element into slot j and shrinks the group.
		 */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->plane_state) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}
938
939 static bool context_changed(
940                 struct dc *dc,
941                 struct dc_state *context)
942 {
943         uint8_t i;
944
945         if (context->stream_count != dc->current_state->stream_count)
946                 return true;
947
948         for (i = 0; i < dc->current_state->stream_count; i++) {
949                 if (dc->current_state->streams[i] != context->streams[i])
950                         return true;
951         }
952
953         return false;
954 }
955
/* Check whether the display on @sink is already lit (e.g. by VBIOS/GOP)
 * with a timing matching @crtc_timing.  Returns true only when the DIG is
 * enabled, the mapped OTG reports a matching timing, and (for DP) the DP
 * clock source's pixel clock matches — presumably so boot-time programming
 * can be reused seamlessly; confirm intent at the callers.
 */
bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct dc_link *link = sink->link;
	unsigned int inst;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	/* Check for which front end is used by this encoder.
	 * Note the inst is 1 indexed, where 0 is undefined.
	 * Note that DIG_FE can source from different OTG but our
	 * current implementation always map 1-to-1, so this code makes
	 * the same assumption and doesn't check OTG source.
	 */
	inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1;

	/* Instance should be within the range of the pool */
	/* (an undefined frontend of 0 wraps to UINT_MAX and is rejected here) */
	if (inst >= dc->res_pool->pipe_count)
		return false;

	tg = dc->res_pool->timing_generators[inst];

	if (!tg->funcs->is_matching_timing)
		return false;

	if (!tg->funcs->is_matching_timing(tg, crtc_timing))
		return false;

	/* For DP, the pixel clock programmed in the clock source must also
	 * match the requested timing's clock.
	 */
	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;
	}

	return true;
}
1001
1002 bool dc_enable_stereo(
1003         struct dc *dc,
1004         struct dc_state *context,
1005         struct dc_stream_state *streams[],
1006         uint8_t stream_count)
1007 {
1008         bool ret = true;
1009         int i, j;
1010         struct pipe_ctx *pipe;
1011
1012         for (i = 0; i < MAX_PIPES; i++) {
1013                 if (context != NULL)
1014                         pipe = &context->res_ctx.pipe_ctx[i];
1015                 else
1016                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1017                 for (j = 0 ; pipe && j < stream_count; j++)  {
1018                         if (streams[j] && streams[j] == pipe->stream &&
1019                                 dc->hwss.setup_stereo)
1020                                 dc->hwss.setup_stereo(pipe, dc);
1021                 }
1022         }
1023
1024         return ret;
1025 }
1026
/*
 * Applies given context to HW and copy it into current context.
 * It's up to the user to release the src context afterwards.
 *
 * Sequence: tear down dangling planes, raise bandwidth, reprogram
 * unchanged streams (to free plane resources), wait for MPCC disconnects,
 * apply the context to HW, sync timings, program changed streams, enable
 * stereo, then lower bandwidth and swap in the new current state.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] =  context->streams[i];

	/* Take the GPU out of VGA/legacy mode if the VBIOS still owns it. */
	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	/* Any stream flagged for seamless boot keeps boot-time programming
	 * alive, which skips the bandwidth raise/lower below.
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot = true;
	}

	if (!dc->optimize_seamless_boot)
		dc->hwss.prepare_bandwidth(dc, context);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context); /* use new pipe config in new context */
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	/* NOTE: early return leaves prepare_bandwidth un-reverted and the
	 * current state unswapped — callers treat this as a hard failure.
	 */
	if (result != DC_OK)
		return result;

	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context*/
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++)  {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (!dc->optimize_seamless_boot)
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	memset(&context->commit_hints, 0, sizeof(context->commit_hints));

	/* Swap in the new state; the caller's reference to @context stays
	 * with the caller (see function comment).
	 */
	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}
1138
1139 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1140 {
1141         enum dc_status result = DC_ERROR_UNEXPECTED;
1142         int i;
1143
1144         if (false == context_changed(dc, context))
1145                 return DC_OK;
1146
1147         DC_LOG_DC("%s: %d streams\n",
1148                                 __func__, context->stream_count);
1149
1150         for (i = 0; i < context->stream_count; i++) {
1151                 struct dc_stream_state *stream = context->streams[i];
1152
1153                 dc_stream_log(dc, stream);
1154         }
1155
1156         result = dc_commit_state_no_check(dc, context);
1157
1158         return (result == DC_OK);
1159 }
1160
1161 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1162 {
1163         int i;
1164         struct dc_state *context = dc->current_state;
1165
1166         if (!dc->optimized_required || dc->optimize_seamless_boot)
1167                 return true;
1168
1169         post_surface_trace(dc);
1170
1171         for (i = 0; i < dc->res_pool->pipe_count; i++)
1172                 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1173                     context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1174                         context->res_ctx.pipe_ctx[i].pipe_idx = i;
1175                         dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1176                 }
1177
1178         dc->optimized_required = false;
1179
1180         dc->hwss.optimize_bandwidth(dc, context);
1181         return true;
1182 }
1183
1184 struct dc_state *dc_create_state(struct dc *dc)
1185 {
1186         struct dc_state *context = kzalloc(sizeof(struct dc_state),
1187                                            GFP_KERNEL);
1188
1189         if (!context)
1190                 return NULL;
1191         /* Each context must have their own instance of VBA and in order to
1192          * initialize and obtain IP and SOC the base DML instance from DC is
1193          * initially copied into every context
1194          */
1195 #ifdef CONFIG_DRM_AMD_DC_DCN1_0
1196         memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1197 #endif
1198
1199         kref_init(&context->refcount);
1200
1201         return context;
1202 }
1203
1204 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1205 {
1206         int i, j;
1207         struct dc_state *new_ctx = kmemdup(src_ctx,
1208                         sizeof(struct dc_state), GFP_KERNEL);
1209
1210         if (!new_ctx)
1211                 return NULL;
1212
1213         for (i = 0; i < MAX_PIPES; i++) {
1214                         struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
1215
1216                         if (cur_pipe->top_pipe)
1217                                 cur_pipe->top_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1218
1219                         if (cur_pipe->bottom_pipe)
1220                                 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1221
1222         }
1223
1224         for (i = 0; i < new_ctx->stream_count; i++) {
1225                         dc_stream_retain(new_ctx->streams[i]);
1226                         for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
1227                                 dc_plane_state_retain(
1228                                         new_ctx->stream_status[i].plane_states[j]);
1229         }
1230
1231         kref_init(&new_ctx->refcount);
1232
1233         return new_ctx;
1234 }
1235
/* Take an additional reference on @context (paired with dc_release_state). */
void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}
1240
/* kref release callback: destruct the state's resources and free it.
 * Invoked by dc_release_state() when the last reference is dropped.
 */
static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kfree(context);
}
1247
/* Drop a reference on @context; frees it via dc_state_free() at zero. */
void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
1252
1253 bool dc_set_generic_gpio_for_stereo(bool enable,
1254                 struct gpio_service *gpio_service)
1255 {
1256         enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
1257         struct gpio_pin_info pin_info;
1258         struct gpio *generic;
1259         struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
1260                            GFP_KERNEL);
1261
1262         if (!config)
1263                 return false;
1264         pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
1265
1266         if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
1267                 kfree(config);
1268                 return false;
1269         } else {
1270                 generic = dal_gpio_service_create_generic_mux(
1271                         gpio_service,
1272                         pin_info.offset,
1273                         pin_info.mask);
1274         }
1275
1276         if (!generic) {
1277                 kfree(config);
1278                 return false;
1279         }
1280
1281         gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
1282
1283         config->enable_output_from_mux = enable;
1284         config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
1285
1286         if (gpio_result == GPIO_RESULT_OK)
1287                 gpio_result = dal_mux_setup_config(generic, config);
1288
1289         if (gpio_result == GPIO_RESULT_OK) {
1290                 dal_gpio_close(generic);
1291                 dal_gpio_destroy_generic_mux(&generic);
1292                 kfree(config);
1293                 return true;
1294         } else {
1295                 dal_gpio_close(generic);
1296                 dal_gpio_destroy_generic_mux(&generic);
1297                 kfree(config);
1298                 return false;
1299         }
1300 }
1301
1302 static bool is_surface_in_context(
1303                 const struct dc_state *context,
1304                 const struct dc_plane_state *plane_state)
1305 {
1306         int j;
1307
1308         for (j = 0; j < MAX_PIPES; j++) {
1309                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1310
1311                 if (plane_state == pipe_ctx->plane_state) {
1312                         return true;
1313                 }
1314         }
1315
1316         return false;
1317 }
1318
1319 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1320 {
1321         union surface_update_flags *update_flags = &u->surface->update_flags;
1322         enum surface_update_type update_type = UPDATE_TYPE_FAST;
1323
1324         if (!u->plane_info)
1325                 return UPDATE_TYPE_FAST;
1326
1327         if (u->plane_info->color_space != u->surface->color_space) {
1328                 update_flags->bits.color_space_change = 1;
1329                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1330         }
1331
1332         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
1333                 update_flags->bits.horizontal_mirror_change = 1;
1334                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1335         }
1336
1337         if (u->plane_info->rotation != u->surface->rotation) {
1338                 update_flags->bits.rotation_change = 1;
1339                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1340         }
1341
1342         if (u->plane_info->format != u->surface->format) {
1343                 update_flags->bits.pixel_format_change = 1;
1344                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1345         }
1346
1347         if (u->plane_info->stereo_format != u->surface->stereo_format) {
1348                 update_flags->bits.stereo_format_change = 1;
1349                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1350         }
1351
1352         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
1353                 update_flags->bits.per_pixel_alpha_change = 1;
1354                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1355         }
1356
1357         if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
1358                 update_flags->bits.global_alpha_change = 1;
1359                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1360         }
1361
1362         if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
1363                 update_flags->bits.sdr_white_level = 1;
1364                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1365         }
1366
1367         if (u->plane_info->dcc.enable != u->surface->dcc.enable
1368                         || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
1369                         || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
1370                 update_flags->bits.dcc_change = 1;
1371                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1372         }
1373
1374         if (resource_pixel_format_to_bpp(u->plane_info->format) !=
1375                         resource_pixel_format_to_bpp(u->surface->format)) {
1376                 /* different bytes per element will require full bandwidth
1377                  * and DML calculation
1378                  */
1379                 update_flags->bits.bpp_change = 1;
1380                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1381         }
1382
1383         if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1384                         || u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
1385                         || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
1386                 update_flags->bits.plane_size_change = 1;
1387                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1388         }
1389
1390
1391         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
1392                         sizeof(union dc_tiling_info)) != 0) {
1393                 update_flags->bits.swizzle_change = 1;
1394                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1395
1396                 /* todo: below are HW dependent, we should add a hook to
1397                  * DCE/N resource and validated there.
1398                  */
1399                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
1400                         /* swizzled mode requires RQ to be setup properly,
1401                          * thus need to run DML to calculate RQ settings
1402                          */
1403                         update_flags->bits.bandwidth_change = 1;
1404                         elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1405                 }
1406         }
1407
1408         /* This should be UPDATE_TYPE_FAST if nothing has changed. */
1409         return update_type;
1410 }
1411
/* Classify how invasive a scaling_info update is (FAST/MED/FULL) and set
 * the matching bits in the surface's update_flags as a side effect.
 * clock/bandwidth changes force FULL; scaling/position changes are MED.
 */
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger is flagged as a clock change
			 * (forces a FULL update below, like bandwidth_change).
			 */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
1462
1463 static enum surface_update_type det_surface_update(const struct dc *dc,
1464                 const struct dc_surface_update *u)
1465 {
1466         const struct dc_state *context = dc->current_state;
1467         enum surface_update_type type;
1468         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1469         union surface_update_flags *update_flags = &u->surface->update_flags;
1470
1471         update_flags->raw = 0; // Reset all flags
1472
1473         if (u->flip_addr)
1474                 update_flags->bits.addr_update = 1;
1475
1476         if (!is_surface_in_context(context, u->surface)) {
1477                 update_flags->bits.new_plane = 1;
1478                 return UPDATE_TYPE_FULL;
1479         }
1480
1481         if (u->surface->force_full_update) {
1482                 update_flags->bits.full_update = 1;
1483                 return UPDATE_TYPE_FULL;
1484         }
1485
1486         type = get_plane_info_update_type(u);
1487         elevate_update_type(&overall_type, type);
1488
1489         type = get_scaling_info_update_type(u);
1490         elevate_update_type(&overall_type, type);
1491
1492         if (u->flip_addr)
1493                 update_flags->bits.addr_update = 1;
1494
1495         if (u->in_transfer_func)
1496                 update_flags->bits.in_transfer_func_change = 1;
1497
1498         if (u->input_csc_color_matrix)
1499                 update_flags->bits.input_csc_change = 1;
1500
1501         if (u->coeff_reduction_factor)
1502                 update_flags->bits.coeff_reduction_change = 1;
1503
1504         if (u->gamma) {
1505                 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
1506
1507                 if (u->plane_info)
1508                         format = u->plane_info->format;
1509                 else if (u->surface)
1510                         format = u->surface->format;
1511
1512                 if (dce_use_lut(format))
1513                         update_flags->bits.gamma_change = 1;
1514         }
1515
1516         if (update_flags->bits.in_transfer_func_change) {
1517                 type = UPDATE_TYPE_MED;
1518                 elevate_update_type(&overall_type, type);
1519         }
1520
1521         if (update_flags->bits.input_csc_change
1522                         || update_flags->bits.coeff_reduction_change
1523                         || update_flags->bits.gamma_change) {
1524                 type = UPDATE_TYPE_FULL;
1525                 elevate_update_type(&overall_type, type);
1526         }
1527
1528         return overall_type;
1529 }
1530
1531 static enum surface_update_type check_update_surfaces_for_stream(
1532                 struct dc *dc,
1533                 struct dc_surface_update *updates,
1534                 int surface_count,
1535                 struct dc_stream_update *stream_update,
1536                 const struct dc_stream_status *stream_status)
1537 {
1538         int i;
1539         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1540
1541         if (stream_status == NULL || stream_status->plane_count != surface_count)
1542                 return UPDATE_TYPE_FULL;
1543
1544         /* some stream updates require passive update */
1545         if (stream_update) {
1546                 if ((stream_update->src.height != 0) &&
1547                                 (stream_update->src.width != 0))
1548                         return UPDATE_TYPE_FULL;
1549
1550                 if ((stream_update->dst.height != 0) &&
1551                                 (stream_update->dst.width != 0))
1552                         return UPDATE_TYPE_FULL;
1553
1554                 if (stream_update->out_transfer_func)
1555                         return UPDATE_TYPE_FULL;
1556
1557                 if (stream_update->abm_level)
1558                         return UPDATE_TYPE_FULL;
1559
1560                 if (stream_update->dpms_off)
1561                         return UPDATE_TYPE_FULL;
1562
1563 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1564                 if (stream_update->wb_update)
1565                         return UPDATE_TYPE_FULL;
1566 #endif
1567         }
1568
1569         for (i = 0 ; i < surface_count; i++) {
1570                 enum surface_update_type type =
1571                                 det_surface_update(dc, &updates[i]);
1572
1573                 if (type == UPDATE_TYPE_FULL)
1574                         return type;
1575
1576                 elevate_update_type(&overall_type, type);
1577         }
1578
1579         return overall_type;
1580 }
1581
1582 /**
1583  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
1584  *
1585  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
1586  */
1587 enum surface_update_type dc_check_update_surfaces_for_stream(
1588                 struct dc *dc,
1589                 struct dc_surface_update *updates,
1590                 int surface_count,
1591                 struct dc_stream_update *stream_update,
1592                 const struct dc_stream_status *stream_status)
1593 {
1594         int i;
1595         enum surface_update_type type;
1596
1597         for (i = 0; i < surface_count; i++)
1598                 updates[i].surface->update_flags.raw = 0;
1599
1600         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
1601         if (type == UPDATE_TYPE_FULL)
1602                 for (i = 0; i < surface_count; i++)
1603                         updates[i].surface->update_flags.raw = 0xFFFFFFFF;
1604
1605         return type;
1606 }
1607
1608 static struct dc_stream_status *stream_get_status(
1609         struct dc_state *ctx,
1610         struct dc_stream_state *stream)
1611 {
1612         uint8_t i;
1613
1614         for (i = 0; i < ctx->stream_count; i++) {
1615                 if (stream == ctx->streams[i]) {
1616                         return &ctx->stream_status[i];
1617                 }
1618         }
1619
1620         return NULL;
1621 }
1622
/* Minimum update type at which surface updates are traced — presumably
 * consumed by the surface-update trace path; confirm at the use site.
 */
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1624
/*
 * Copy the populated (non-NULL) fields of a dc_surface_update onto the
 * corresponding dc_plane_state. Every member of the update is optional;
 * only what the caller filled in is applied. This touches software state
 * only - hardware programming happens in a later commit pass.
 */
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		/* Record flip-to-flip latency into a circular history buffer
		 * of DC_PLANE_UPDATE_TIMES_MAX entries.
		 */
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
				surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->sdr_white_level =
				srf_update->plane_info->sdr_white_level;
	}

	/* Gamma/transfer-function payloads are deep-copied, but only when the
	 * update points at a different object than the one the surface already
	 * owns - copying an object onto itself would be pointless.
	 */
	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	/* DCN2.0-only color pipeline stages: shaper, 3D LUT and blend TF. */
	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
			srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
		sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
			srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
		sizeof(*surface->lut3d_func));

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
			srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
		sizeof(*surface->blend_tf));

#endif
	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;
}
1739
/*
 * Copy the populated (non-NULL) fields of a dc_stream_update onto @stream.
 * Like copy_surface_update_to_plane(), every update member is optional.
 * The DSC portion additionally validates bandwidth against @context and
 * rolls the change back if validation fails.
 */
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 const struct dc_stream_update *update)
{
	if (update == NULL || stream == NULL)
		return;

	/* Zero-sized rects are treated as "not provided". */
	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	/* Deep-copy only when the update points at a different object. */
	if (update->out_transfer_func &&
	    stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
		       &update->out_transfer_func->tf_pts,
		       sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
#endif
#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
	if (update->dsc_config) {
		/* Remember the current DSC state so it can be restored if the
		 * new configuration does not pass bandwidth validation.
		 */
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		/* DSC is considered enabled only when both slice counts are set. */
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		stream->timing.dsc_cfg = *update->dsc_config;
		stream->timing.flags.DSC = enable_dsc;
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
							     true)) {
			/* Roll back: the new DSC config would not fit. */
			stream->timing.dsc_cfg = old_dsc_cfg;
			stream->timing.flags.DSC = old_dsc_enabled;
		}
	}
#endif
}
1834
/*
 * Program the hardware for the stream-level portion of an update, on every
 * top pipe in @context that drives @stream. Fast updates stop after the
 * infoframe/CSC/dither/DSC programming; full updates additionally handle
 * dpms_off stream enable/disable and ABM level changes.
 */
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		/* Only top pipes are programmed; bottom pipes share the stream. */
		if (!pipe_ctx->top_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);

			/* Rebuild and resend infoframes when any packet changed.
			 * HDR static metadata is skipped when the stream uses
			 * dynamic metadata instead.
			 */
			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
#endif
				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
									&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
				/* ODM splits one stream across two OPPs; keep both in sync. */
				if (odm_pipe)
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
#endif
			}

#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
			/* DSC reconfig must happen atomically across pipes. */
			if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
				dp_update_dsc_config(pipe_ctx);
				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
			}
#endif
			/* Full fe update*/
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			/* Stream enable/disable, under pipe lock. Bandwidth is
			 * lowered after disable and raised before enable.
			 */
			if (stream_update->dpms_off) {
				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
					dc->hwss.optimize_bandwidth(dc, dc->current_state);
				} else {
					dc->hwss.prepare_bandwidth(dc, dc->current_state);
					core_link_enable_stream(dc->current_state, pipe_ctx);
				}
				dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
					// if otg funcs defined check if blanked before programming
					if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
				} else
					pipe_ctx->stream_res.abm->funcs->set_abm_level(
						pipe_ctx->stream_res.abm, stream->abm_level);
			}
		}
	}
}
1926
1927 static void commit_planes_for_stream(struct dc *dc,
1928                 struct dc_surface_update *srf_updates,
1929                 int surface_count,
1930                 struct dc_stream_state *stream,
1931                 struct dc_stream_update *stream_update,
1932                 enum surface_update_type update_type,
1933                 struct dc_state *context)
1934 {
1935         int i, j;
1936         struct pipe_ctx *top_pipe_to_program = NULL;
1937
1938         if (dc->optimize_seamless_boot && surface_count > 0) {
1939                 /* Optimize seamless boot flag keeps clocks and watermarks high until
1940                  * first flip. After first flip, optimization is required to lower
1941                  * bandwidth. Important to note that it is expected UEFI will
1942                  * only light up a single display on POST, therefore we only expect
1943                  * one stream with seamless boot flag set.
1944                  */
1945                 if (stream->apply_seamless_boot_optimization) {
1946                         stream->apply_seamless_boot_optimization = false;
1947                         dc->optimize_seamless_boot = false;
1948                         dc->optimized_required = true;
1949                 }
1950         }
1951
1952         if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
1953                 dc->hwss.prepare_bandwidth(dc, context);
1954                 context_clock_trace(dc, context);
1955         }
1956
1957         // Stream updates
1958         if (stream_update)
1959                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
1960
1961         if (surface_count == 0) {
1962                 /*
1963                  * In case of turning off screen, no need to program front end a second time.
1964                  * just return after program blank.
1965                  */
1966                 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
1967                 return;
1968         }
1969
1970 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
1971         if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
1972                 for (i = 0; i < surface_count; i++) {
1973                         struct dc_plane_state *plane_state = srf_updates[i].surface;
1974                         /*set logical flag for lock/unlock use*/
1975                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1976                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1977                                 if (!pipe_ctx->plane_state)
1978                                         continue;
1979                                 if (pipe_ctx->plane_state != plane_state)
1980                                         continue;
1981                                 plane_state->triplebuffer_flips = false;
1982                                 if (update_type == UPDATE_TYPE_FAST &&
1983                                         dc->hwss.program_triplebuffer != NULL &&
1984                                         !plane_state->flip_immediate &&
1985                                         !dc->debug.disable_tri_buf) {
1986                                                 /*triple buffer for VUpdate  only*/
1987                                                 plane_state->triplebuffer_flips = true;
1988                                 }
1989                         }
1990                 }
1991         }
1992 #endif
1993
1994         // Update Type FULL, Surface updates
1995         for (j = 0; j < dc->res_pool->pipe_count; j++) {
1996                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1997
1998                 if (!pipe_ctx->top_pipe &&
1999                         pipe_ctx->stream &&
2000                         pipe_ctx->stream == stream) {
2001                         struct dc_stream_status *stream_status = NULL;
2002
2003                         top_pipe_to_program = pipe_ctx;
2004
2005                         if (!pipe_ctx->plane_state)
2006                                 continue;
2007
2008                         /* Full fe update*/
2009                         if (update_type == UPDATE_TYPE_FAST)
2010                                 continue;
2011
2012 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2013                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2014
2015                         if (dc->hwss.program_triplebuffer != NULL &&
2016                                 !dc->debug.disable_tri_buf) {
2017                                 /*turn off triple buffer for full update*/
2018                                 dc->hwss.program_triplebuffer(
2019                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2020                         }
2021 #endif
2022                         stream_status =
2023                                 stream_get_status(context, pipe_ctx->stream);
2024
2025                         dc->hwss.apply_ctx_for_surface(
2026                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
2027                 }
2028         }
2029
2030         // Update Type FAST, Surface updates
2031         if (update_type == UPDATE_TYPE_FAST) {
2032                 /* Lock the top pipe while updating plane addrs, since freesync requires
2033                  *  plane addr update event triggers to be synchronized.
2034                  *  top_pipe_to_program is expected to never be NULL
2035                  */
2036                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2037
2038 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2039                 if (dc->hwss.set_flip_control_gsl)
2040                         for (i = 0; i < surface_count; i++) {
2041                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
2042
2043                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2044                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2045
2046                                         if (pipe_ctx->stream != stream)
2047                                                 continue;
2048
2049                                         if (pipe_ctx->plane_state != plane_state)
2050                                                 continue;
2051
2052                                         // GSL has to be used for flip immediate
2053                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
2054                                                         plane_state->flip_immediate);
2055                                 }
2056                         }
2057 #endif
2058                 /* Perform requested Updates */
2059                 for (i = 0; i < surface_count; i++) {
2060                         struct dc_plane_state *plane_state = srf_updates[i].surface;
2061
2062                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2063                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2064
2065                                 if (pipe_ctx->stream != stream)
2066                                         continue;
2067
2068                                 if (pipe_ctx->plane_state != plane_state)
2069                                         continue;
2070 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2071                                 /*program triple buffer after lock based on flip type*/
2072                                 if (dc->hwss.program_triplebuffer != NULL &&
2073                                         !dc->debug.disable_tri_buf) {
2074                                         /*only enable triplebuffer for  fast_update*/
2075                                         dc->hwss.program_triplebuffer(
2076                                                 dc, pipe_ctx, plane_state->triplebuffer_flips);
2077                                 }
2078 #endif
2079                                 if (srf_updates[i].flip_addr)
2080                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
2081                         }
2082                 }
2083
2084                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2085         }
2086
2087         // Fire manual trigger only when bottom plane is flipped
2088         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2089                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2090
2091                 if (pipe_ctx->bottom_pipe ||
2092                                 !pipe_ctx->stream ||
2093                                 pipe_ctx->stream != stream ||
2094                                 !pipe_ctx->plane_state->update_flags.bits.addr_update)
2095                         continue;
2096
2097                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
2098                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
2099         }
2100 }
2101
/*
 * Apply a set of surface and stream updates to @stream and commit them.
 *
 * The update severity is classified first; FULL updates build a fresh
 * validate context (copied from @state) and swap it in as current_state
 * after programming, releasing the old one. FAST/MED updates are applied
 * onto the existing current_state.
 */
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;	/* used by the DC_ERROR() macro */
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);


	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		/* Planes that changed between the old and new context must be
		 * fully reprogrammed, not incrementally updated.
		 */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}


	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		/* MED and above may have changed scaling; rebuild the params
		 * for every pipe that references this surface.
		 */
		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	/*update current_State*/
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		/* Programming is done; clear the force-full-update markers. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	/*let's use current_state to update watermark etc*/
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);

	return;

}
2195
2196 uint8_t dc_get_current_stream_count(struct dc *dc)
2197 {
2198         return dc->current_state->stream_count;
2199 }
2200
2201 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
2202 {
2203         if (i < dc->current_state->stream_count)
2204                 return dc->current_state->streams[i];
2205         return NULL;
2206 }
2207
2208 enum dc_irq_source dc_interrupt_to_irq_source(
2209                 struct dc *dc,
2210                 uint32_t src_id,
2211                 uint32_t ext_id)
2212 {
2213         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
2214 }
2215
2216 /**
2217  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
2218  */
2219 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
2220 {
2221
2222         if (dc == NULL)
2223                 return false;
2224
2225         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
2226 }
2227
/* Acknowledge (clear) a pending hw interrupt source. */
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
2232
/*
 * Transition dc to the given ACPI power state.
 *
 * D0: rebuild the resource state and re-initialize hardware.
 * Any other state: wipe current_state so resume starts clean, while
 * preserving its refcount and display_mode_lib across the memset.
 *
 * The display_mode_lib copy is heap-allocated via kzalloc rather than
 * kept on the stack - presumably because the struct is large; confirm
 * against sizeof(struct display_mode_lib) before changing this.
 */
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
						GFP_KERNEL);

	ASSERT(dml);
	if (!dml)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);
		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		/* Restore the two members that must survive the wipe. */
		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		break;
	}

	kfree(dml);
}
2275
2276 void dc_resume(struct dc *dc)
2277 {
2278
2279         uint32_t i;
2280
2281         for (i = 0; i < dc->link_count; i++)
2282                 core_link_resume(dc->links[i]);
2283 }
2284
2285 unsigned int dc_get_current_backlight_pwm(struct dc *dc)
2286 {
2287         struct abm *abm = dc->res_pool->abm;
2288
2289         if (abm)
2290                 return abm->funcs->get_current_backlight(abm);
2291
2292         return 0;
2293 }
2294
2295 unsigned int dc_get_target_backlight_pwm(struct dc *dc)
2296 {
2297         struct abm *abm = dc->res_pool->abm;
2298
2299         if (abm)
2300                 return abm->funcs->get_target_backlight(abm);
2301
2302         return 0;
2303 }
2304
2305 bool dc_is_dmcu_initialized(struct dc *dc)
2306 {
2307         struct dmcu *dmcu = dc->res_pool->dmcu;
2308
2309         if (dmcu)
2310                 return dmcu->funcs->is_dmcu_initialized(dmcu);
2311         return false;
2312 }
2313
2314 bool dc_submit_i2c(
2315                 struct dc *dc,
2316                 uint32_t link_index,
2317                 struct i2c_command *cmd)
2318 {
2319
2320         struct dc_link *link = dc->links[link_index];
2321         struct ddc_service *ddc = link->ddc;
2322         return dce_i2c_submit_command(
2323                 dc->res_pool,
2324                 ddc->ddc_pin,
2325                 cmd);
2326 }
2327
2328 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
2329 {
2330         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
2331                 BREAK_TO_DEBUGGER();
2332                 return false;
2333         }
2334
2335         dc_sink_retain(sink);
2336
2337         dc_link->remote_sinks[dc_link->sink_count] = sink;
2338         dc_link->sink_count++;
2339
2340         return true;
2341 }
2342
2343 /**
2344  * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
2345  *
2346  * EDID length is in bytes
2347  */
2348 struct dc_sink *dc_link_add_remote_sink(
2349                 struct dc_link *link,
2350                 const uint8_t *edid,
2351                 int len,
2352                 struct dc_sink_init_data *init_data)
2353 {
2354         struct dc_sink *dc_sink;
2355         enum dc_edid_status edid_status;
2356
2357         if (len > DC_MAX_EDID_BUFFER_SIZE) {
2358                 dm_error("Max EDID buffer size breached!\n");
2359                 return NULL;
2360         }
2361
2362         if (!init_data) {
2363                 BREAK_TO_DEBUGGER();
2364                 return NULL;
2365         }
2366
2367         if (!init_data->link) {
2368                 BREAK_TO_DEBUGGER();
2369                 return NULL;
2370         }
2371
2372         dc_sink = dc_sink_create(init_data);
2373
2374         if (!dc_sink)
2375                 return NULL;
2376
2377         memmove(dc_sink->dc_edid.raw_edid, edid, len);
2378         dc_sink->dc_edid.length = len;
2379
2380         if (!link_add_remote_sink_helper(
2381                         link,
2382                         dc_sink))
2383                 goto fail_add_sink;
2384
2385         edid_status = dm_helpers_parse_edid_caps(
2386                         link->ctx,
2387                         &dc_sink->dc_edid,
2388                         &dc_sink->edid_caps);
2389
2390         /*
2391          * Treat device as no EDID device if EDID
2392          * parsing fails
2393          */
2394         if (edid_status != EDID_OK) {
2395                 dc_sink->dc_edid.length = 0;
2396                 dm_error("Bad EDID, status%d!\n", edid_status);
2397         }
2398
2399         return dc_sink;
2400
2401 fail_add_sink:
2402         dc_sink_release(dc_sink);
2403         return NULL;
2404 }
2405
2406 /**
2407  * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
2408  *
2409  * Note that this just removes the struct dc_sink - it doesn't
2410  * program hardware or alter other members of dc_link
2411  */
2412 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
2413 {
2414         int i;
2415
2416         if (!link->sink_count) {
2417                 BREAK_TO_DEBUGGER();
2418                 return;
2419         }
2420
2421         for (i = 0; i < link->sink_count; i++) {
2422                 if (link->remote_sinks[i] == sink) {
2423                         dc_sink_release(sink);
2424                         link->remote_sinks[i] = NULL;
2425
2426                         /* shrink array to remove empty place */
2427                         while (i < link->sink_count - 1) {
2428                                 link->remote_sinks[i] = link->remote_sinks[i+1];
2429                                 i++;
2430                         }
2431                         link->remote_sinks[i] = NULL;
2432                         link->sink_count--;
2433                         return;
2434                 }
2435         }
2436 }
2437
2438 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
2439 {
2440         info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
2441         info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
2442         info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
2443         info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
2444         info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
2445         info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
2446         info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
2447         info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
2448         info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
2449 }
2450 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
2451 {
2452         if (dc->hwss.set_clock)
2453                 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
2454         return DC_ERROR_UNEXPECTED;
2455 }
2456 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
2457 {
2458         if (dc->hwss.get_clock)
2459                 dc->hwss.get_clock(dc, clock_type, clock_cfg);
2460 }