2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
36 #include "amdgpu_display.h"
37 #include "amdgpu_ucode.h"
39 #include "amdgpu_dm.h"
40 #include "amdgpu_pm.h"
42 #include "amd_shared.h"
43 #include "amdgpu_dm_irq.h"
44 #include "dm_helpers.h"
45 #include "amdgpu_dm_mst_types.h"
46 #if defined(CONFIG_DEBUG_FS)
47 #include "amdgpu_dm_debugfs.h"
50 #include "ivsrcid/ivsrcid_vislands30.h"
52 #include <linux/module.h>
53 #include <linux/moduleparam.h>
54 #include <linux/version.h>
55 #include <linux/types.h>
56 #include <linux/pm_runtime.h>
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include <linux/component.h>
61 #include <drm/drm_atomic.h>
62 #include <drm/drm_atomic_uapi.h>
63 #include <drm/drm_atomic_helper.h>
64 #include <drm/drm_dp_mst_helper.h>
65 #include <drm/drm_fb_helper.h>
66 #include <drm/drm_fourcc.h>
67 #include <drm/drm_edid.h>
68 #include <drm/drm_vblank.h>
69 #include <drm/drm_audio_component.h>
71 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
72 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
74 #include "dcn/dcn_1_0_offset.h"
75 #include "dcn/dcn_1_0_sh_mask.h"
76 #include "soc15_hw_ip.h"
77 #include "vega10_ip_offset.h"
79 #include "soc15_common.h"
82 #include "modules/inc/mod_freesync.h"
83 #include "modules/power/power_helpers.h"
84 #include "modules/inc/mod_info_packet.h"
86 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
87 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
92 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
93 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
94 * requests into DC requests, and DC responses into DRM responses.
96 * The root control structure is &struct amdgpu_display_manager.
99 /* basic init/fini API */
100 static int amdgpu_dm_init(struct amdgpu_device *adev);
101 static void amdgpu_dm_fini(struct amdgpu_device *adev);
104 * initializes drm_device display related structures, based on the information
105 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
106 * drm_encoder, drm_mode_config
108 * Returns 0 on success
110 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
111 /* removes and deallocates the drm structures, created by the above function */
112 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
115 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
117 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
118 struct drm_plane *plane,
119 unsigned long possible_crtcs,
120 const struct dc_plane_cap *plane_cap);
121 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
122 struct drm_plane *plane,
123 uint32_t link_index);
124 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
125 struct amdgpu_dm_connector *amdgpu_dm_connector,
127 struct amdgpu_encoder *amdgpu_encoder);
128 static int amdgpu_dm_encoder_init(struct drm_device *dev,
129 struct amdgpu_encoder *aencoder,
130 uint32_t link_index);
132 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
134 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
135 struct drm_atomic_state *state,
138 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
140 static int amdgpu_dm_atomic_check(struct drm_device *dev,
141 struct drm_atomic_state *state);
143 static void handle_cursor_update(struct drm_plane *plane,
144 struct drm_plane_state *old_plane_state);
147 * dm_vblank_get_counter
150 * Get counter for number of vertical blanks
153 * struct amdgpu_device *adev - [in] desired amdgpu device
154 * int crtc - [in] which CRTC to get the counter from
157 * Counter for vertical blanks
159 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
161 if (crtc >= adev->mode_info.num_crtc)
164 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
165 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
169 if (acrtc_state->stream == NULL) {
170 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
175 return dc_stream_get_vblank_counter(acrtc_state->stream);
179 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
180 u32 *vbl, u32 *position)
182 uint32_t v_blank_start, v_blank_end, h_position, v_position;
184 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
187 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
188 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
191 if (acrtc_state->stream == NULL) {
192 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
198 * TODO rework base driver to use values directly.
199 * for now parse it back into reg-format
201 dc_stream_get_scanoutpos(acrtc_state->stream,
207 *position = v_position | (h_position << 16);
208 *vbl = v_blank_start | (v_blank_end << 16);
214 static bool dm_is_idle(void *handle)
220 static int dm_wait_for_idle(void *handle)
226 static bool dm_check_soft_reset(void *handle)
231 static int dm_soft_reset(void *handle)
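/* Find the amdgpu_crtc whose output timing generator (OTG) instance matches
 * otg_inst; an otg_inst of -1 falls back to the first CRTC.
 */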
237 static struct amdgpu_crtc *
238 get_crtc_by_otg_inst(struct amdgpu_device *adev,
241 struct drm_device *dev = adev->ddev;
242 struct drm_crtc *crtc;
243 struct amdgpu_crtc *amdgpu_crtc;
245 if (otg_inst == -1) {
247 return adev->mode_info.crtcs[0];
250 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
251 amdgpu_crtc = to_amdgpu_crtc(crtc);
253 if (amdgpu_crtc->otg_inst == otg_inst)
260 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
262 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
263 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
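/* Page-flip completion interrupt handler: sends the pending vblank event for
 * the flipped CRTC and marks the flip as no longer pending.
 */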
266 static void dm_pflip_high_irq(void *interrupt_params)
268 struct amdgpu_crtc *amdgpu_crtc;
269 struct common_irq_params *irq_params = interrupt_params;
270 struct amdgpu_device *adev = irq_params->adev;
272 struct drm_pending_vblank_event *e;
273 struct dm_crtc_state *acrtc_state;
274 uint32_t vpos, hpos, v_blank_start, v_blank_end;
277 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
279 /* IRQ could occur when in initial stage */
280 /* TODO work and BO cleanup */
281 if (amdgpu_crtc == NULL) {
282 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
286 spin_lock_irqsave(&adev->ddev->event_lock, flags);
288 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
289 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
290 amdgpu_crtc->pflip_status,
291 AMDGPU_FLIP_SUBMITTED,
292 amdgpu_crtc->crtc_id,
294 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
298 /* page flip completed. */
299 e = amdgpu_crtc->event;
300 amdgpu_crtc->event = NULL;
305 acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
306 vrr_active = amdgpu_dm_vrr_active(acrtc_state);
308 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
310 !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
311 &v_blank_end, &hpos, &vpos) ||
312 (vpos < v_blank_start)) {
313 /* Update to correct count and vblank timestamp if racing with
314 * vblank irq. This also updates to the correct vblank timestamp
315 * even in VRR mode, as scanout is past the front-porch atm.
317 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
319 /* Wake up userspace by sending the pageflip event with proper
320 * count and timestamp of vblank of flip completion.
323 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
325 /* Event sent, so done with vblank for this flip */
326 drm_crtc_vblank_put(&amdgpu_crtc->base);
329 /* VRR active and inside front-porch: vblank count and
330 * timestamp for pageflip event will only be up to date after
331 * drm_crtc_handle_vblank() has been executed from late vblank
332 * irq handler after start of back-porch (vline 0). We queue the
333 * pageflip event for send-out by drm_crtc_handle_vblank() with
334 * updated timestamp and count, once it runs after us.
336 * We need to open-code this instead of using the helper
337 * drm_crtc_arm_vblank_event(), as that helper would
338 * call drm_crtc_accurate_vblank_count(), which we must
339 * not call in VRR mode while we are in front-porch!
342 /* sequence will be replaced by real count during send-out. */
343 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
344 e->pipe = amdgpu_crtc->crtc_id;
346 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
350 /* Keep track of vblank of this flip for flip throttling. We use the
351 * cooked hw counter, as it is incremented at the start of this vblank
352 * of pageflip completion, so last_flip_vblank is the forbidden count
353 * for queueing new pageflips if vsync + VRR is enabled.
355 amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
356 amdgpu_crtc->crtc_id);
358 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
359 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
361 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
362 amdgpu_crtc->crtc_id, amdgpu_crtc,
363 vrr_active, (int) !e);
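/* VUPDATE interrupt handler, fired after the end of the front-porch. In VRR
 * mode core vblank handling (and BTR on pre-DCE12 ASICs) is done from here.
 */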
366 static void dm_vupdate_high_irq(void *interrupt_params)
368 struct common_irq_params *irq_params = interrupt_params;
369 struct amdgpu_device *adev = irq_params->adev;
370 struct amdgpu_crtc *acrtc;
371 struct dm_crtc_state *acrtc_state;
374 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
377 acrtc_state = to_dm_crtc_state(acrtc->base.state);
379 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
380 amdgpu_dm_vrr_active(acrtc_state));
382 /* Core vblank handling is done here after end of front-porch in
383 * vrr mode, as vblank timestamping only gives valid results
384 * now that we are past the front-porch. This will also deliver
385 * page-flip completion events that have been queued to us
386 * if a pageflip happened inside front-porch.
388 if (amdgpu_dm_vrr_active(acrtc_state)) {
389 drm_crtc_handle_vblank(&acrtc->base);
391 /* BTR processing for pre-DCE12 ASICs */
392 if (acrtc_state->stream &&
393 adev->family < AMDGPU_FAMILY_AI) {
394 spin_lock_irqsave(&adev->ddev->event_lock, flags);
395 mod_freesync_handle_v_update(
396 adev->dm.freesync_module,
398 &acrtc_state->vrr_params);
400 dc_stream_adjust_vmin_vmax(
403 &acrtc_state->vrr_params.adjust);
404 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
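/* VBLANK interrupt handler, fired at the start of vblank: handles core vblank
 * (non-VRR only), CRC capture and FreeSync v_update processing.
 */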
410 static void dm_crtc_high_irq(void *interrupt_params)
412 struct common_irq_params *irq_params = interrupt_params;
413 struct amdgpu_device *adev = irq_params->adev;
414 struct amdgpu_crtc *acrtc;
415 struct dm_crtc_state *acrtc_state;
418 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
421 acrtc_state = to_dm_crtc_state(acrtc->base.state);
423 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
424 amdgpu_dm_vrr_active(acrtc_state));
426 /* Core vblank handling at start of front-porch is only possible
427 * in non-vrr mode, as only then does vblank timestamping give
428 * valid results while still inside the front-porch. Otherwise defer it
429 * to dm_vupdate_high_irq after end of front-porch.
431 if (!amdgpu_dm_vrr_active(acrtc_state))
432 drm_crtc_handle_vblank(&acrtc->base);
434 /* The following must happen at the start of vblank, for CRC
435 * computation and below-the-range btr support in vrr mode.
437 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
439 if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
440 acrtc_state->vrr_params.supported &&
441 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
442 spin_lock_irqsave(&adev->ddev->event_lock, flags);
443 mod_freesync_handle_v_update(
444 adev->dm.freesync_module,
446 &acrtc_state->vrr_params);
448 dc_stream_adjust_vmin_vmax(
451 &acrtc_state->vrr_params.adjust);
452 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
457 static int dm_set_clockgating_state(void *handle,
458 enum amd_clockgating_state state)
463 static int dm_set_powergating_state(void *handle,
464 enum amd_powergating_state state)
469 /* Prototypes of private functions */
470 static int dm_early_init(void* handle);
472 /* Allocate memory for FBC compressed data */
473 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
475 struct drm_device *dev = connector->dev;
476 struct amdgpu_device *adev = dev->dev_private;
477 struct dm_comressor_info *compressor = &adev->dm.compressor;
478 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
479 struct drm_display_mode *mode;
480 unsigned long max_size = 0;
482 if (adev->dm.dc->fbc_compressor == NULL)
485 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
488 if (compressor->bo_ptr)
492 list_for_each_entry(mode, &connector->modes, head) {
493 if (max_size < mode->htotal * mode->vtotal)
494 max_size = mode->htotal * mode->vtotal;
498 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
499 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
500 &compressor->gpu_addr, &compressor->cpu_addr);
503 DRM_ERROR("DM: Failed to initialize FBC\n");
505 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
506 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
513 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
514 int pipe, bool *enabled,
515 unsigned char *buf, int max_bytes)
517 struct drm_device *dev = dev_get_drvdata(kdev);
518 struct amdgpu_device *adev = dev->dev_private;
519 struct drm_connector *connector;
520 struct drm_connector_list_iter conn_iter;
521 struct amdgpu_dm_connector *aconnector;
526 mutex_lock(&adev->dm.audio_lock);
528 drm_connector_list_iter_begin(dev, &conn_iter);
529 drm_for_each_connector_iter(connector, &conn_iter) {
530 aconnector = to_amdgpu_dm_connector(connector);
531 if (aconnector->audio_inst != port)
535 ret = drm_eld_size(connector->eld);
536 memcpy(buf, connector->eld, min(max_bytes, ret));
540 drm_connector_list_iter_end(&conn_iter);
542 mutex_unlock(&adev->dm.audio_lock);
544 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
549 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
550 .get_eld = amdgpu_dm_audio_component_get_eld,
553 static int amdgpu_dm_audio_component_bind(struct device *kdev,
554 struct device *hda_kdev, void *data)
556 struct drm_device *dev = dev_get_drvdata(kdev);
557 struct amdgpu_device *adev = dev->dev_private;
558 struct drm_audio_component *acomp = data;
560 acomp->ops = &amdgpu_dm_audio_component_ops;
562 adev->dm.audio_component = acomp;
567 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
568 struct device *hda_kdev, void *data)
570 struct drm_device *dev = dev_get_drvdata(kdev);
571 struct amdgpu_device *adev = dev->dev_private;
572 struct drm_audio_component *acomp = data;
576 adev->dm.audio_component = NULL;
579 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
580 .bind = amdgpu_dm_audio_component_bind,
581 .unbind = amdgpu_dm_audio_component_unbind,
584 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
591 adev->mode_info.audio.enabled = true;
593 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
595 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
596 adev->mode_info.audio.pin[i].channels = -1;
597 adev->mode_info.audio.pin[i].rate = -1;
598 adev->mode_info.audio.pin[i].bits_per_sample = -1;
599 adev->mode_info.audio.pin[i].status_bits = 0;
600 adev->mode_info.audio.pin[i].category_code = 0;
601 adev->mode_info.audio.pin[i].connected = false;
602 adev->mode_info.audio.pin[i].id =
603 adev->dm.dc->res_pool->audios[i]->inst;
604 adev->mode_info.audio.pin[i].offset = 0;
607 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
611 adev->dm.audio_registered = true;
616 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
621 if (!adev->mode_info.audio.enabled)
624 if (adev->dm.audio_registered) {
625 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
626 adev->dm.audio_registered = false;
629 /* TODO: Disable audio? */
631 adev->mode_info.audio.enabled = false;
634 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
636 struct drm_audio_component *acomp = adev->dm.audio_component;
638 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
639 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
641 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
646 static int amdgpu_dm_init(struct amdgpu_device *adev)
648 struct dc_init_data init_data;
649 adev->dm.ddev = adev->ddev;
650 adev->dm.adev = adev;
652 /* Zero all the fields */
653 memset(&init_data, 0, sizeof(init_data));
655 mutex_init(&adev->dm.dc_lock);
656 mutex_init(&adev->dm.audio_lock);
658 if (amdgpu_dm_irq_init(adev)) {
659 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
663 init_data.asic_id.chip_family = adev->family;
665 init_data.asic_id.pci_revision_id = adev->rev_id;
666 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
668 init_data.asic_id.vram_width = adev->gmc.vram_width;
669 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
670 init_data.asic_id.atombios_base_address =
671 adev->mode_info.atom_context->bios;
673 init_data.driver = adev;
675 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
677 if (!adev->dm.cgs_device) {
678 DRM_ERROR("amdgpu: failed to create cgs device.\n");
682 init_data.cgs_device = adev->dm.cgs_device;
684 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
687 * TODO debug why this doesn't work on Raven
689 if (adev->flags & AMD_IS_APU &&
690 adev->asic_type >= CHIP_CARRIZO &&
691 adev->asic_type <= CHIP_RAVEN)
692 init_data.flags.gpu_vm_support = true;
694 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
695 init_data.flags.fbc_support = true;
697 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
698 init_data.flags.multi_mon_pp_mclk_switch = true;
700 init_data.flags.power_down_display_on_boot = true;
702 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
703 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
706 /* Display Core create. */
707 adev->dm.dc = dc_create(&init_data);
710 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
712 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
716 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
717 if (!adev->dm.freesync_module) {
719 "amdgpu: failed to initialize freesync_module.\n");
721 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
722 adev->dm.freesync_module);
724 amdgpu_dm_init_color_mod();
726 if (amdgpu_dm_initialize_drm_device(adev)) {
728 "amdgpu: failed to initialize sw for display support.\n");
732 /* Update the actual used number of crtc */
733 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
735 /* TODO: Add_display_info? */
737 /* TODO use dynamic cursor width */
738 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
739 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
741 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
743 "amdgpu: failed to initialize sw for display support.\n");
747 #if defined(CONFIG_DEBUG_FS)
748 if (dtn_debugfs_init(adev))
749 DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
752 DRM_DEBUG_DRIVER("KMS initialized.\n");
756 amdgpu_dm_fini(adev);
761 static void amdgpu_dm_fini(struct amdgpu_device *adev)
763 amdgpu_dm_audio_fini(adev);
765 amdgpu_dm_destroy_drm_device(&adev->dm);
767 /* DC Destroy TODO: Replace destroy DAL */
769 dc_destroy(&adev->dm.dc);
771 * TODO: pageflip, vblank interrupt
773 * amdgpu_dm_irq_fini(adev);
776 if (adev->dm.cgs_device) {
777 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
778 adev->dm.cgs_device = NULL;
780 if (adev->dm.freesync_module) {
781 mod_freesync_destroy(adev->dm.freesync_module);
782 adev->dm.freesync_module = NULL;
785 mutex_destroy(&adev->dm.audio_lock);
786 mutex_destroy(&adev->dm.dc_lock);
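/* Request and validate the DMCU firmware for ASICs that need it, and register
 * its ERAM and INTV regions with the PSP firmware loader.
 */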
791 static int load_dmcu_fw(struct amdgpu_device *adev)
793 const char *fw_name_dmcu = NULL;
795 const struct dmcu_firmware_header_v1_0 *hdr;
797 switch (adev->asic_type) {
820 if (ASICREV_IS_PICASSO(adev->external_rev_id))
821 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
822 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
823 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
828 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
832 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
833 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
837 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
839 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
840 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
841 adev->dm.fw_dmcu = NULL;
845 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
850 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
852 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
854 release_firmware(adev->dm.fw_dmcu);
855 adev->dm.fw_dmcu = NULL;
859 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
860 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
861 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
862 adev->firmware.fw_size +=
863 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
865 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
866 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
867 adev->firmware.fw_size +=
868 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
870 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
872 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
877 static int dm_sw_init(void *handle)
879 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
881 return load_dmcu_fw(adev);
884 static int dm_sw_fini(void *handle)
886 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
888 if (adev->dm.fw_dmcu) {
889 release_firmware(adev->dm.fw_dmcu);
890 adev->dm.fw_dmcu = NULL;
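/* Walk all connectors and start DP MST topology management on every link that
 * is an MST branch.
 */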
896 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
898 struct amdgpu_dm_connector *aconnector;
899 struct drm_connector *connector;
902 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
904 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
905 aconnector = to_amdgpu_dm_connector(connector);
906 if (aconnector->dc_link->type == dc_connection_mst_branch &&
907 aconnector->mst_mgr.aux) {
908 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
909 aconnector, aconnector->base.base.id);
911 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
913 DRM_ERROR("DM_MST: Failed to start MST\n");
914 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
920 drm_modeset_unlock(&dev->mode_config.connection_mutex);
924 static int dm_late_init(void *handle)
926 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
928 struct dmcu_iram_parameters params;
929 unsigned int linear_lut[16];
931 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
934 for (i = 0; i < 16; i++)
935 linear_lut[i] = 0xFFFF * i / 15;
938 params.backlight_ramping_start = 0xCCCC;
939 params.backlight_ramping_reduction = 0xCCCCCCCC;
940 params.backlight_lut_array_size = 16;
941 params.backlight_lut_array = linear_lut;
943 /* todo will enable for navi10 */
944 if (adev->asic_type <= CHIP_RAVEN) {
945 ret = dmcu_load_iram(dmcu, params);
951 return detect_mst_link_for_all_connectors(adev->ddev);
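/* Suspend or resume the DP MST topology managers around an S3 cycle; if resume
 * fails the link falls back to non-MST and a hotplug event is sent.
 */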
954 static void s3_handle_mst(struct drm_device *dev, bool suspend)
956 struct amdgpu_dm_connector *aconnector;
957 struct drm_connector *connector;
958 struct drm_dp_mst_topology_mgr *mgr;
960 bool need_hotplug = false;
962 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
964 list_for_each_entry(connector, &dev->mode_config.connector_list,
966 aconnector = to_amdgpu_dm_connector(connector);
967 if (aconnector->dc_link->type != dc_connection_mst_branch ||
968 aconnector->mst_port)
971 mgr = &aconnector->mst_mgr;
974 drm_dp_mst_topology_mgr_suspend(mgr);
976 ret = drm_dp_mst_topology_mgr_resume(mgr);
978 drm_dp_mst_topology_mgr_set_mst(mgr, false);
984 drm_modeset_unlock(&dev->mode_config.connection_mutex);
987 drm_kms_helper_hotplug_event(dev);
991 * dm_hw_init() - Initialize DC device
992 * @handle: The base driver device containing the amdgpu_dm device.
994 * Initialize the &struct amdgpu_display_manager device. This involves calling
995 * the initializers of each DM component, then populating the struct with them.
997 * Although the function implies hardware initialization, both hardware and
998 * software are initialized here. Splitting them out to their relevant init
999 * hooks is a future TODO item.
1001 * Some notable things that are initialized here:
1003 * - Display Core, both software and hardware
1004 * - DC modules that we need (freesync and color management)
1005 * - DRM software states
1006 * - Interrupt sources and handlers
1008 * - Debug FS entries, if enabled
1010 static int dm_hw_init(void *handle)
1012 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1013 /* Create DAL display manager */
1014 amdgpu_dm_init(adev);
1015 amdgpu_dm_hpd_init(adev);
1021 * dm_hw_fini() - Teardown DC device
1022 * @handle: The base driver device containing the amdgpu_dm device.
1024 * Teardown components within &struct amdgpu_display_manager that require
1025 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1026 * were loaded. Also flush IRQ workqueues and disable them.
1028 static int dm_hw_fini(void *handle)
1030 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1032 amdgpu_dm_hpd_fini(adev);
1034 amdgpu_dm_irq_fini(adev);
1035 amdgpu_dm_fini(adev);
1039 static int dm_suspend(void *handle)
1041 struct amdgpu_device *adev = handle;
1042 struct amdgpu_display_manager *dm = &adev->dm;
1045 WARN_ON(adev->dm.cached_state);
1046 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1048 s3_handle_mst(adev->ddev, true);
1050 amdgpu_dm_irq_suspend(adev);
1053 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1058 static struct amdgpu_dm_connector *
1059 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1060 struct drm_crtc *crtc)
1063 struct drm_connector_state *new_con_state;
1064 struct drm_connector *connector;
1065 struct drm_crtc *crtc_from_state;
1067 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1068 crtc_from_state = new_con_state->crtc;
1070 if (crtc_from_state == crtc)
1071 return to_amdgpu_dm_connector(connector);
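/* Emulate link detection for a forced connector with nothing physically
 * attached: create a dc_sink matching the connector signal type and read the
 * EDID through the usual helpers.
 */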
1077 static void emulated_link_detect(struct dc_link *link)
1079 struct dc_sink_init_data sink_init_data = { 0 };
1080 struct display_sink_capability sink_caps = { 0 };
1081 enum dc_edid_status edid_status;
1082 struct dc_context *dc_ctx = link->ctx;
1083 struct dc_sink *sink = NULL;
1084 struct dc_sink *prev_sink = NULL;
1086 link->type = dc_connection_none;
1087 prev_sink = link->local_sink;
1089 if (prev_sink != NULL)
1090 dc_sink_retain(prev_sink);
1092 switch (link->connector_signal) {
1093 case SIGNAL_TYPE_HDMI_TYPE_A: {
1094 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1095 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1099 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1100 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1101 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1105 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1106 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1107 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1111 case SIGNAL_TYPE_LVDS: {
1112 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1113 sink_caps.signal = SIGNAL_TYPE_LVDS;
1117 case SIGNAL_TYPE_EDP: {
1118 sink_caps.transaction_type =
1119 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1120 sink_caps.signal = SIGNAL_TYPE_EDP;
1124 case SIGNAL_TYPE_DISPLAY_PORT: {
1125 sink_caps.transaction_type =
1126 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1127 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1132 DC_ERROR("Invalid connector type! signal:%d\n",
1133 link->connector_signal);
1137 sink_init_data.link = link;
1138 sink_init_data.sink_signal = sink_caps.signal;
1140 sink = dc_sink_create(&sink_init_data);
1142 DC_ERROR("Failed to create sink!\n");
1146 /* dc_sink_create returns a new reference */
1147 link->local_sink = sink;
1149 edid_status = dm_helpers_read_local_edid(
1154 if (edid_status != EDID_OK)
1155 DC_ERROR("Failed to read EDID");
1159 static int dm_resume(void *handle)
1161 struct amdgpu_device *adev = handle;
1162 struct drm_device *ddev = adev->ddev;
1163 struct amdgpu_display_manager *dm = &adev->dm;
1164 struct amdgpu_dm_connector *aconnector;
1165 struct drm_connector *connector;
1166 struct drm_crtc *crtc;
1167 struct drm_crtc_state *new_crtc_state;
1168 struct dm_crtc_state *dm_new_crtc_state;
1169 struct drm_plane *plane;
1170 struct drm_plane_state *new_plane_state;
1171 struct dm_plane_state *dm_new_plane_state;
1172 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1173 enum dc_connection_type new_connection_type = dc_connection_none;
1176 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1177 dc_release_state(dm_state->context);
1178 dm_state->context = dc_create_state(dm->dc);
1179 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1180 dc_resource_state_construct(dm->dc, dm_state->context);
1182 /* power on hardware */
1183 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1185 /* program HPD filter */
1188 /* On resume we need to rewrite the MSTM control bits to enable MST */
1189 s3_handle_mst(ddev, false);
1192 * Early enable HPD Rx IRQ; this should be done before modeset, as short
1193 * pulse interrupts are used for MST
1195 amdgpu_dm_irq_resume_early(adev);
1198 list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
1199 aconnector = to_amdgpu_dm_connector(connector);
1202 * This is the case when traversing through already created
1203 * MST connectors; they should be skipped
1205 if (aconnector->mst_port)
1208 mutex_lock(&aconnector->hpd_lock);
1209 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1210 DRM_ERROR("KMS: Failed to detect connector\n");
1212 if (aconnector->base.force && new_connection_type == dc_connection_none)
1213 emulated_link_detect(aconnector->dc_link);
1215 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1217 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1218 aconnector->fake_enable = false;
1220 if (aconnector->dc_sink)
1221 dc_sink_release(aconnector->dc_sink);
1222 aconnector->dc_sink = NULL;
1223 amdgpu_dm_update_connector_after_detect(aconnector);
1224 mutex_unlock(&aconnector->hpd_lock);
1227 /* Force mode set in atomic commit */
1228 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1229 new_crtc_state->active_changed = true;
1232 * atomic_check is expected to create the dc states. We need to release
1233 * them here, since they were duplicated as part of the suspend
1236 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1237 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1238 if (dm_new_crtc_state->stream) {
1239 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1240 dc_stream_release(dm_new_crtc_state->stream);
1241 dm_new_crtc_state->stream = NULL;
1245 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1246 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1247 if (dm_new_plane_state->dc_state) {
1248 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1249 dc_plane_state_release(dm_new_plane_state->dc_state);
1250 dm_new_plane_state->dc_state = NULL;
1254 drm_atomic_helper_resume(ddev, dm->cached_state);
1256 dm->cached_state = NULL;
1258 amdgpu_dm_irq_resume_late(adev);
1266 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1267 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1268 * the base driver's device list to be initialized and torn down accordingly.
1270 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1273 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1275 .early_init = dm_early_init,
1276 .late_init = dm_late_init,
1277 .sw_init = dm_sw_init,
1278 .sw_fini = dm_sw_fini,
1279 .hw_init = dm_hw_init,
1280 .hw_fini = dm_hw_fini,
1281 .suspend = dm_suspend,
1282 .resume = dm_resume,
1283 .is_idle = dm_is_idle,
1284 .wait_for_idle = dm_wait_for_idle,
1285 .check_soft_reset = dm_check_soft_reset,
1286 .soft_reset = dm_soft_reset,
1287 .set_clockgating_state = dm_set_clockgating_state,
1288 .set_powergating_state = dm_set_powergating_state,
1291 const struct amdgpu_ip_block_version dm_ip_block =
1293 .type = AMD_IP_BLOCK_TYPE_DCE,
1297 .funcs = &amdgpu_dm_funcs,
1307 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1308 .fb_create = amdgpu_display_user_framebuffer_create,
1309 .output_poll_changed = drm_fb_helper_output_poll_changed,
1310 .atomic_check = amdgpu_dm_atomic_check,
1311 .atomic_commit = amdgpu_dm_atomic_commit,
1314 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1315 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1319 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1321 struct drm_connector *connector = &aconnector->base;
1322 struct drm_device *dev = connector->dev;
1323 struct dc_sink *sink;
1325 /* MST handled by drm_mst framework */
1326 if (aconnector->mst_mgr.mst_state == true)
1330 sink = aconnector->dc_link->local_sink;
1332 dc_sink_retain(sink);
1335 * EDID mgmt connectors get their first update only in the mode_valid hook; then
1336 * the connector sink is set to either a fake or a physical sink, depending on link status.
1337 * Skip if already done during boot.
1339 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1340 && aconnector->dc_em_sink) {
1343 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a stream
1344 * because on resume connector->sink is set to NULL
1346 mutex_lock(&dev->mode_config.mutex);
1349 if (aconnector->dc_sink) {
1350 amdgpu_dm_update_freesync_caps(connector, NULL);
1352 * retain and release below are used to
1353 * bump up the refcount for the sink because the link doesn't point
1354 * to it anymore after disconnect, so on the next crtc-to-connector
1355 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
1357 dc_sink_release(aconnector->dc_sink);
1359 aconnector->dc_sink = sink;
1360 dc_sink_retain(aconnector->dc_sink);
1361 amdgpu_dm_update_freesync_caps(connector,
1364 amdgpu_dm_update_freesync_caps(connector, NULL);
1365 if (!aconnector->dc_sink) {
1366 aconnector->dc_sink = aconnector->dc_em_sink;
1367 dc_sink_retain(aconnector->dc_sink);
1371 mutex_unlock(&dev->mode_config.mutex);
1374 dc_sink_release(sink);
1379 * TODO: temporary guard until a proper fix is found.
1380 * If this sink is an MST sink, we should not do anything
1382 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1383 dc_sink_release(sink);
1387 if (aconnector->dc_sink == sink) {
1389 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1392 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1393 aconnector->connector_id);
1395 dc_sink_release(sink);
1399 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1400 aconnector->connector_id, aconnector->dc_sink, sink);
1402 mutex_lock(&dev->mode_config.mutex);
1405 * 1. Update status of the drm connector
1406 * 2. Send an event and let userspace tell us what to do
1410 * TODO: check if we still need the S3 mode update workaround.
1411 * If yes, put it here.
1413 if (aconnector->dc_sink)
1414 amdgpu_dm_update_freesync_caps(connector, NULL);
1416 aconnector->dc_sink = sink;
1417 dc_sink_retain(aconnector->dc_sink);
1418 if (sink->dc_edid.length == 0) {
1419 aconnector->edid = NULL;
1420 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1423 (struct edid *) sink->dc_edid.raw_edid;
1426 drm_connector_update_edid_property(connector,
1428 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1431 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1434 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1435 amdgpu_dm_update_freesync_caps(connector, NULL);
1436 drm_connector_update_edid_property(connector, NULL);
1437 aconnector->num_modes = 0;
1438 dc_sink_release(aconnector->dc_sink);
1439 aconnector->dc_sink = NULL;
1440 aconnector->edid = NULL;
1443 mutex_unlock(&dev->mode_config.mutex);
1446 dc_sink_release(sink);
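/* HPD (long pulse) interrupt handler: re-detect the link, update the connector
 * and send a hotplug event to userspace when appropriate.
 */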
1449 static void handle_hpd_irq(void *param)
1451 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1452 struct drm_connector *connector = &aconnector->base;
1453 struct drm_device *dev = connector->dev;
1454 enum dc_connection_type new_connection_type = dc_connection_none;
1457 * In case of failure or MST there is no need to update the connector status or
1458 * notify the OS, since (for the MST case) MST does this in its own context.
1460 mutex_lock(&aconnector->hpd_lock);
1462 if (aconnector->fake_enable)
1463 aconnector->fake_enable = false;
1465 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1466 DRM_ERROR("KMS: Failed to detect connector\n");
1468 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1469 emulated_link_detect(aconnector->dc_link);
1472 drm_modeset_lock_all(dev);
1473 dm_restore_drm_connector_state(dev, connector);
1474 drm_modeset_unlock_all(dev);
1476 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1477 drm_kms_helper_hotplug_event(dev);
1479 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
1480 amdgpu_dm_update_connector_after_detect(aconnector);
1483 drm_modeset_lock_all(dev);
1484 dm_restore_drm_connector_state(dev, connector);
1485 drm_modeset_unlock_all(dev);
1487 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1488 drm_kms_helper_hotplug_event(dev);
1490 mutex_unlock(&aconnector->hpd_lock);
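/* Service DP short-pulse sideband messages for MST: read the sink-count/ESI
 * DPCD block, let the MST manager handle it, ACK the IRQ, and repeat until no
 * new IRQ is pending (bounded by max_process_count).
 */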
1494 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
1496 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
1498 bool new_irq_handled = false;
1500 int dpcd_bytes_to_read;
1502 const int max_process_count = 30;
1503 int process_count = 0;
1505 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
1507 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
1508 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
1509 /* DPCD 0x200 - 0x201 for downstream IRQ */
1510 dpcd_addr = DP_SINK_COUNT;
1512 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
1513 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
1514 dpcd_addr = DP_SINK_COUNT_ESI;
1517 dret = drm_dp_dpcd_read(
1518 &aconnector->dm_dp_aux.aux,
1521 dpcd_bytes_to_read);
1523 while (dret == dpcd_bytes_to_read &&
1524 process_count < max_process_count) {
1530 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
1531 /* handle HPD short pulse irq */
1532 if (aconnector->mst_mgr.mst_state)
1534 &aconnector->mst_mgr,
1538 if (new_irq_handled) {
1539 /* ACK at DPCD to notify downstream */
1540 const int ack_dpcd_bytes_to_write =
1541 dpcd_bytes_to_read - 1;
1543 for (retry = 0; retry < 3; retry++) {
1546 wret = drm_dp_dpcd_write(
1547 &aconnector->dm_dp_aux.aux,
1550 ack_dpcd_bytes_to_write);
1551 if (wret == ack_dpcd_bytes_to_write)
1555 /* check if there is new irq to be handled */
1556 dret = drm_dp_dpcd_read(
1557 &aconnector->dm_dp_aux.aux,
1560 dpcd_bytes_to_read);
1562 new_irq_handled = false;
1568 if (process_count == max_process_count)
1569 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
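/* HPD RX (short pulse) interrupt handler: handles downstream port status
 * changes on SST links and hands MST sideband traffic to dm_handle_hpd_rx_irq().
 */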
1572 static void handle_hpd_rx_irq(void *param)
1574 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1575 struct drm_connector *connector = &aconnector->base;
1576 struct drm_device *dev = connector->dev;
1577 struct dc_link *dc_link = aconnector->dc_link;
1578 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1579 enum dc_connection_type new_connection_type = dc_connection_none;
1582 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
1583 * conflict; once an i2c helper is implemented, this mutex should be
1586 if (dc_link->type != dc_connection_mst_branch)
1587 mutex_lock(&aconnector->hpd_lock);
1589 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
1590 !is_mst_root_connector) {
1591 /* Downstream Port status changed. */
1592 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1593 DRM_ERROR("KMS: Failed to detect connector\n");
1595 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1596 emulated_link_detect(dc_link);
1598 if (aconnector->fake_enable)
1599 aconnector->fake_enable = false;
1601 amdgpu_dm_update_connector_after_detect(aconnector);
1604 drm_modeset_lock_all(dev);
1605 dm_restore_drm_connector_state(dev, connector);
1606 drm_modeset_unlock_all(dev);
1608 drm_kms_helper_hotplug_event(dev);
1609 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1611 if (aconnector->fake_enable)
1612 aconnector->fake_enable = false;
1614 amdgpu_dm_update_connector_after_detect(aconnector);
1617 drm_modeset_lock_all(dev);
1618 dm_restore_drm_connector_state(dev, connector);
1619 drm_modeset_unlock_all(dev);
1621 drm_kms_helper_hotplug_event(dev);
1624 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1625 (dc_link->type == dc_connection_mst_branch))
1626 dm_handle_hpd_rx_irq(aconnector);
1628 if (dc_link->type != dc_connection_mst_branch) {
1629 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
1630 mutex_unlock(&aconnector->hpd_lock);
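/* Register HPD and HPD RX (short pulse) interrupt handlers for every connector
 * that exposes a valid DC IRQ source.
 */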
1634 static void register_hpd_handlers(struct amdgpu_device *adev)
1636 struct drm_device *dev = adev->ddev;
1637 struct drm_connector *connector;
1638 struct amdgpu_dm_connector *aconnector;
1639 const struct dc_link *dc_link;
1640 struct dc_interrupt_params int_params = {0};
1642 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1643 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1645 list_for_each_entry(connector,
1646 &dev->mode_config.connector_list, head) {
1648 aconnector = to_amdgpu_dm_connector(connector);
1649 dc_link = aconnector->dc_link;
1651 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1652 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1653 int_params.irq_source = dc_link->irq_source_hpd;
1655 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1657 (void *) aconnector);
1660 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1662 /* Also register for DP short pulse (hpd_rx). */
1663 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1664 int_params.irq_source = dc_link->irq_source_hpd_rx;
1666 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1668 (void *) aconnector);
1673 /* Register IRQ sources and initialize IRQ callbacks */
1674 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
1676 struct dc *dc = adev->dm.dc;
1677 struct common_irq_params *c_irq_params;
1678 struct dc_interrupt_params int_params = {0};
1681 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
1683 if (adev->asic_type >= CHIP_VEGA10)
1684 client_id = SOC15_IH_CLIENTID_DCE;
1686 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1687 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1690 * Actions of amdgpu_irq_add_id():
1691 * 1. Register a set() function with base driver.
1692 * Base driver will call set() function to enable/disable an
1693 * interrupt in DC hardware.
1694 * 2. Register amdgpu_dm_irq_handler().
1695 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1696 * coming from DC hardware.
1697 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1698 * for acknowledging and handling. */
1700 /* Use VBLANK interrupt */
1701 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
1702 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
1704 DRM_ERROR("Failed to add crtc irq id!\n");
1708 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1709 int_params.irq_source =
1710 dc_interrupt_to_irq_source(dc, i, 0);
1712 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1714 c_irq_params->adev = adev;
1715 c_irq_params->irq_src = int_params.irq_source;
1717 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1718 dm_crtc_high_irq, c_irq_params);
1721 /* Use VUPDATE interrupt */
1722 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
1723 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
1725 DRM_ERROR("Failed to add vupdate irq id!\n");
1729 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1730 int_params.irq_source =
1731 dc_interrupt_to_irq_source(dc, i, 0);
1733 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
1735 c_irq_params->adev = adev;
1736 c_irq_params->irq_src = int_params.irq_source;
1738 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1739 dm_vupdate_high_irq, c_irq_params);
1742 /* Use GRPH_PFLIP interrupt */
1743 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
1744 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
1745 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
1747 DRM_ERROR("Failed to add page flip irq id!\n");
1751 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1752 int_params.irq_source =
1753 dc_interrupt_to_irq_source(dc, i, 0);
1755 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1757 c_irq_params->adev = adev;
1758 c_irq_params->irq_src = int_params.irq_source;
1760 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1761 dm_pflip_high_irq, c_irq_params);
1766 r = amdgpu_irq_add_id(adev, client_id,
1767 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
1769 DRM_ERROR("Failed to add hpd irq id!\n");
1773 register_hpd_handlers(adev);
1778 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1779 /* Register IRQ sources and initialize IRQ callbacks */
1780 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
1782 struct dc *dc = adev->dm.dc;
1783 struct common_irq_params *c_irq_params;
1784 struct dc_interrupt_params int_params = {0};
1788 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1789 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1792 * Actions of amdgpu_irq_add_id():
1793 * 1. Register a set() function with base driver.
1794 * Base driver will call set() function to enable/disable an
1795 * interrupt in DC hardware.
1796 * 2. Register amdgpu_dm_irq_handler().
1797 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
1798 * coming from DC hardware.
1799 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
1800 * for acknowledging and handling.
1803 /* Use VSTARTUP interrupt */
1804 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
1805 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
1807 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
1810 DRM_ERROR("Failed to add crtc irq id!\n");
1814 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1815 int_params.irq_source =
1816 dc_interrupt_to_irq_source(dc, i, 0);
1818 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
1820 c_irq_params->adev = adev;
1821 c_irq_params->irq_src = int_params.irq_source;
1823 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1824 dm_crtc_high_irq, c_irq_params);
1827 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
1828 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
1829 * to trigger at end of each vblank, regardless of state of the lock,
1830 * matching DCE behaviour.
1832 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
1833 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
1835 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
1838 DRM_ERROR("Failed to add vupdate irq id!\n");
1842 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1843 int_params.irq_source =
1844 dc_interrupt_to_irq_source(dc, i, 0);
1846 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
1848 c_irq_params->adev = adev;
1849 c_irq_params->irq_src = int_params.irq_source;
1851 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1852 dm_vupdate_high_irq, c_irq_params);
1855 /* Use GRPH_PFLIP interrupt */
1856 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
1857 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
1859 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
1861 DRM_ERROR("Failed to add page flip irq id!\n");
1865 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
1866 int_params.irq_source =
1867 dc_interrupt_to_irq_source(dc, i, 0);
1869 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
1871 c_irq_params->adev = adev;
1872 c_irq_params->irq_src = int_params.irq_source;
1874 amdgpu_dm_irq_register_interrupt(adev, &int_params,
1875 dm_pflip_high_irq, c_irq_params);
1880 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
1883 DRM_ERROR("Failed to add hpd irq id!\n");
1887 register_hpd_handlers(adev);
1894 * Acquires the lock for the atomic state object and returns
1895 * the new atomic state.
1897 * This should only be called during atomic check.
1899 static int dm_atomic_get_state(struct drm_atomic_state *state,
1900 struct dm_atomic_state **dm_state)
1902 struct drm_device *dev = state->dev;
1903 struct amdgpu_device *adev = dev->dev_private;
1904 struct amdgpu_display_manager *dm = &adev->dm;
1905 struct drm_private_state *priv_state;
1910 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
1911 if (IS_ERR(priv_state))
1912 return PTR_ERR(priv_state);
1914 *dm_state = to_dm_atomic_state(priv_state);
1919 struct dm_atomic_state *
1920 dm_atomic_get_new_state(struct drm_atomic_state *state)
1922 struct drm_device *dev = state->dev;
1923 struct amdgpu_device *adev = dev->dev_private;
1924 struct amdgpu_display_manager *dm = &adev->dm;
1925 struct drm_private_obj *obj;
1926 struct drm_private_state *new_obj_state;
1929 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
1930 if (obj->funcs == dm->atomic_obj.funcs)
1931 return to_dm_atomic_state(new_obj_state);
1937 struct dm_atomic_state *
1938 dm_atomic_get_old_state(struct drm_atomic_state *state)
1940 struct drm_device *dev = state->dev;
1941 struct amdgpu_device *adev = dev->dev_private;
1942 struct amdgpu_display_manager *dm = &adev->dm;
1943 struct drm_private_obj *obj;
1944 struct drm_private_state *old_obj_state;
1947 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
1948 if (obj->funcs == dm->atomic_obj.funcs)
1949 return to_dm_atomic_state(old_obj_state);
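/* Private-object state duplication: copy the base state and deep-copy the DC
 * context so atomic check can safely modify it.
 */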
1955 static struct drm_private_state *
1956 dm_atomic_duplicate_state(struct drm_private_obj *obj)
1958 struct dm_atomic_state *old_state, *new_state;
1960 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
1964 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
1966 old_state = to_dm_atomic_state(obj->state);
1968 if (old_state && old_state->context)
1969 new_state->context = dc_copy_state(old_state->context);
1971 if (!new_state->context) {
1976 return &new_state->base;
1979 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
1980 struct drm_private_state *state)
1982 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
1984 if (dm_state && dm_state->context)
1985 dc_release_state(dm_state->context);
1990 static struct drm_private_state_funcs dm_atomic_state_funcs = {
1991 .atomic_duplicate_state = dm_atomic_duplicate_state,
1992 .atomic_destroy_state = dm_atomic_destroy_state,
1995 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1997 struct dm_atomic_state *state;
2000 adev->mode_info.mode_config_initialized = true;
2002 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2003 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2005 adev->ddev->mode_config.max_width = 16384;
2006 adev->ddev->mode_config.max_height = 16384;
2008 adev->ddev->mode_config.preferred_depth = 24;
2009 adev->ddev->mode_config.prefer_shadow = 1;
2010 /* indicates support for immediate flip */
2011 adev->ddev->mode_config.async_page_flip = true;
2013 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2015 state = kzalloc(sizeof(*state), GFP_KERNEL);
2019 state->context = dc_create_state(adev->dm.dc);
2020 if (!state->context) {
2025 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2027 drm_atomic_private_obj_init(adev->ddev,
2028 &adev->dm.atomic_obj,
2030 &dm_atomic_state_funcs);
2032 r = amdgpu_display_modeset_create_props(adev);
2036 r = amdgpu_dm_audio_init(adev);
2043 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2044 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2046 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2047 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
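/* Query ACPI for the panel's min/max backlight input signal levels, falling
 * back to the AMDGPU_DM_DEFAULT_*_BACKLIGHT values when unavailable.
 */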
2049 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2051 #if defined(CONFIG_ACPI)
2052 struct amdgpu_dm_backlight_caps caps;
2054 if (dm->backlight_caps.caps_valid)
2057 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2058 if (caps.caps_valid) {
2059 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2060 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2061 dm->backlight_caps.caps_valid = true;
2063 dm->backlight_caps.min_input_signal =
2064 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2065 dm->backlight_caps.max_input_signal =
2066 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2069 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2070 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
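/* backlight_ops.update_status callback: rescale the 0-255 request into the
 * panel's input signal range (and the DC 16-bit interface) and program it.
 */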
2074 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2076 struct amdgpu_display_manager *dm = bl_get_data(bd);
2077 struct amdgpu_dm_backlight_caps caps;
2078 uint32_t brightness = bd->props.brightness;
2080 amdgpu_dm_update_backlight_caps(dm);
2081 caps = dm->backlight_caps;
2083 * The brightness input is in the range 0-255
2084 * It needs to be rescaled to be between the
2085 * requested min and max input signal
2087 * It also needs to be scaled up by 0x101 to
2088 * match the DC interface which has a range of
2094 * (caps.max_input_signal - caps.min_input_signal)
2095 / AMDGPU_MAX_BL_LEVEL
2096 + caps.min_input_signal * 0x101;
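/* Illustrative example of the rescale above: with min_input_signal = 12 and
 * max_input_signal = 255, a requested brightness of 0 maps to 12 * 0x101 and a
 * requested 255 maps to 255 * 0x101 = 0xFFFF.
 */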
2098 if (dc_link_set_backlight_level(dm->backlight_link,
2105 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2107 struct amdgpu_display_manager *dm = bl_get_data(bd);
2108 int ret = dc_link_get_backlight_level(dm->backlight_link);
2110 if (ret == DC_ERROR_UNEXPECTED)
2111 return bd->props.brightness;
2115 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2116 .get_brightness = amdgpu_dm_backlight_get_brightness,
2117 .update_status = amdgpu_dm_backlight_update_status,
2121 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2124 struct backlight_properties props = { 0 };
2126 amdgpu_dm_update_backlight_caps(dm);
2128 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2129 props.brightness = AMDGPU_MAX_BL_LEVEL;
2130 props.type = BACKLIGHT_RAW;
2132 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2133 dm->adev->ddev->primary->index);
2135 dm->backlight_dev = backlight_device_register(bl_name,
2136 dm->adev->ddev->dev,
2138 &amdgpu_dm_backlight_ops,
2141 if (IS_ERR(dm->backlight_dev))
2142 DRM_ERROR("DM: Backlight registration failed!\n");
2144 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
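/* Allocate and initialize one DRM plane of the given type, storing it in
 * mode_info->planes when a mode_info slot is provided (primary planes only).
 */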
2149 static int initialize_plane(struct amdgpu_display_manager *dm,
2150 struct amdgpu_mode_info *mode_info, int plane_id,
2151 enum drm_plane_type plane_type,
2152 const struct dc_plane_cap *plane_cap)
2154 struct drm_plane *plane;
2155 unsigned long possible_crtcs;
2158 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2160 DRM_ERROR("KMS: Failed to allocate plane\n");
2163 plane->type = plane_type;
2166 * HACK: IGT tests expect that the primary plane for a CRTC
2167 * can only have one possible CRTC. Only expose support for
2168 * any CRTC if they're not going to be used as a primary plane
2169 * for a CRTC - like overlay or underlay planes.
2171 possible_crtcs = 1 << plane_id;
2172 if (plane_id >= dm->dc->caps.max_streams)
2173 possible_crtcs = 0xff;
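/*
 * For example, the primary plane for CRTC 0 ends up with
 * possible_crtcs = 0x1 (CRTC 0 only), while an overlay plane
 * (plane_id >= max_streams) gets 0xff and may be placed on any CRTC.
 */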
2175 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2178 DRM_ERROR("KMS: Failed to initialize plane\n");
2184 mode_info->planes[plane_id] = plane;
2190 static void register_backlight_device(struct amdgpu_display_manager *dm,
2191 struct dc_link *link)
2193 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2194 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2196 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2197 link->type != dc_connection_none) {
2199 * Even if registration fails, we should continue with
2200 * DM initialization because not having a backlight control
2201 * is better than a black screen.
2203 amdgpu_dm_register_backlight_device(dm);
2205 if (dm->backlight_dev)
2206 dm->backlight_link = link;
2213 * In this architecture, the association
2214 * connector -> encoder -> crtc
2215 * is not really required. The crtc and connector will hold the
2216 * display_index as an abstraction to use with the DAL component
2218 * Returns 0 on success
2220 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2222 struct amdgpu_display_manager *dm = &adev->dm;
2224 struct amdgpu_dm_connector *aconnector = NULL;
2225 struct amdgpu_encoder *aencoder = NULL;
2226 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2228 int32_t primary_planes;
2229 enum dc_connection_type new_connection_type = dc_connection_none;
2230 const struct dc_plane_cap *plane;
2232 link_cnt = dm->dc->caps.max_links;
2233 if (amdgpu_dm_mode_config_init(dm->adev)) {
2234 DRM_ERROR("DM: Failed to initialize mode config\n");
2238 /* There is one primary plane per CRTC */
2239 primary_planes = dm->dc->caps.max_streams;
2240 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2243 * Initialize primary planes, implicit planes for legacy IOCTLS.
2244 * Order is reversed to match iteration order in atomic check.
2246 for (i = (primary_planes - 1); i >= 0; i--) {
2247 plane = &dm->dc->caps.planes[i];
2249 if (initialize_plane(dm, mode_info, i,
2250 DRM_PLANE_TYPE_PRIMARY, plane)) {
2251 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2257 * Initialize overlay planes, index starting after primary planes.
2258 * These planes have a higher DRM index than the primary planes since
2259 * they should be considered as having a higher z-order.
2260 * Order is reversed to match iteration order in atomic check.
2262 * Only support DCN for now, and only expose one so we don't encourage
2263 * userspace to use up all the pipes.
2265 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2266 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2268 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2271 if (!plane->blends_with_above || !plane->blends_with_below)
2274 if (!plane->pixel_format_support.argb8888)
2277 if (initialize_plane(dm, NULL, primary_planes + i,
2278 DRM_PLANE_TYPE_OVERLAY, plane)) {
2279 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2283 /* Only create one overlay plane. */
2287 for (i = 0; i < dm->dc->caps.max_streams; i++)
2288 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2289 DRM_ERROR("KMS: Failed to initialize crtc\n");
2293 dm->display_indexes_num = dm->dc->caps.max_streams;
2295 /* loops over all connectors on the board */
2296 for (i = 0; i < link_cnt; i++) {
2297 struct dc_link *link = NULL;
2299 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2301 "KMS: Cannot support more than %d display indexes\n",
2302 AMDGPU_DM_MAX_DISPLAY_INDEX);
2306 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2310 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2314 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2315 DRM_ERROR("KMS: Failed to initialize encoder\n");
2319 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2320 DRM_ERROR("KMS: Failed to initialize connector\n");
2324 link = dc_get_link_at_index(dm->dc, i);
2326 if (!dc_link_detect_sink(link, &new_connection_type))
2327 DRM_ERROR("KMS: Failed to detect connector\n");
2329 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2330 emulated_link_detect(link);
2331 amdgpu_dm_update_connector_after_detect(aconnector);
2333 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2334 amdgpu_dm_update_connector_after_detect(aconnector);
2335 register_backlight_device(dm, link);
2341 /* Software is initialized. Now we can register interrupt handlers. */
2342 switch (adev->asic_type) {
2352 case CHIP_POLARIS11:
2353 case CHIP_POLARIS10:
2354 case CHIP_POLARIS12:
2359 if (dce110_register_irq_handlers(dm->adev)) {
2360 DRM_ERROR("DM: Failed to initialize IRQ\n");
2364 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2366 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2371 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2374 if (dcn10_register_irq_handlers(dm->adev)) {
2375 DRM_ERROR("DM: Failed to initialize IRQ\n");
2381 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2385 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2386 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2387 if (adev->asic_type == CHIP_RENOIR)
2388 dm->dc->debug.disable_stutter = true;
2398 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2400 drm_mode_config_cleanup(dm->ddev);
2401 drm_atomic_private_obj_fini(&dm->atomic_obj);
2405 /******************************************************************************
2406 * amdgpu_display_funcs functions
2407 *****************************************************************************/
2410 * dm_bandwidth_update - program display watermarks
2412 * @adev: amdgpu_device pointer
2414 * Calculate and program the display watermarks and line buffer allocation.
2416 static void dm_bandwidth_update(struct amdgpu_device *adev)
2418 /* TODO: implement later */
2421 static const struct amdgpu_display_funcs dm_display_funcs = {
2422 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2423 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
2424 .backlight_set_level = NULL, /* never called for DC */
2425 .backlight_get_level = NULL, /* never called for DC */
2426 .hpd_sense = NULL,/* called unconditionally */
2427 .hpd_set_polarity = NULL, /* called unconditionally */
2428 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2429 .page_flip_get_scanoutpos =
2430 dm_crtc_get_scanoutpos,/* called unconditionally */
2431 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2432 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
2435 #if defined(CONFIG_DEBUG_KERNEL_DC)
2437 static ssize_t s3_debug_store(struct device *device,
2438 struct device_attribute *attr,
2444 struct drm_device *drm_dev = dev_get_drvdata(device);
2445 struct amdgpu_device *adev = drm_dev->dev_private;
2447 ret = kstrtoint(buf, 0, &s3_state);
2452 drm_kms_helper_hotplug_event(adev->ddev);
2457 return ret == 0 ? count : 0;
2460 DEVICE_ATTR_WO(s3_debug);
2464 static int dm_early_init(void *handle)
2466 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2468 switch (adev->asic_type) {
2471 adev->mode_info.num_crtc = 6;
2472 adev->mode_info.num_hpd = 6;
2473 adev->mode_info.num_dig = 6;
2476 adev->mode_info.num_crtc = 4;
2477 adev->mode_info.num_hpd = 6;
2478 adev->mode_info.num_dig = 7;
2482 adev->mode_info.num_crtc = 2;
2483 adev->mode_info.num_hpd = 6;
2484 adev->mode_info.num_dig = 6;
2488 adev->mode_info.num_crtc = 6;
2489 adev->mode_info.num_hpd = 6;
2490 adev->mode_info.num_dig = 7;
2493 adev->mode_info.num_crtc = 3;
2494 adev->mode_info.num_hpd = 6;
2495 adev->mode_info.num_dig = 9;
2498 adev->mode_info.num_crtc = 2;
2499 adev->mode_info.num_hpd = 6;
2500 adev->mode_info.num_dig = 9;
2502 case CHIP_POLARIS11:
2503 case CHIP_POLARIS12:
2504 adev->mode_info.num_crtc = 5;
2505 adev->mode_info.num_hpd = 5;
2506 adev->mode_info.num_dig = 5;
2508 case CHIP_POLARIS10:
2510 adev->mode_info.num_crtc = 6;
2511 adev->mode_info.num_hpd = 6;
2512 adev->mode_info.num_dig = 6;
2517 adev->mode_info.num_crtc = 6;
2518 adev->mode_info.num_hpd = 6;
2519 adev->mode_info.num_dig = 6;
2521 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
2523 adev->mode_info.num_crtc = 4;
2524 adev->mode_info.num_hpd = 4;
2525 adev->mode_info.num_dig = 4;
2528 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2531 adev->mode_info.num_crtc = 6;
2532 adev->mode_info.num_hpd = 6;
2533 adev->mode_info.num_dig = 6;
2536 adev->mode_info.num_crtc = 5;
2537 adev->mode_info.num_hpd = 5;
2538 adev->mode_info.num_dig = 5;
2541 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2543 adev->mode_info.num_crtc = 4;
2544 adev->mode_info.num_hpd = 4;
2545 adev->mode_info.num_dig = 4;
2549 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2553 amdgpu_dm_set_irq_funcs(adev);
2555 if (adev->mode_info.funcs == NULL)
2556 adev->mode_info.funcs = &dm_display_funcs;
2559 * Note: Do NOT change adev->audio_endpt_rreg and
2560 * adev->audio_endpt_wreg because they are initialised in
2561 * amdgpu_device_init()
2563 #if defined(CONFIG_DEBUG_KERNEL_DC)
2566 &dev_attr_s3_debug);
2572 static bool modeset_required(struct drm_crtc_state *crtc_state,
2573 struct dc_stream_state *new_stream,
2574 struct dc_stream_state *old_stream)
2576 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2579 if (!crtc_state->enable)
2582 return crtc_state->active;
2585 static bool modereset_required(struct drm_crtc_state *crtc_state)
2587 if (!drm_atomic_crtc_needs_modeset(crtc_state))
2590 return !crtc_state->enable || !crtc_state->active;
2593 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
2595 drm_encoder_cleanup(encoder);
2599 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
2600 .destroy = amdgpu_dm_encoder_destroy,
2604 static int fill_dc_scaling_info(const struct drm_plane_state *state,
2605 struct dc_scaling_info *scaling_info)
2607 int scale_w, scale_h;
2609 memset(scaling_info, 0, sizeof(*scaling_info));
2611 /* Source is fixed-point 16.16, but we ignore the fractional part for now... */
2612 scaling_info->src_rect.x = state->src_x >> 16;
2613 scaling_info->src_rect.y = state->src_y >> 16;
2615 scaling_info->src_rect.width = state->src_w >> 16;
2616 if (scaling_info->src_rect.width == 0)
2619 scaling_info->src_rect.height = state->src_h >> 16;
2620 if (scaling_info->src_rect.height == 0)
2623 scaling_info->dst_rect.x = state->crtc_x;
2624 scaling_info->dst_rect.y = state->crtc_y;
2626 if (state->crtc_w == 0)
2629 scaling_info->dst_rect.width = state->crtc_w;
2631 if (state->crtc_h == 0)
2634 scaling_info->dst_rect.height = state->crtc_h;
2636 /* DRM doesn't specify clipping on destination output. */
2637 scaling_info->clip_rect = scaling_info->dst_rect;
2639 /* TODO: Validate scaling per-format with DC plane caps */
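/*
 * The ratios below are in thousandths, so the allowed window of
 * 250..16000 corresponds to 4x downscale through 16x upscale. For
 * example, showing a 1920-pixel-wide source in a 400-pixel-wide
 * destination gives 400 * 1000 / 1920 = 208, which falls outside
 * the window.
 */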
2640 scale_w = scaling_info->dst_rect.width * 1000 /
2641 scaling_info->src_rect.width;
2643 if (scale_w < 250 || scale_w > 16000)
2646 scale_h = scaling_info->dst_rect.height * 1000 /
2647 scaling_info->src_rect.height;
2649 if (scale_h < 250 || scale_h > 16000)
2653 * The "scaling_quality" can be ignored for now; quality = 0 lets DC
2654 * assume reasonable defaults based on the format.
2660 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
2661 uint64_t *tiling_flags)
2663 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
2664 int r = amdgpu_bo_reserve(rbo, false);
2667 /* Don't show error message when returning -ERESTARTSYS */
2668 if (r != -ERESTARTSYS)
2669 DRM_ERROR("Unable to reserve buffer: %d\n", r);
2674 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
2676 amdgpu_bo_unreserve(rbo);
2681 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
2683 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
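/*
 * DCC_OFFSET_256B is expressed in 256-byte units, e.g. an offset
 * field of 0x40 places the DCC metadata 0x4000 bytes past the
 * surface address; a zero offset means there is no DCC surface.
 */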
2685 return offset ? (address + offset * 256) : 0;
2689 fill_plane_dcc_attributes(struct amdgpu_device *adev,
2690 const struct amdgpu_framebuffer *afb,
2691 const enum surface_pixel_format format,
2692 const enum dc_rotation_angle rotation,
2693 const struct plane_size *plane_size,
2694 const union dc_tiling_info *tiling_info,
2695 const uint64_t info,
2696 struct dc_plane_dcc_param *dcc,
2697 struct dc_plane_address *address)
2699 struct dc *dc = adev->dm.dc;
2700 struct dc_dcc_surface_param input;
2701 struct dc_surface_dcc_cap output;
2702 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
2703 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
2704 uint64_t dcc_address;
2706 memset(&input, 0, sizeof(input));
2707 memset(&output, 0, sizeof(output));
2712 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2715 if (!dc->cap_funcs.get_dcc_compression_cap)
2718 input.format = format;
2719 input.surface_size.width = plane_size->surface_size.width;
2720 input.surface_size.height = plane_size->surface_size.height;
2721 input.swizzle_mode = tiling_info->gfx9.swizzle;
2723 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
2724 input.scan = SCAN_DIRECTION_HORIZONTAL;
2725 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
2726 input.scan = SCAN_DIRECTION_VERTICAL;
2728 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
2731 if (!output.capable)
2734 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
2739 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
2740 dcc->independent_64b_blks = i64b;
2742 dcc_address = get_dcc_address(afb->address, info);
2743 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
2744 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
2750 fill_plane_buffer_attributes(struct amdgpu_device *adev,
2751 const struct amdgpu_framebuffer *afb,
2752 const enum surface_pixel_format format,
2753 const enum dc_rotation_angle rotation,
2754 const uint64_t tiling_flags,
2755 union dc_tiling_info *tiling_info,
2756 struct plane_size *plane_size,
2757 struct dc_plane_dcc_param *dcc,
2758 struct dc_plane_address *address)
2760 const struct drm_framebuffer *fb = &afb->base;
2763 memset(tiling_info, 0, sizeof(*tiling_info));
2764 memset(plane_size, 0, sizeof(*plane_size));
2765 memset(dcc, 0, sizeof(*dcc));
2766 memset(address, 0, sizeof(*address));
2768 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2769 plane_size->surface_size.x = 0;
2770 plane_size->surface_size.y = 0;
2771 plane_size->surface_size.width = fb->width;
2772 plane_size->surface_size.height = fb->height;
2773 plane_size->surface_pitch =
2774 fb->pitches[0] / fb->format->cpp[0];
2776 address->type = PLN_ADDR_TYPE_GRAPHICS;
2777 address->grph.addr.low_part = lower_32_bits(afb->address);
2778 address->grph.addr.high_part = upper_32_bits(afb->address);
2779 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
2780 uint64_t chroma_addr = afb->address + fb->offsets[1];
2782 plane_size->surface_size.x = 0;
2783 plane_size->surface_size.y = 0;
2784 plane_size->surface_size.width = fb->width;
2785 plane_size->surface_size.height = fb->height;
2786 plane_size->surface_pitch =
2787 fb->pitches[0] / fb->format->cpp[0];
2789 plane_size->chroma_size.x = 0;
2790 plane_size->chroma_size.y = 0;
2791 /* TODO: set these based on surface format */
2792 plane_size->chroma_size.width = fb->width / 2;
2793 plane_size->chroma_size.height = fb->height / 2;
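/*
 * The division by two assumes a 4:2:0 layout (e.g. NV12/NV21): a
 * 1920x1080 frame then carries a 960x540 chroma plane.
 */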
2795 plane_size->chroma_pitch =
2796 fb->pitches[1] / fb->format->cpp[1];
2798 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
2799 address->video_progressive.luma_addr.low_part =
2800 lower_32_bits(afb->address);
2801 address->video_progressive.luma_addr.high_part =
2802 upper_32_bits(afb->address);
2803 address->video_progressive.chroma_addr.low_part =
2804 lower_32_bits(chroma_addr);
2805 address->video_progressive.chroma_addr.high_part =
2806 upper_32_bits(chroma_addr);
2809 /* Fill GFX8 params */
2810 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
2811 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
2813 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2814 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2815 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2816 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2817 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2819 /* XXX fix me for VI */
2820 tiling_info->gfx8.num_banks = num_banks;
2821 tiling_info->gfx8.array_mode =
2822 DC_ARRAY_2D_TILED_THIN1;
2823 tiling_info->gfx8.tile_split = tile_split;
2824 tiling_info->gfx8.bank_width = bankw;
2825 tiling_info->gfx8.bank_height = bankh;
2826 tiling_info->gfx8.tile_aspect = mtaspect;
2827 tiling_info->gfx8.tile_mode =
2828 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
2829 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
2830 == DC_ARRAY_1D_TILED_THIN1) {
2831 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
2834 tiling_info->gfx8.pipe_config =
2835 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2837 if (adev->asic_type == CHIP_VEGA10 ||
2838 adev->asic_type == CHIP_VEGA12 ||
2839 adev->asic_type == CHIP_VEGA20 ||
2840 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2841 adev->asic_type == CHIP_NAVI10 ||
2842 adev->asic_type == CHIP_NAVI14 ||
2843 adev->asic_type == CHIP_NAVI12 ||
2845 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2846 adev->asic_type == CHIP_RENOIR ||
2848 adev->asic_type == CHIP_RAVEN) {
2849 /* Fill GFX9 params */
2850 tiling_info->gfx9.num_pipes =
2851 adev->gfx.config.gb_addr_config_fields.num_pipes;
2852 tiling_info->gfx9.num_banks =
2853 adev->gfx.config.gb_addr_config_fields.num_banks;
2854 tiling_info->gfx9.pipe_interleave =
2855 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
2856 tiling_info->gfx9.num_shader_engines =
2857 adev->gfx.config.gb_addr_config_fields.num_se;
2858 tiling_info->gfx9.max_compressed_frags =
2859 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
2860 tiling_info->gfx9.num_rb_per_se =
2861 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
2862 tiling_info->gfx9.swizzle =
2863 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2864 tiling_info->gfx9.shaderEnable = 1;
2866 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
2867 plane_size, tiling_info,
2868 tiling_flags, dcc, address);
2877 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
2878 bool *per_pixel_alpha, bool *global_alpha,
2879 int *global_alpha_value)
2881 *per_pixel_alpha = false;
2882 *global_alpha = false;
2883 *global_alpha_value = 0xff;
2885 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
2888 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
2889 static const uint32_t alpha_formats[] = {
2890 DRM_FORMAT_ARGB8888,
2891 DRM_FORMAT_RGBA8888,
2892 DRM_FORMAT_ABGR8888,
2894 uint32_t format = plane_state->fb->format->format;
2897 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
2898 if (format == alpha_formats[i]) {
2899 *per_pixel_alpha = true;
2905 if (plane_state->alpha < 0xffff) {
2906 *global_alpha = true;
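/*
 * The DRM alpha property is 16 bits wide (0xffff is fully opaque);
 * shifting right by 8 maps it onto DC's 8-bit global alpha, e.g.
 * 0x8000 becomes 0x80, roughly half opacity.
 */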
2907 *global_alpha_value = plane_state->alpha >> 8;
2912 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
2913 const enum surface_pixel_format format,
2914 enum dc_color_space *color_space)
2918 *color_space = COLOR_SPACE_SRGB;
2920 /* DRM color properties only affect non-RGB formats. */
2921 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
2924 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
2926 switch (plane_state->color_encoding) {
2927 case DRM_COLOR_YCBCR_BT601:
2929 *color_space = COLOR_SPACE_YCBCR601;
2931 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
2934 case DRM_COLOR_YCBCR_BT709:
2936 *color_space = COLOR_SPACE_YCBCR709;
2938 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
2941 case DRM_COLOR_YCBCR_BT2020:
2943 *color_space = COLOR_SPACE_2020_YCBCR;
2956 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
2957 const struct drm_plane_state *plane_state,
2958 const uint64_t tiling_flags,
2959 struct dc_plane_info *plane_info,
2960 struct dc_plane_address *address)
2962 const struct drm_framebuffer *fb = plane_state->fb;
2963 const struct amdgpu_framebuffer *afb =
2964 to_amdgpu_framebuffer(plane_state->fb);
2965 struct drm_format_name_buf format_name;
2968 memset(plane_info, 0, sizeof(*plane_info));
2970 switch (fb->format->format) {
2972 plane_info->format =
2973 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
2975 case DRM_FORMAT_RGB565:
2976 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
2978 case DRM_FORMAT_XRGB8888:
2979 case DRM_FORMAT_ARGB8888:
2980 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
2982 case DRM_FORMAT_XRGB2101010:
2983 case DRM_FORMAT_ARGB2101010:
2984 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
2986 case DRM_FORMAT_XBGR2101010:
2987 case DRM_FORMAT_ABGR2101010:
2988 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
2990 case DRM_FORMAT_XBGR8888:
2991 case DRM_FORMAT_ABGR8888:
2992 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
2994 case DRM_FORMAT_NV21:
2995 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
2997 case DRM_FORMAT_NV12:
2998 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3002 "Unsupported screen format %s\n",
3003 drm_get_format_name(fb->format->format, &format_name));
3007 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3008 case DRM_MODE_ROTATE_0:
3009 plane_info->rotation = ROTATION_ANGLE_0;
3011 case DRM_MODE_ROTATE_90:
3012 plane_info->rotation = ROTATION_ANGLE_90;
3014 case DRM_MODE_ROTATE_180:
3015 plane_info->rotation = ROTATION_ANGLE_180;
3017 case DRM_MODE_ROTATE_270:
3018 plane_info->rotation = ROTATION_ANGLE_270;
3021 plane_info->rotation = ROTATION_ANGLE_0;
3025 plane_info->visible = true;
3026 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3028 plane_info->layer_index = 0;
3030 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3031 &plane_info->color_space);
3035 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3036 plane_info->rotation, tiling_flags,
3037 &plane_info->tiling_info,
3038 &plane_info->plane_size,
3039 &plane_info->dcc, address);
3043 fill_blending_from_plane_state(
3044 plane_state, &plane_info->per_pixel_alpha,
3045 &plane_info->global_alpha, &plane_info->global_alpha_value);
3050 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3051 struct dc_plane_state *dc_plane_state,
3052 struct drm_plane_state *plane_state,
3053 struct drm_crtc_state *crtc_state)
3055 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3056 const struct amdgpu_framebuffer *amdgpu_fb =
3057 to_amdgpu_framebuffer(plane_state->fb);
3058 struct dc_scaling_info scaling_info;
3059 struct dc_plane_info plane_info;
3060 uint64_t tiling_flags;
3063 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3067 dc_plane_state->src_rect = scaling_info.src_rect;
3068 dc_plane_state->dst_rect = scaling_info.dst_rect;
3069 dc_plane_state->clip_rect = scaling_info.clip_rect;
3070 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3072 ret = get_fb_info(amdgpu_fb, &tiling_flags);
3076 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3078 &dc_plane_state->address);
3082 dc_plane_state->format = plane_info.format;
3083 dc_plane_state->color_space = plane_info.color_space;
3085 dc_plane_state->plane_size = plane_info.plane_size;
3086 dc_plane_state->rotation = plane_info.rotation;
3087 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3088 dc_plane_state->stereo_format = plane_info.stereo_format;
3089 dc_plane_state->tiling_info = plane_info.tiling_info;
3090 dc_plane_state->visible = plane_info.visible;
3091 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3092 dc_plane_state->global_alpha = plane_info.global_alpha;
3093 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3094 dc_plane_state->dcc = plane_info.dcc;
3095 dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
3098 * Always set input transfer function, since plane state is refreshed
3101 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3108 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3109 const struct dm_connector_state *dm_state,
3110 struct dc_stream_state *stream)
3112 enum amdgpu_rmx_type rmx_type;
3114 struct rect src = { 0 }; /* viewport in composition space */
3115 struct rect dst = { 0 }; /* stream addressable area */
3117 /* no mode. nothing to be done */
3121 /* Full screen scaling by default */
3122 src.width = mode->hdisplay;
3123 src.height = mode->vdisplay;
3124 dst.width = stream->timing.h_addressable;
3125 dst.height = stream->timing.v_addressable;
3128 rmx_type = dm_state->scaling;
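/*
 * For example, a 1280x1024 surface on a 1920x1080 stream: since
 * 1280 * 1080 < 1024 * 1920, the destination width is reduced to
 * 1280 * 1080 / 1024 = 1350 and the image is centered below with
 * 285-pixel borders on each side.
 */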
3129 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3130 if (src.width * dst.height <
3131 src.height * dst.width) {
3132 /* height needs less upscaling/more downscaling */
3133 dst.width = src.width *
3134 dst.height / src.height;
3136 /* width needs less upscaling/more downscaling */
3137 dst.height = src.height *
3138 dst.width / src.width;
3140 } else if (rmx_type == RMX_CENTER) {
3144 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3145 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3147 if (dm_state->underscan_enable) {
3148 dst.x += dm_state->underscan_hborder / 2;
3149 dst.y += dm_state->underscan_vborder / 2;
3150 dst.width -= dm_state->underscan_hborder;
3151 dst.height -= dm_state->underscan_vborder;
3158 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3159 dst.x, dst.y, dst.width, dst.height);
3163 static enum dc_color_depth
3164 convert_color_depth_from_display_info(const struct drm_connector *connector,
3165 const struct drm_connector_state *state)
3167 uint8_t bpc = (uint8_t)connector->display_info.bpc;
3169 /* Assume 8 bpc by default if no bpc is specified. */
3170 bpc = bpc ? bpc : 8;
3173 state = connector->state;
3177 * Cap display bpc based on the user requested value.
3179 * The value for state->max_bpc may not be correctly updated
3180 * depending on when the connector gets added to the state
3181 * or if this was called outside of atomic check, so it
3182 * can't be used directly.
3184 bpc = min(bpc, state->max_requested_bpc);
3186 /* Round down to the nearest even number. */
3187 bpc = bpc - (bpc & 1);
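/*
 * For example, a panel advertising 12 bpc with a user-requested
 * max of 10 is capped to 10, and an odd value such as 7 is rounded
 * down to 6 here.
 */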
3193 * Temporary workaround: DRM doesn't parse color depth for
3194 * EDID revisions before 1.4.
3195 * TODO: Fix EDID parsing
3197 return COLOR_DEPTH_888;
3199 return COLOR_DEPTH_666;
3201 return COLOR_DEPTH_888;
3203 return COLOR_DEPTH_101010;
3205 return COLOR_DEPTH_121212;
3207 return COLOR_DEPTH_141414;
3209 return COLOR_DEPTH_161616;
3211 return COLOR_DEPTH_UNDEFINED;
3215 static enum dc_aspect_ratio
3216 get_aspect_ratio(const struct drm_display_mode *mode_in)
3218 /* 1-1 mapping, since both enums follow the HDMI spec. */
3219 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3222 static enum dc_color_space
3223 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3225 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3227 switch (dc_crtc_timing->pixel_encoding) {
3228 case PIXEL_ENCODING_YCBCR422:
3229 case PIXEL_ENCODING_YCBCR444:
3230 case PIXEL_ENCODING_YCBCR420:
3233 * 27030 kHz is the separation point between HDTV and SDTV
3234 * according to the HDMI spec; we use YCbCr709 and YCbCr601
3237 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3238 if (dc_crtc_timing->flags.Y_ONLY)
3240 COLOR_SPACE_YCBCR709_LIMITED;
3242 color_space = COLOR_SPACE_YCBCR709;
3244 if (dc_crtc_timing->flags.Y_ONLY)
3246 COLOR_SPACE_YCBCR601_LIMITED;
3248 color_space = COLOR_SPACE_YCBCR601;
3253 case PIXEL_ENCODING_RGB:
3254 color_space = COLOR_SPACE_SRGB;
3265 static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
3267 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3270 timing_out->display_color_depth--;
3273 static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
3274 const struct drm_display_info *info)
3277 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
3280 normalized_clk = timing_out->pix_clk_100hz / 10;
3281 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3282 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3283 normalized_clk /= 2;
3284 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
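/*
 * For example, a 594000 kHz (4k60) stream at 10 bpc needs
 * 594000 * 30 / 24 = 742500 kHz; if the sink only reports a
 * 600000 kHz max TMDS clock, the loop below drops the depth
 * back to 8 bpc.
 */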
3285 switch (timing_out->display_color_depth) {
3286 case COLOR_DEPTH_101010:
3287 normalized_clk = (normalized_clk * 30) / 24;
3289 case COLOR_DEPTH_121212:
3290 normalized_clk = (normalized_clk * 36) / 24;
3292 case COLOR_DEPTH_161616:
3293 normalized_clk = (normalized_clk * 48) / 24;
3298 if (normalized_clk <= info->max_tmds_clock)
3300 reduce_mode_colour_depth(timing_out);
3302 } while (timing_out->display_color_depth > COLOR_DEPTH_888);
3306 static void fill_stream_properties_from_drm_display_mode(
3307 struct dc_stream_state *stream,
3308 const struct drm_display_mode *mode_in,
3309 const struct drm_connector *connector,
3310 const struct drm_connector_state *connector_state,
3311 const struct dc_stream_state *old_stream)
3313 struct dc_crtc_timing *timing_out = &stream->timing;
3314 const struct drm_display_info *info = &connector->display_info;
3316 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
3318 timing_out->h_border_left = 0;
3319 timing_out->h_border_right = 0;
3320 timing_out->v_border_top = 0;
3321 timing_out->v_border_bottom = 0;
3322 /* TODO: un-hardcode */
3323 if (drm_mode_is_420_only(info, mode_in)
3324 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3325 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3326 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3327 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3328 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3330 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3332 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3333 timing_out->display_color_depth = convert_color_depth_from_display_info(
3334 connector, connector_state);
3335 timing_out->scan_type = SCANNING_TYPE_NODATA;
3336 timing_out->hdmi_vic = 0;
3339 timing_out->vic = old_stream->timing.vic;
3340 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3341 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3343 timing_out->vic = drm_match_cea_mode(mode_in);
3344 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3345 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3346 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3347 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3350 timing_out->h_addressable = mode_in->crtc_hdisplay;
3351 timing_out->h_total = mode_in->crtc_htotal;
3352 timing_out->h_sync_width =
3353 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3354 timing_out->h_front_porch =
3355 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3356 timing_out->v_total = mode_in->crtc_vtotal;
3357 timing_out->v_addressable = mode_in->crtc_vdisplay;
3358 timing_out->v_front_porch =
3359 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3360 timing_out->v_sync_width =
3361 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
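/*
 * crtc_clock is in kHz; multiplying by 10 below yields DC's 100 Hz
 * units, e.g. 148500 kHz becomes 1485000.
 */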
3362 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
3363 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
3365 stream->output_color_space = get_output_color_space(timing_out);
3367 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3368 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
3369 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3370 adjust_colour_depth_from_display_info(timing_out, info);
3373 static void fill_audio_info(struct audio_info *audio_info,
3374 const struct drm_connector *drm_connector,
3375 const struct dc_sink *dc_sink)
3378 int cea_revision = 0;
3379 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3381 audio_info->manufacture_id = edid_caps->manufacturer_id;
3382 audio_info->product_id = edid_caps->product_id;
3384 cea_revision = drm_connector->display_info.cea_rev;
3386 strscpy(audio_info->display_name,
3387 edid_caps->display_name,
3388 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3390 if (cea_revision >= 3) {
3391 audio_info->mode_count = edid_caps->audio_mode_count;
3393 for (i = 0; i < audio_info->mode_count; ++i) {
3394 audio_info->modes[i].format_code =
3395 (enum audio_format_code)
3396 (edid_caps->audio_modes[i].format_code);
3397 audio_info->modes[i].channel_count =
3398 edid_caps->audio_modes[i].channel_count;
3399 audio_info->modes[i].sample_rates.all =
3400 edid_caps->audio_modes[i].sample_rate;
3401 audio_info->modes[i].sample_size =
3402 edid_caps->audio_modes[i].sample_size;
3406 audio_info->flags.all = edid_caps->speaker_flags;
3408 /* TODO: We only check for the progressive mode, check for interlace mode too */
3409 if (drm_connector->latency_present[0]) {
3410 audio_info->video_latency = drm_connector->video_latency[0];
3411 audio_info->audio_latency = drm_connector->audio_latency[0];
3414 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3419 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3420 struct drm_display_mode *dst_mode)
3422 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3423 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3424 dst_mode->crtc_clock = src_mode->crtc_clock;
3425 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3426 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3427 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
3428 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3429 dst_mode->crtc_htotal = src_mode->crtc_htotal;
3430 dst_mode->crtc_hskew = src_mode->crtc_hskew;
3431 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3432 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3433 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3434 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3435 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3439 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3440 const struct drm_display_mode *native_mode,
3443 if (scale_enabled) {
3444 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3445 } else if (native_mode->clock == drm_mode->clock &&
3446 native_mode->htotal == drm_mode->htotal &&
3447 native_mode->vtotal == drm_mode->vtotal) {
3448 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3450 /* no scaling and no amdgpu-inserted mode, so nothing to patch */
3454 static struct dc_sink *
3455 create_fake_sink(struct amdgpu_dm_connector *aconnector)
3457 struct dc_sink_init_data sink_init_data = { 0 };
3458 struct dc_sink *sink = NULL;
3459 sink_init_data.link = aconnector->dc_link;
3460 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
3462 sink = dc_sink_create(&sink_init_data);
3464 DRM_ERROR("Failed to create sink!\n");
3467 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
3472 static void set_multisync_trigger_params(
3473 struct dc_stream_state *stream)
3475 if (stream->triggered_crtc_reset.enabled) {
3476 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
3477 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
3481 static void set_master_stream(struct dc_stream_state *stream_set[],
3484 int j, highest_rfr = 0, master_stream = 0;
3486 for (j = 0; j < stream_count; j++) {
3487 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
3488 int refresh_rate = 0;
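/*
 * pix_clk_100hz is in 100 Hz units, so the refresh rate in Hz is
 * pix_clk_100hz * 100 / (h_total * v_total); e.g. for a 1080p60
 * stream: 1485000 * 100 / (2200 * 1125) = 60.
 */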
3490 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
3491 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
3492 if (refresh_rate > highest_rfr) {
3493 highest_rfr = refresh_rate;
3498 for (j = 0; j < stream_count; j++) {
3500 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
3504 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
3508 if (context->stream_count < 2)
3510 for (i = 0; i < context->stream_count ; i++) {
3511 if (!context->streams[i])
3514 * TODO: add a function to read AMD VSDB bits and set
3515 * crtc_sync_master.multi_sync_enabled flag
3516 * For now it's set to false
3518 set_multisync_trigger_params(context->streams[i]);
3520 set_master_stream(context->streams, context->stream_count);
3523 static struct dc_stream_state *
3524 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3525 const struct drm_display_mode *drm_mode,
3526 const struct dm_connector_state *dm_state,
3527 const struct dc_stream_state *old_stream)
3529 struct drm_display_mode *preferred_mode = NULL;
3530 struct drm_connector *drm_connector;
3531 const struct drm_connector_state *con_state =
3532 dm_state ? &dm_state->base : NULL;
3533 struct dc_stream_state *stream = NULL;
3534 struct drm_display_mode mode = *drm_mode;
3535 bool native_mode_found = false;
3536 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
3538 int preferred_refresh = 0;
3539 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3540 struct dsc_dec_dpcd_caps dsc_caps;
3541 uint32_t link_bandwidth_kbps;
3544 struct dc_sink *sink = NULL;
3545 if (aconnector == NULL) {
3546 DRM_ERROR("aconnector is NULL!\n");
3550 drm_connector = &aconnector->base;
3552 if (!aconnector->dc_sink) {
3553 sink = create_fake_sink(aconnector);
3557 sink = aconnector->dc_sink;
3558 dc_sink_retain(sink);
3561 stream = dc_create_stream_for_sink(sink);
3563 if (stream == NULL) {
3564 DRM_ERROR("Failed to create stream for sink!\n");
3568 stream->dm_stream_context = aconnector;
3570 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
3571 /* Search for preferred mode */
3572 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
3573 native_mode_found = true;
3577 if (!native_mode_found)
3578 preferred_mode = list_first_entry_or_null(
3579 &aconnector->base.modes,
3580 struct drm_display_mode,
3583 mode_refresh = drm_mode_vrefresh(&mode);
3585 if (preferred_mode == NULL) {
3587 * This may not be an error: the use case is when we have no
3588 * usermode calls to reset and set mode upon hotplug. In this
3589 * case, we call set mode ourselves to restore the previous mode,
3590 * and the mode list may not be filled in yet.
3592 DRM_DEBUG_DRIVER("No preferred mode found\n");
3594 decide_crtc_timing_for_drm_display_mode(
3595 &mode, preferred_mode,
3596 dm_state ? (dm_state->scaling != RMX_OFF) : false);
3597 preferred_refresh = drm_mode_vrefresh(preferred_mode);
3601 drm_mode_set_crtcinfo(&mode, 0);
3604 * If scaling is enabled and the refresh rate didn't change,
3605 * we copy the vic and polarities of the old timings.
3607 if (!scale || mode_refresh != preferred_refresh)
3608 fill_stream_properties_from_drm_display_mode(stream,
3609 &mode, &aconnector->base, con_state, NULL);
3611 fill_stream_properties_from_drm_display_mode(stream,
3612 &mode, &aconnector->base, con_state, old_stream);
3614 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3615 stream->timing.flags.DSC = 0;
3617 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
3618 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
3619 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
3621 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
3622 dc_link_get_link_cap(aconnector->dc_link));
3624 if (dsc_caps.is_dsc_supported)
3625 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
3627 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
3628 link_bandwidth_kbps,
3630 &stream->timing.dsc_cfg))
3631 stream->timing.flags.DSC = 1;
3635 update_stream_scaling_settings(&mode, dm_state, stream);
3638 &stream->audio_info,
3642 update_stream_signal(stream, sink);
3645 dc_sink_release(sink);
3650 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
3652 drm_crtc_cleanup(crtc);
3656 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
3657 struct drm_crtc_state *state)
3659 struct dm_crtc_state *cur = to_dm_crtc_state(state);
3661 /* TODO: Destroy dc_stream objects once the stream object is flattened */
3663 dc_stream_release(cur->stream);
3666 __drm_atomic_helper_crtc_destroy_state(state);
3672 static void dm_crtc_reset_state(struct drm_crtc *crtc)
3674 struct dm_crtc_state *state;
3677 dm_crtc_destroy_state(crtc, crtc->state);
3679 state = kzalloc(sizeof(*state), GFP_KERNEL);
3680 if (WARN_ON(!state))
3683 crtc->state = &state->base;
3684 crtc->state->crtc = crtc;
3688 static struct drm_crtc_state *
3689 dm_crtc_duplicate_state(struct drm_crtc *crtc)
3691 struct dm_crtc_state *state, *cur;
3693 cur = to_dm_crtc_state(crtc->state);
3695 if (WARN_ON(!crtc->state))
3698 state = kzalloc(sizeof(*state), GFP_KERNEL);
3702 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
3705 state->stream = cur->stream;
3706 dc_stream_retain(state->stream);
3709 state->active_planes = cur->active_planes;
3710 state->interrupts_enabled = cur->interrupts_enabled;
3711 state->vrr_params = cur->vrr_params;
3712 state->vrr_infopacket = cur->vrr_infopacket;
3713 state->abm_level = cur->abm_level;
3714 state->vrr_supported = cur->vrr_supported;
3715 state->freesync_config = cur->freesync_config;
3716 state->crc_src = cur->crc_src;
3717 state->cm_has_degamma = cur->cm_has_degamma;
3718 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
3720 /* TODO: Duplicate dc_stream once the stream object is flattened */
3722 return &state->base;
3725 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
3727 enum dc_irq_source irq_source;
3728 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3729 struct amdgpu_device *adev = crtc->dev->dev_private;
3732 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
3734 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3736 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
3737 acrtc->crtc_id, enable ? "en" : "dis", rc);
3741 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
3743 enum dc_irq_source irq_source;
3744 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
3745 struct amdgpu_device *adev = crtc->dev->dev_private;
3746 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
3750 /* vblank irq on -> Only need vupdate irq in vrr mode */
3751 if (amdgpu_dm_vrr_active(acrtc_state))
3752 rc = dm_set_vupdate_irq(crtc, true);
3754 /* vblank irq off -> vupdate irq off */
3755 rc = dm_set_vupdate_irq(crtc, false);
3761 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
3762 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
3765 static int dm_enable_vblank(struct drm_crtc *crtc)
3767 return dm_set_vblank(crtc, true);
3770 static void dm_disable_vblank(struct drm_crtc *crtc)
3772 dm_set_vblank(crtc, false);
3775 /* Implemented only the options currently available for the driver */
3776 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
3777 .reset = dm_crtc_reset_state,
3778 .destroy = amdgpu_dm_crtc_destroy,
3779 .gamma_set = drm_atomic_helper_legacy_gamma_set,
3780 .set_config = drm_atomic_helper_set_config,
3781 .page_flip = drm_atomic_helper_page_flip,
3782 .atomic_duplicate_state = dm_crtc_duplicate_state,
3783 .atomic_destroy_state = dm_crtc_destroy_state,
3784 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
3785 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
3786 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
3787 .enable_vblank = dm_enable_vblank,
3788 .disable_vblank = dm_disable_vblank,
3791 static enum drm_connector_status
3792 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
3795 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3799 * 1. This interface is NOT called in context of HPD irq.
3800 * 2. This interface *is called* in context of user-mode ioctl, which
3801 * makes it a bad place for *any* MST-related activity.
3804 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
3805 !aconnector->fake_enable)
3806 connected = (aconnector->dc_sink != NULL);
3808 connected = (aconnector->base.force == DRM_FORCE_ON);
3810 return (connected ? connector_status_connected :
3811 connector_status_disconnected);
3814 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
3815 struct drm_connector_state *connector_state,
3816 struct drm_property *property,
3819 struct drm_device *dev = connector->dev;
3820 struct amdgpu_device *adev = dev->dev_private;
3821 struct dm_connector_state *dm_old_state =
3822 to_dm_connector_state(connector->state);
3823 struct dm_connector_state *dm_new_state =
3824 to_dm_connector_state(connector_state);
3828 if (property == dev->mode_config.scaling_mode_property) {
3829 enum amdgpu_rmx_type rmx_type;
3832 case DRM_MODE_SCALE_CENTER:
3833 rmx_type = RMX_CENTER;
3835 case DRM_MODE_SCALE_ASPECT:
3836 rmx_type = RMX_ASPECT;
3838 case DRM_MODE_SCALE_FULLSCREEN:
3839 rmx_type = RMX_FULL;
3841 case DRM_MODE_SCALE_NONE:
3847 if (dm_old_state->scaling == rmx_type)
3850 dm_new_state->scaling = rmx_type;
3852 } else if (property == adev->mode_info.underscan_hborder_property) {
3853 dm_new_state->underscan_hborder = val;
3855 } else if (property == adev->mode_info.underscan_vborder_property) {
3856 dm_new_state->underscan_vborder = val;
3858 } else if (property == adev->mode_info.underscan_property) {
3859 dm_new_state->underscan_enable = val;
3861 } else if (property == adev->mode_info.abm_level_property) {
3862 dm_new_state->abm_level = val;
3869 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
3870 const struct drm_connector_state *state,
3871 struct drm_property *property,
3874 struct drm_device *dev = connector->dev;
3875 struct amdgpu_device *adev = dev->dev_private;
3876 struct dm_connector_state *dm_state =
3877 to_dm_connector_state(state);
3880 if (property == dev->mode_config.scaling_mode_property) {
3881 switch (dm_state->scaling) {
3883 *val = DRM_MODE_SCALE_CENTER;
3886 *val = DRM_MODE_SCALE_ASPECT;
3889 *val = DRM_MODE_SCALE_FULLSCREEN;
3893 *val = DRM_MODE_SCALE_NONE;
3897 } else if (property == adev->mode_info.underscan_hborder_property) {
3898 *val = dm_state->underscan_hborder;
3900 } else if (property == adev->mode_info.underscan_vborder_property) {
3901 *val = dm_state->underscan_vborder;
3903 } else if (property == adev->mode_info.underscan_property) {
3904 *val = dm_state->underscan_enable;
3906 } else if (property == adev->mode_info.abm_level_property) {
3907 *val = dm_state->abm_level;
3914 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
3916 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
3918 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
3921 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
3923 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3924 const struct dc_link *link = aconnector->dc_link;
3925 struct amdgpu_device *adev = connector->dev->dev_private;
3926 struct amdgpu_display_manager *dm = &adev->dm;
3928 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3929 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3931 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3932 link->type != dc_connection_none &&
3933 dm->backlight_dev) {
3934 backlight_device_unregister(dm->backlight_dev);
3935 dm->backlight_dev = NULL;
3939 if (aconnector->dc_em_sink)
3940 dc_sink_release(aconnector->dc_em_sink);
3941 aconnector->dc_em_sink = NULL;
3942 if (aconnector->dc_sink)
3943 dc_sink_release(aconnector->dc_sink);
3944 aconnector->dc_sink = NULL;
3946 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
3947 drm_connector_unregister(connector);
3948 drm_connector_cleanup(connector);
3949 if (aconnector->i2c) {
3950 i2c_del_adapter(&aconnector->i2c->base);
3951 kfree(aconnector->i2c);
3957 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
3959 struct dm_connector_state *state =
3960 to_dm_connector_state(connector->state);
3962 if (connector->state)
3963 __drm_atomic_helper_connector_destroy_state(connector->state);
3967 state = kzalloc(sizeof(*state), GFP_KERNEL);
3970 state->scaling = RMX_OFF;
3971 state->underscan_enable = false;
3972 state->underscan_hborder = 0;
3973 state->underscan_vborder = 0;
3974 state->base.max_requested_bpc = 8;
3976 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3977 state->abm_level = amdgpu_dm_abm_level;
3979 __drm_atomic_helper_connector_reset(connector, &state->base);
3983 struct drm_connector_state *
3984 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
3986 struct dm_connector_state *state =
3987 to_dm_connector_state(connector->state);
3989 struct dm_connector_state *new_state =
3990 kmemdup(state, sizeof(*state), GFP_KERNEL);
3995 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
3997 new_state->freesync_capable = state->freesync_capable;
3998 new_state->abm_level = state->abm_level;
3999 new_state->scaling = state->scaling;
4000 new_state->underscan_enable = state->underscan_enable;
4001 new_state->underscan_hborder = state->underscan_hborder;
4002 new_state->underscan_vborder = state->underscan_vborder;
4004 return &new_state->base;
4007 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4008 .reset = amdgpu_dm_connector_funcs_reset,
4009 .detect = amdgpu_dm_connector_detect,
4010 .fill_modes = drm_helper_probe_single_connector_modes,
4011 .destroy = amdgpu_dm_connector_destroy,
4012 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4013 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4014 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4015 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4016 .early_unregister = amdgpu_dm_connector_unregister
4019 static int get_modes(struct drm_connector *connector)
4021 return amdgpu_dm_connector_get_modes(connector);
4024 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4026 struct dc_sink_init_data init_params = {
4027 .link = aconnector->dc_link,
4028 .sink_signal = SIGNAL_TYPE_VIRTUAL
4032 if (!aconnector->base.edid_blob_ptr) {
4033 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4034 aconnector->base.name);
4036 aconnector->base.force = DRM_FORCE_OFF;
4037 aconnector->base.override_edid = false;
4041 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4043 aconnector->edid = edid;
4045 aconnector->dc_em_sink = dc_link_add_remote_sink(
4046 aconnector->dc_link,
4048 (edid->extensions + 1) * EDID_LENGTH,
4051 if (aconnector->base.force == DRM_FORCE_ON) {
4052 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4053 aconnector->dc_link->local_sink :
4054 aconnector->dc_em_sink;
4055 dc_sink_retain(aconnector->dc_sink);
4059 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4061 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4064 * In case of a headless boot with force on for a DP managed connector,
4065 * those settings have to be != 0 to get an initial modeset
4067 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4068 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4069 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4073 aconnector->base.override_edid = true;
4074 create_eml_sink(aconnector);
4077 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4078 struct drm_display_mode *mode)
4080 int result = MODE_ERROR;
4081 struct dc_sink *dc_sink;
4082 struct amdgpu_device *adev = connector->dev->dev_private;
4083 /* TODO: Unhardcode stream count */
4084 struct dc_stream_state *stream;
4085 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4086 enum dc_status dc_result = DC_OK;
4088 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4089 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4093 * Only run this the first time mode_valid is called to initialize
4096 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4097 !aconnector->dc_em_sink)
4098 handle_edid_mgmt(aconnector);
4100 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4102 if (dc_sink == NULL) {
4103 DRM_ERROR("dc_sink is NULL!\n");
4107 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4108 if (stream == NULL) {
4109 DRM_ERROR("Failed to create stream for sink!\n");
4113 dc_result = dc_validate_stream(adev->dm.dc, stream);
4115 if (dc_result == DC_OK)
4118 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4124 dc_stream_release(stream);
4127 /* TODO: error handling */
4131 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4132 struct dc_info_packet *out)
4134 struct hdmi_drm_infoframe frame;
4135 unsigned char buf[30]; /* 26 + 4 */
4139 memset(out, 0, sizeof(*out));
4141 if (!state->hdr_output_metadata)
4144 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4148 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4152 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4156 /* Prepare the infopacket for DC. */
4157 switch (state->connector->connector_type) {
4158 case DRM_MODE_CONNECTOR_HDMIA:
4159 out->hb0 = 0x87; /* type */
4160 out->hb1 = 0x01; /* version */
4161 out->hb2 = 0x1A; /* length */
4162 out->sb[0] = buf[3]; /* checksum */
4166 case DRM_MODE_CONNECTOR_DisplayPort:
4167 case DRM_MODE_CONNECTOR_eDP:
4168 out->hb0 = 0x00; /* sdp id, zero */
4169 out->hb1 = 0x87; /* type */
4170 out->hb2 = 0x1D; /* payload len - 1 */
4171 out->hb3 = (0x13 << 2); /* sdp version */
4172 out->sb[0] = 0x01; /* version */
4173 out->sb[1] = 0x1A; /* length */
4181 memcpy(&out->sb[i], &buf[4], 26);
4184 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4185 sizeof(out->sb), false);
4191 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4192 const struct drm_connector_state *new_state)
4194 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4195 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4197 if (old_blob != new_blob) {
4198 if (old_blob && new_blob &&
4199 old_blob->length == new_blob->length)
4200 return memcmp(old_blob->data, new_blob->data,
4210 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4211 struct drm_atomic_state *state)
4213 struct drm_connector_state *new_con_state =
4214 drm_atomic_get_new_connector_state(state, conn);
4215 struct drm_connector_state *old_con_state =
4216 drm_atomic_get_old_connector_state(state, conn);
4217 struct drm_crtc *crtc = new_con_state->crtc;
4218 struct drm_crtc_state *new_crtc_state;
4224 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4225 struct dc_info_packet hdr_infopacket;
4227 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4231 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4232 if (IS_ERR(new_crtc_state))
4233 return PTR_ERR(new_crtc_state);
4236 * DC considers the stream backends changed if the
4237 * static metadata changes. Forcing the modeset also
4238 * gives a simple way for userspace to switch from
4239 * 8bpc to 10bpc when setting the metadata to enter
4242 * Changing the static metadata after it's been
4243 * set is permissible, however. So only force a
4244 * modeset if we're entering or exiting HDR.
4246 new_crtc_state->mode_changed =
4247 !old_con_state->hdr_output_metadata ||
4248 !new_con_state->hdr_output_metadata;
4254 static const struct drm_connector_helper_funcs
4255 amdgpu_dm_connector_helper_funcs = {
4257 * If hotplugging a second, bigger display in FB console mode, the bigger
4258 * resolution modes will be filtered by drm_mode_validate_size(), and those
4259 * modes are missing after the user starts lightdm. So we need to renew the
4260 * modes list in the get_modes callback, not just return the modes count
4262 .get_modes = get_modes,
4263 .mode_valid = amdgpu_dm_connector_mode_valid,
4264 .atomic_check = amdgpu_dm_connector_atomic_check,
4267 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4271 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4273 struct drm_device *dev = new_crtc_state->crtc->dev;
4274 struct drm_plane *plane;
4276 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4277 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4284 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4286 struct drm_atomic_state *state = new_crtc_state->state;
4287 struct drm_plane *plane;
4290 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4291 struct drm_plane_state *new_plane_state;
4293 /* Cursor planes are "fake". */
4294 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4297 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4299 if (!new_plane_state) {
4301 * The plane is enabled on the CRTC and hasn't changed
4302 * state. This means that it previously passed
4303 * validation and is therefore enabled.
4309 /* We need a framebuffer to be considered enabled. */
4310 num_active += (new_plane_state->fb != NULL);
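/*
 * Illustrative example: a CRTC with a primary plane and one overlay, both
 * with framebuffers attached, plus a cursor plane counts as two active
 * planes here -- the cursor is skipped above because DC treats it as a
 * "fake" plane handled outside the regular plane count.
 */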
4317 * Sets whether interrupts should be enabled on a specific CRTC.
4318 * We require that the stream be enabled and that there exist active
4319 * DC planes on the stream.
4322 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4323 struct drm_crtc_state *new_crtc_state)
4325 struct dm_crtc_state *dm_new_crtc_state =
4326 to_dm_crtc_state(new_crtc_state);
4328 dm_new_crtc_state->active_planes = 0;
4329 dm_new_crtc_state->interrupts_enabled = false;
4331 if (!dm_new_crtc_state->stream)
4334 dm_new_crtc_state->active_planes =
4335 count_crtc_active_planes(new_crtc_state);
4337 dm_new_crtc_state->interrupts_enabled =
4338 dm_new_crtc_state->active_planes > 0;
4341 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4342 struct drm_crtc_state *state)
4344 struct amdgpu_device *adev = crtc->dev->dev_private;
4345 struct dc *dc = adev->dm.dc;
4346 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4350 * Update interrupt state for the CRTC. This needs to happen whenever
4351 * the CRTC has changed or whenever any of its planes have changed.
4352 * Atomic check satisfies both of these requirements since the CRTC
4353 * is added to the state by DRM during drm_atomic_helper_check_planes.
4355 dm_update_crtc_interrupt_state(crtc, state);
4357 if (unlikely(!dm_crtc_state->stream &&
4358 modeset_required(state, NULL, dm_crtc_state->stream))) {
4363 /* In some use cases, like reset, no stream is attached */
4364 if (!dm_crtc_state->stream)
4368 * We want at least one hardware plane enabled to use
4369 * the stream with a cursor enabled.
4371 if (state->enable && state->active &&
4372 does_crtc_have_active_cursor(state) &&
4373 dm_crtc_state->active_planes == 0)
4376 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
4382 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4383 const struct drm_display_mode *mode,
4384 struct drm_display_mode *adjusted_mode)
4389 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4390 .disable = dm_crtc_helper_disable,
4391 .atomic_check = dm_crtc_helper_atomic_check,
4392 .mode_fixup = dm_crtc_helper_mode_fixup
4395 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
4400 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
4401 struct drm_crtc_state *crtc_state,
4402 struct drm_connector_state *conn_state)
4407 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
4408 .disable = dm_encoder_helper_disable,
4409 .atomic_check = dm_encoder_helper_atomic_check
4412 static void dm_drm_plane_reset(struct drm_plane *plane)
4414 struct dm_plane_state *amdgpu_state = NULL;
4417 plane->funcs->atomic_destroy_state(plane, plane->state);
4419 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
4420 WARN_ON(amdgpu_state == NULL);
4423 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
4426 static struct drm_plane_state *
4427 dm_drm_plane_duplicate_state(struct drm_plane *plane)
4429 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
4431 old_dm_plane_state = to_dm_plane_state(plane->state);
4432 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
4433 if (!dm_plane_state)
4436 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
4438 if (old_dm_plane_state->dc_state) {
4439 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
4440 dc_plane_state_retain(dm_plane_state->dc_state);
4443 return &dm_plane_state->base;
4446 void dm_drm_plane_destroy_state(struct drm_plane *plane,
4447 struct drm_plane_state *state)
4449 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
4451 if (dm_plane_state->dc_state)
4452 dc_plane_state_release(dm_plane_state->dc_state);
4454 drm_atomic_helper_plane_destroy_state(plane, state);
4457 static const struct drm_plane_funcs dm_plane_funcs = {
4458 .update_plane = drm_atomic_helper_update_plane,
4459 .disable_plane = drm_atomic_helper_disable_plane,
4460 .destroy = drm_primary_helper_destroy,
4461 .reset = dm_drm_plane_reset,
4462 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
4463 .atomic_destroy_state = dm_drm_plane_destroy_state,
4466 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
4467 struct drm_plane_state *new_state)
4469 struct amdgpu_framebuffer *afb;
4470 struct drm_gem_object *obj;
4471 struct amdgpu_device *adev;
4472 struct amdgpu_bo *rbo;
4473 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
4474 struct list_head list;
4475 struct ttm_validate_buffer tv;
4476 struct ww_acquire_ctx ticket;
4477 uint64_t tiling_flags;
4481 dm_plane_state_old = to_dm_plane_state(plane->state);
4482 dm_plane_state_new = to_dm_plane_state(new_state);
4484 if (!new_state->fb) {
4485 DRM_DEBUG_DRIVER("No FB bound\n");
4489 afb = to_amdgpu_framebuffer(new_state->fb);
4490 obj = new_state->fb->obj[0];
4491 rbo = gem_to_amdgpu_bo(obj);
4492 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
4493 INIT_LIST_HEAD(&list);
4497 list_add(&tv.head, &list);
4499 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
4501 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
4505 if (plane->type != DRM_PLANE_TYPE_CURSOR)
4506 domain = amdgpu_display_supported_domains(adev, rbo->flags);
4508 domain = AMDGPU_GEM_DOMAIN_VRAM;
4510 r = amdgpu_bo_pin(rbo, domain);
4511 if (unlikely(r != 0)) {
4512 if (r != -ERESTARTSYS)
4513 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
4514 ttm_eu_backoff_reservation(&ticket, &list);
4518 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
4519 if (unlikely(r != 0)) {
4520 amdgpu_bo_unpin(rbo);
4521 ttm_eu_backoff_reservation(&ticket, &list);
4522 DRM_ERROR("%p bind failed\n", rbo);
4526 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
4528 ttm_eu_backoff_reservation(&ticket, &list);
4530 afb->address = amdgpu_bo_gpu_offset(rbo);
4534 if (dm_plane_state_new->dc_state &&
4535 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
4536 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
4538 fill_plane_buffer_attributes(
4539 adev, afb, plane_state->format, plane_state->rotation,
4540 tiling_flags, &plane_state->tiling_info,
4541 &plane_state->plane_size, &plane_state->dcc,
4542 &plane_state->address);
4548 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
4549 struct drm_plane_state *old_state)
4551 struct amdgpu_bo *rbo;
4557 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
4558 r = amdgpu_bo_reserve(rbo, false);
4560 DRM_ERROR("failed to reserve rbo before unpin\n");
4564 amdgpu_bo_unpin(rbo);
4565 amdgpu_bo_unreserve(rbo);
4566 amdgpu_bo_unref(&rbo);
4569 static int dm_plane_atomic_check(struct drm_plane *plane,
4570 struct drm_plane_state *state)
4572 struct amdgpu_device *adev = plane->dev->dev_private;
4573 struct dc *dc = adev->dm.dc;
4574 struct dm_plane_state *dm_plane_state;
4575 struct dc_scaling_info scaling_info;
4578 dm_plane_state = to_dm_plane_state(state);
4580 if (!dm_plane_state->dc_state)
4583 ret = fill_dc_scaling_info(state, &scaling_info);
4587 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
4593 static int dm_plane_atomic_async_check(struct drm_plane *plane,
4594 struct drm_plane_state *new_plane_state)
4596 /* Only support async updates on cursor planes. */
4597 if (plane->type != DRM_PLANE_TYPE_CURSOR)
4603 static void dm_plane_atomic_async_update(struct drm_plane *plane,
4604 struct drm_plane_state *new_state)
4606 struct drm_plane_state *old_state =
4607 drm_atomic_get_old_plane_state(new_state->state, plane);
4609 swap(plane->state->fb, new_state->fb);
4611 plane->state->src_x = new_state->src_x;
4612 plane->state->src_y = new_state->src_y;
4613 plane->state->src_w = new_state->src_w;
4614 plane->state->src_h = new_state->src_h;
4615 plane->state->crtc_x = new_state->crtc_x;
4616 plane->state->crtc_y = new_state->crtc_y;
4617 plane->state->crtc_w = new_state->crtc_w;
4618 plane->state->crtc_h = new_state->crtc_h;
4620 handle_cursor_update(plane, old_state);
4623 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
4624 .prepare_fb = dm_plane_helper_prepare_fb,
4625 .cleanup_fb = dm_plane_helper_cleanup_fb,
4626 .atomic_check = dm_plane_atomic_check,
4627 .atomic_async_check = dm_plane_atomic_async_check,
4628 .atomic_async_update = dm_plane_atomic_async_update
4632 * TODO: these are currently initialized to RGB formats only.
4633 * For future use cases we should either initialize them dynamically based on
4634 * plane capabilities, or initialize this array to all formats, so the internal
4635 * DRM check will succeed, and let DC implement the proper check.
4637 static const uint32_t rgb_formats[] = {
4638 DRM_FORMAT_XRGB8888,
4639 DRM_FORMAT_ARGB8888,
4640 DRM_FORMAT_RGBA8888,
4641 DRM_FORMAT_XRGB2101010,
4642 DRM_FORMAT_XBGR2101010,
4643 DRM_FORMAT_ARGB2101010,
4644 DRM_FORMAT_ABGR2101010,
4645 DRM_FORMAT_XBGR8888,
4646 DRM_FORMAT_ABGR8888,
4650 static const uint32_t overlay_formats[] = {
4651 DRM_FORMAT_XRGB8888,
4652 DRM_FORMAT_ARGB8888,
4653 DRM_FORMAT_RGBA8888,
4654 DRM_FORMAT_XBGR8888,
4655 DRM_FORMAT_ABGR8888,
4659 static const u32 cursor_formats[] = {
4663 static int get_plane_formats(const struct drm_plane *plane,
4664 const struct dc_plane_cap *plane_cap,
4665 uint32_t *formats, int max_formats)
4667 int i, num_formats = 0;
4670 * TODO: Query support for each group of formats directly from
4671 * DC plane caps. This will require adding more formats to the format lists above.
4675 switch (plane->type) {
4676 case DRM_PLANE_TYPE_PRIMARY:
4677 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
4678 if (num_formats >= max_formats)
4681 formats[num_formats++] = rgb_formats[i];
4684 if (plane_cap && plane_cap->pixel_format_support.nv12)
4685 formats[num_formats++] = DRM_FORMAT_NV12;
4688 case DRM_PLANE_TYPE_OVERLAY:
4689 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
4690 if (num_formats >= max_formats)
4693 formats[num_formats++] = overlay_formats[i];
4697 case DRM_PLANE_TYPE_CURSOR:
4698 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
4699 if (num_formats >= max_formats)
4702 formats[num_formats++] = cursor_formats[i];
4710 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
4711 struct drm_plane *plane,
4712 unsigned long possible_crtcs,
4713 const struct dc_plane_cap *plane_cap)
4715 uint32_t formats[32];
4719 num_formats = get_plane_formats(plane, plane_cap, formats,
4720 ARRAY_SIZE(formats));
4722 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
4723 &dm_plane_funcs, formats, num_formats,
4724 NULL, plane->type, NULL);
4728 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
4729 plane_cap && plane_cap->per_pixel_alpha) {
4730 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
4731 BIT(DRM_MODE_BLEND_PREMULTI);
4733 drm_plane_create_alpha_property(plane);
4734 drm_plane_create_blend_mode_property(plane, blend_caps);
4737 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
4738 plane_cap && plane_cap->pixel_format_support.nv12) {
4739 /* This only affects YUV formats. */
4740 drm_plane_create_color_properties(
4742 BIT(DRM_COLOR_YCBCR_BT601) |
4743 BIT(DRM_COLOR_YCBCR_BT709),
4744 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
4745 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
4746 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
4749 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
4751 /* Create (reset) the plane state */
4752 if (plane->funcs->reset)
4753 plane->funcs->reset(plane);
4758 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
4759 struct drm_plane *plane,
4760 uint32_t crtc_index)
4762 struct amdgpu_crtc *acrtc = NULL;
4763 struct drm_plane *cursor_plane;
4767 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
4771 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
4772 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
4774 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
4778 res = drm_crtc_init_with_planes(
4783 &amdgpu_dm_crtc_funcs, NULL);
4788 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
4790 /* Create (reset) the plane state */
4791 if (acrtc->base.funcs->reset)
4792 acrtc->base.funcs->reset(&acrtc->base);
4794 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
4795 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
4797 acrtc->crtc_id = crtc_index;
4798 acrtc->base.enabled = false;
4799 acrtc->otg_inst = -1;
4801 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
4802 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
4803 true, MAX_COLOR_LUT_ENTRIES);
4804 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
4810 kfree(cursor_plane);
4815 static int to_drm_connector_type(enum signal_type st)
4818 case SIGNAL_TYPE_HDMI_TYPE_A:
4819 return DRM_MODE_CONNECTOR_HDMIA;
4820 case SIGNAL_TYPE_EDP:
4821 return DRM_MODE_CONNECTOR_eDP;
4822 case SIGNAL_TYPE_LVDS:
4823 return DRM_MODE_CONNECTOR_LVDS;
4824 case SIGNAL_TYPE_RGB:
4825 return DRM_MODE_CONNECTOR_VGA;
4826 case SIGNAL_TYPE_DISPLAY_PORT:
4827 case SIGNAL_TYPE_DISPLAY_PORT_MST:
4828 return DRM_MODE_CONNECTOR_DisplayPort;
4829 case SIGNAL_TYPE_DVI_DUAL_LINK:
4830 case SIGNAL_TYPE_DVI_SINGLE_LINK:
4831 return DRM_MODE_CONNECTOR_DVID;
4832 case SIGNAL_TYPE_VIRTUAL:
4833 return DRM_MODE_CONNECTOR_VIRTUAL;
4836 return DRM_MODE_CONNECTOR_Unknown;
4840 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
4842 return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
4845 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
4847 struct drm_encoder *encoder;
4848 struct amdgpu_encoder *amdgpu_encoder;
4850 encoder = amdgpu_dm_connector_to_encoder(connector);
4852 if (encoder == NULL)
4855 amdgpu_encoder = to_amdgpu_encoder(encoder);
4857 amdgpu_encoder->native_mode.clock = 0;
4859 if (!list_empty(&connector->probed_modes)) {
4860 struct drm_display_mode *preferred_mode = NULL;
4862 list_for_each_entry(preferred_mode,
4863 &connector->probed_modes,
4865 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
4866 amdgpu_encoder->native_mode = *preferred_mode;
4874 static struct drm_display_mode *
4875 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
4877 int hdisplay, int vdisplay)
4879 struct drm_device *dev = encoder->dev;
4880 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4881 struct drm_display_mode *mode = NULL;
4882 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4884 mode = drm_mode_duplicate(dev, native_mode);
4889 mode->hdisplay = hdisplay;
4890 mode->vdisplay = vdisplay;
4891 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
4892 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
4898 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
4899 struct drm_connector *connector)
4901 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
4902 struct drm_display_mode *mode = NULL;
4903 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
4904 struct amdgpu_dm_connector *amdgpu_dm_connector =
4905 to_amdgpu_dm_connector(connector);
4909 char name[DRM_DISPLAY_MODE_LEN];
4912 } common_modes[] = {
4913 { "640x480", 640, 480},
4914 { "800x600", 800, 600},
4915 { "1024x768", 1024, 768},
4916 { "1280x720", 1280, 720},
4917 { "1280x800", 1280, 800},
4918 {"1280x1024", 1280, 1024},
4919 { "1440x900", 1440, 900},
4920 {"1680x1050", 1680, 1050},
4921 {"1600x1200", 1600, 1200},
4922 {"1920x1080", 1920, 1080},
4923 {"1920x1200", 1920, 1200}
4926 n = ARRAY_SIZE(common_modes);
4928 for (i = 0; i < n; i++) {
4929 struct drm_display_mode *curmode = NULL;
4930 bool mode_existed = false;
4932 if (common_modes[i].w > native_mode->hdisplay ||
4933 common_modes[i].h > native_mode->vdisplay ||
4934 (common_modes[i].w == native_mode->hdisplay &&
4935 common_modes[i].h == native_mode->vdisplay))
4938 list_for_each_entry(curmode, &connector->probed_modes, head) {
4939 if (common_modes[i].w == curmode->hdisplay &&
4940 common_modes[i].h == curmode->vdisplay) {
4941 mode_existed = true;
4949 mode = amdgpu_dm_create_common_mode(encoder,
4950 common_modes[i].name, common_modes[i].w,
4952 drm_mode_probed_add(connector, mode);
4953 amdgpu_dm_connector->num_modes++;
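/*
 * Illustrative example of the filtering above (hypothetical panel): with a
 * 1920x1080 native mode, the common modes up to 1680x1050 are added unless
 * an identical mode was already probed, 1600x1200 and 1920x1200 are skipped
 * because their vertical size exceeds the native 1080 lines, and 1920x1080
 * itself is skipped because it duplicates the native mode.
 */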
4957 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
4960 struct amdgpu_dm_connector *amdgpu_dm_connector =
4961 to_amdgpu_dm_connector(connector);
4964 /* empty probed_modes */
4965 INIT_LIST_HEAD(&connector->probed_modes);
4966 amdgpu_dm_connector->num_modes =
4967 drm_add_edid_modes(connector, edid);
4969 /* Sort the probed modes before calling
4970 * amdgpu_dm_get_native_mode(), since an EDID can have
4971 * more than one preferred mode. Modes that appear
4972 * later in the probed mode list could be of a higher,
4973 * preferred resolution. For example, 3840x2160 as the
4974 * base EDID preferred timing and 4096x2160 as the
4975 * preferred resolution in a later DID extension block.
4977 drm_mode_sort(&connector->probed_modes);
4978 amdgpu_dm_get_native_mode(connector);
4980 amdgpu_dm_connector->num_modes = 0;
4984 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
4986 struct amdgpu_dm_connector *amdgpu_dm_connector =
4987 to_amdgpu_dm_connector(connector);
4988 struct drm_encoder *encoder;
4989 struct edid *edid = amdgpu_dm_connector->edid;
4991 encoder = amdgpu_dm_connector_to_encoder(connector);
4993 if (!edid || !drm_edid_is_valid(edid)) {
4994 amdgpu_dm_connector->num_modes =
4995 drm_add_modes_noedid(connector, 640, 480);
4997 amdgpu_dm_connector_ddc_get_modes(connector, edid);
4998 amdgpu_dm_connector_add_common_modes(encoder, connector);
5000 amdgpu_dm_fbc_init(connector);
5002 return amdgpu_dm_connector->num_modes;
5005 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5006 struct amdgpu_dm_connector *aconnector,
5008 struct dc_link *link,
5011 struct amdgpu_device *adev = dm->ddev->dev_private;
5014 * Some of the properties below require access to state, like bpc.
5015 * Allocate some default initial connector state with our reset helper.
5017 if (aconnector->base.funcs->reset)
5018 aconnector->base.funcs->reset(&aconnector->base);
5020 aconnector->connector_id = link_index;
5021 aconnector->dc_link = link;
5022 aconnector->base.interlace_allowed = false;
5023 aconnector->base.doublescan_allowed = false;
5024 aconnector->base.stereo_allowed = false;
5025 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5026 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5027 aconnector->audio_inst = -1;
5028 mutex_init(&aconnector->hpd_lock);
5031 * Configure HPD hot plug support. The connector->polled default value is 0,
5032 * which means HPD hot plug is not supported.
5034 switch (connector_type) {
5035 case DRM_MODE_CONNECTOR_HDMIA:
5036 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5037 aconnector->base.ycbcr_420_allowed =
5038 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5040 case DRM_MODE_CONNECTOR_DisplayPort:
5041 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5042 aconnector->base.ycbcr_420_allowed =
5043 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5045 case DRM_MODE_CONNECTOR_DVID:
5046 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5052 drm_object_attach_property(&aconnector->base.base,
5053 dm->ddev->mode_config.scaling_mode_property,
5054 DRM_MODE_SCALE_NONE);
5056 drm_object_attach_property(&aconnector->base.base,
5057 adev->mode_info.underscan_property,
5059 drm_object_attach_property(&aconnector->base.base,
5060 adev->mode_info.underscan_hborder_property,
5062 drm_object_attach_property(&aconnector->base.base,
5063 adev->mode_info.underscan_vborder_property,
5066 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5068 /* This defaults to the max in the range, but we want 8bpc. */
5069 aconnector->base.state->max_bpc = 8;
5070 aconnector->base.state->max_requested_bpc = 8;
5072 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5073 dc_is_dmcu_initialized(adev->dm.dc)) {
5074 drm_object_attach_property(&aconnector->base.base,
5075 adev->mode_info.abm_level_property, 0);
5078 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5079 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5080 connector_type == DRM_MODE_CONNECTOR_eDP) {
5081 drm_object_attach_property(
5082 &aconnector->base.base,
5083 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5085 drm_connector_attach_vrr_capable_property(
5090 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5091 struct i2c_msg *msgs, int num)
5093 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5094 struct ddc_service *ddc_service = i2c->ddc_service;
5095 struct i2c_command cmd;
5099 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5104 cmd.number_of_payloads = num;
5105 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5108 for (i = 0; i < num; i++) {
5109 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5110 cmd.payloads[i].address = msgs[i].addr;
5111 cmd.payloads[i].length = msgs[i].len;
5112 cmd.payloads[i].data = msgs[i].buf;
5116 ddc_service->ctx->dc,
5117 ddc_service->ddc_pin->hw_info.ddc_channel,
5121 kfree(cmd.payloads);
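/*
 * Illustrative example (hypothetical values, not part of the driver): a
 * plain EDID read typically arrives as two i2c_msg entries,
 *
 *     struct i2c_msg msgs[] = {
 *         { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
 *         { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid_buf },
 *     };
 *
 * which the loop above converts into two i2c_payloads -- the first with
 * write = true and length = 1, the second with write = false and
 * length = 128, both addressed to 0x50 -- and submits to DC as a single
 * i2c_command.
 */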
5125 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5127 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5130 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5131 .master_xfer = amdgpu_dm_i2c_xfer,
5132 .functionality = amdgpu_dm_i2c_func,
5135 static struct amdgpu_i2c_adapter *
5136 create_i2c(struct ddc_service *ddc_service,
5140 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5141 struct amdgpu_i2c_adapter *i2c;
5143 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
5146 i2c->base.owner = THIS_MODULE;
5147 i2c->base.class = I2C_CLASS_DDC;
5148 i2c->base.dev.parent = &adev->pdev->dev;
5149 i2c->base.algo = &amdgpu_dm_i2c_algo;
5150 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
5151 i2c_set_adapdata(&i2c->base, i2c);
5152 i2c->ddc_service = ddc_service;
5153 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
5160 * Note: this function assumes that dc_link_detect() was called for the
5161 * dc_link which will be represented by this aconnector.
5163 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5164 struct amdgpu_dm_connector *aconnector,
5165 uint32_t link_index,
5166 struct amdgpu_encoder *aencoder)
5170 struct dc *dc = dm->dc;
5171 struct dc_link *link = dc_get_link_at_index(dc, link_index);
5172 struct amdgpu_i2c_adapter *i2c;
5174 link->priv = aconnector;
5176 DRM_DEBUG_DRIVER("%s()\n", __func__);
5178 i2c = create_i2c(link->ddc, link->link_index, &res);
5180 DRM_ERROR("Failed to create i2c adapter data\n");
5184 aconnector->i2c = i2c;
5185 res = i2c_add_adapter(&i2c->base);
5188 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
5192 connector_type = to_drm_connector_type(link->connector_signal);
5194 res = drm_connector_init(
5197 &amdgpu_dm_connector_funcs,
5201 DRM_ERROR("connector_init failed\n");
5202 aconnector->connector_id = -1;
5206 drm_connector_helper_add(
5208 &amdgpu_dm_connector_helper_funcs);
5210 amdgpu_dm_connector_init_helper(
5217 drm_connector_attach_encoder(
5218 &aconnector->base, &aencoder->base);
5220 drm_connector_register(&aconnector->base);
5221 #if defined(CONFIG_DEBUG_FS)
5222 connector_debugfs_init(aconnector);
5223 aconnector->debugfs_dpcd_address = 0;
5224 aconnector->debugfs_dpcd_size = 0;
5227 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
5228 || connector_type == DRM_MODE_CONNECTOR_eDP)
5229 amdgpu_dm_initialize_dp_connector(dm, aconnector);
5234 aconnector->i2c = NULL;
5239 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
5241 switch (adev->mode_info.num_crtc) {
5258 static int amdgpu_dm_encoder_init(struct drm_device *dev,
5259 struct amdgpu_encoder *aencoder,
5260 uint32_t link_index)
5262 struct amdgpu_device *adev = dev->dev_private;
5264 int res = drm_encoder_init(dev,
5266 &amdgpu_dm_encoder_funcs,
5267 DRM_MODE_ENCODER_TMDS,
5270 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
5273 aencoder->encoder_id = link_index;
5275 aencoder->encoder_id = -1;
5277 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
5282 static void manage_dm_interrupts(struct amdgpu_device *adev,
5283 struct amdgpu_crtc *acrtc,
5287 * This is not a correct translation, but it will work as long as the VBLANK
5288 * constant is the same as the PFLIP constant.
5291 amdgpu_display_crtc_idx_to_irq_type(
5296 drm_crtc_vblank_on(&acrtc->base);
5299 &adev->pageflip_irq,
5305 &adev->pageflip_irq,
5307 drm_crtc_vblank_off(&acrtc->base);
5312 is_scaling_state_different(const struct dm_connector_state *dm_state,
5313 const struct dm_connector_state *old_dm_state)
5315 if (dm_state->scaling != old_dm_state->scaling)
5317 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
5318 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
5320 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
5321 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
5323 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
5324 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
5329 static void remove_stream(struct amdgpu_device *adev,
5330 struct amdgpu_crtc *acrtc,
5331 struct dc_stream_state *stream)
5333 /* this is the update mode case */
5335 acrtc->otg_inst = -1;
5336 acrtc->enabled = false;
5339 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
5340 struct dc_cursor_position *position)
5342 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5344 int xorigin = 0, yorigin = 0;
5346 position->enable = false;
5350 if (!crtc || !plane->state->fb)
5353 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
5354 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
5355 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
5357 plane->state->crtc_w,
5358 plane->state->crtc_h);
5362 x = plane->state->crtc_x;
5363 y = plane->state->crtc_y;
5365 if (x <= -amdgpu_crtc->max_cursor_width ||
5366 y <= -amdgpu_crtc->max_cursor_height)
5369 if (crtc->primary->state) {
5370 /* avivo cursors are offset into the total surface */
5371 x += crtc->primary->state->src_x >> 16;
5372 y += crtc->primary->state->src_y >> 16;
5376 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
5380 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
5383 position->enable = true;
5386 position->x_hotspot = xorigin;
5387 position->y_hotspot = yorigin;
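/*
 * Illustrative arithmetic for the clamping above (hypothetical values): with
 * a 64x64 cursor, max_cursor_width = 128 and crtc_x = -16, the cursor
 * straddles the left screen edge, so xorigin = min(-(-16), 128 - 1) = 16 and
 * position->x_hotspot is set to 16, accounting for the 16 cursor columns
 * that fall off the left edge.
 */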
5392 static void handle_cursor_update(struct drm_plane *plane,
5393 struct drm_plane_state *old_plane_state)
5395 struct amdgpu_device *adev = plane->dev->dev_private;
5396 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
5397 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
5398 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
5399 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
5400 uint64_t address = afb ? afb->address : 0;
5401 struct dc_cursor_position position;
5402 struct dc_cursor_attributes attributes;
5405 if (!plane->state->fb && !old_plane_state->fb)
5408 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
5410 amdgpu_crtc->crtc_id,
5411 plane->state->crtc_w,
5412 plane->state->crtc_h);
5414 ret = get_cursor_position(plane, crtc, &position);
5418 if (!position.enable) {
5419 /* turn off cursor */
5420 if (crtc_state && crtc_state->stream) {
5421 mutex_lock(&adev->dm.dc_lock);
5422 dc_stream_set_cursor_position(crtc_state->stream,
5424 mutex_unlock(&adev->dm.dc_lock);
5429 amdgpu_crtc->cursor_width = plane->state->crtc_w;
5430 amdgpu_crtc->cursor_height = plane->state->crtc_h;
5432 memset(&attributes, 0, sizeof(attributes));
5433 attributes.address.high_part = upper_32_bits(address);
5434 attributes.address.low_part = lower_32_bits(address);
5435 attributes.width = plane->state->crtc_w;
5436 attributes.height = plane->state->crtc_h;
5437 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
5438 attributes.rotation_angle = 0;
5439 attributes.attribute_flags.value = 0;
5441 attributes.pitch = attributes.width;
5443 if (crtc_state->stream) {
5444 mutex_lock(&adev->dm.dc_lock);
5445 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
5447 DRM_ERROR("DC failed to set cursor attributes\n");
5449 if (!dc_stream_set_cursor_position(crtc_state->stream,
5451 DRM_ERROR("DC failed to set cursor position\n");
5452 mutex_unlock(&adev->dm.dc_lock);
5456 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
5459 assert_spin_locked(&acrtc->base.dev->event_lock);
5460 WARN_ON(acrtc->event);
5462 acrtc->event = acrtc->base.state->event;
5464 /* Set the flip status */
5465 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
5467 /* Mark this event as consumed */
5468 acrtc->base.state->event = NULL;
5470 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
5474 static void update_freesync_state_on_stream(
5475 struct amdgpu_display_manager *dm,
5476 struct dm_crtc_state *new_crtc_state,
5477 struct dc_stream_state *new_stream,
5478 struct dc_plane_state *surface,
5479 u32 flip_timestamp_in_us)
5481 struct mod_vrr_params vrr_params;
5482 struct dc_info_packet vrr_infopacket = {0};
5483 struct amdgpu_device *adev = dm->adev;
5484 unsigned long flags;
5490 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5491 * For now it's sufficient to just guard against these conditions.
5494 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5497 spin_lock_irqsave(&adev->ddev->event_lock, flags);
5498 vrr_params = new_crtc_state->vrr_params;
5501 mod_freesync_handle_preflip(
5502 dm->freesync_module,
5505 flip_timestamp_in_us,
5508 if (adev->family < AMDGPU_FAMILY_AI &&
5509 amdgpu_dm_vrr_active(new_crtc_state)) {
5510 mod_freesync_handle_v_update(dm->freesync_module,
5511 new_stream, &vrr_params);
5513 /* Need to call this before the frame ends. */
5514 dc_stream_adjust_vmin_vmax(dm->dc,
5515 new_crtc_state->stream,
5516 &vrr_params.adjust);
5520 mod_freesync_build_vrr_infopacket(
5521 dm->freesync_module,
5525 TRANSFER_FUNC_UNKNOWN,
5528 new_crtc_state->freesync_timing_changed |=
5529 (memcmp(&new_crtc_state->vrr_params.adjust,
5531 sizeof(vrr_params.adjust)) != 0);
5533 new_crtc_state->freesync_vrr_info_changed |=
5534 (memcmp(&new_crtc_state->vrr_infopacket,
5536 sizeof(vrr_infopacket)) != 0);
5538 new_crtc_state->vrr_params = vrr_params;
5539 new_crtc_state->vrr_infopacket = vrr_infopacket;
5541 new_stream->adjust = new_crtc_state->vrr_params.adjust;
5542 new_stream->vrr_infopacket = vrr_infopacket;
5544 if (new_crtc_state->freesync_vrr_info_changed)
5545 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
5546 new_crtc_state->base.crtc->base.id,
5547 (int)new_crtc_state->base.vrr_enabled,
5548 (int)vrr_params.state);
5550 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5553 static void pre_update_freesync_state_on_stream(
5554 struct amdgpu_display_manager *dm,
5555 struct dm_crtc_state *new_crtc_state)
5557 struct dc_stream_state *new_stream = new_crtc_state->stream;
5558 struct mod_vrr_params vrr_params;
5559 struct mod_freesync_config config = new_crtc_state->freesync_config;
5560 struct amdgpu_device *adev = dm->adev;
5561 unsigned long flags;
5567 * TODO: Determine why min/max totals and vrefresh can be 0 here.
5568 * For now it's sufficient to just guard against these conditions.
5570 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
5573 spin_lock_irqsave(&adev->ddev->event_lock, flags);
5574 vrr_params = new_crtc_state->vrr_params;
5576 if (new_crtc_state->vrr_supported &&
5577 config.min_refresh_in_uhz &&
5578 config.max_refresh_in_uhz) {
5579 config.state = new_crtc_state->base.vrr_enabled ?
5580 VRR_STATE_ACTIVE_VARIABLE :
5583 config.state = VRR_STATE_UNSUPPORTED;
5586 mod_freesync_build_vrr_params(dm->freesync_module,
5588 &config, &vrr_params);
5590 new_crtc_state->freesync_timing_changed |=
5591 (memcmp(&new_crtc_state->vrr_params.adjust,
5593 sizeof(vrr_params.adjust)) != 0);
5595 new_crtc_state->vrr_params = vrr_params;
5596 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
5599 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
5600 struct dm_crtc_state *new_state)
5602 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
5603 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
5605 if (!old_vrr_active && new_vrr_active) {
5606 /* Transition VRR inactive -> active:
5607 * While VRR is active, we must not disable vblank irq, as a
5608 * re-enable after a disable would compute bogus vblank/pflip
5609 * timestamps if it happened inside the display front porch.
5611 * We also need vupdate irq for the actual core vblank handling
5614 dm_set_vupdate_irq(new_state->base.crtc, true);
5615 drm_crtc_vblank_get(new_state->base.crtc);
5616 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
5617 __func__, new_state->base.crtc->base.id);
5618 } else if (old_vrr_active && !new_vrr_active) {
5619 /* Transition VRR active -> inactive:
5620 * Allow vblank irq disable again for fixed refresh rate.
5622 dm_set_vupdate_irq(new_state->base.crtc, false);
5623 drm_crtc_vblank_put(new_state->base.crtc);
5624 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
5625 __func__, new_state->base.crtc->base.id);
5629 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
5631 struct drm_plane *plane;
5632 struct drm_plane_state *old_plane_state, *new_plane_state;
5636 * TODO: Make this per-stream so we don't issue redundant updates for
5637 * commits with multiple streams.
5639 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
5641 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5642 handle_cursor_update(plane, old_plane_state);
5645 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
5646 struct dc_state *dc_state,
5647 struct drm_device *dev,
5648 struct amdgpu_display_manager *dm,
5649 struct drm_crtc *pcrtc,
5650 bool wait_for_vblank)
5653 uint64_t timestamp_ns;
5654 struct drm_plane *plane;
5655 struct drm_plane_state *old_plane_state, *new_plane_state;
5656 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
5657 struct drm_crtc_state *new_pcrtc_state =
5658 drm_atomic_get_new_crtc_state(state, pcrtc);
5659 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
5660 struct dm_crtc_state *dm_old_crtc_state =
5661 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
5662 int planes_count = 0, vpos, hpos;
5664 unsigned long flags;
5665 struct amdgpu_bo *abo;
5666 uint64_t tiling_flags;
5667 uint32_t target_vblank, last_flip_vblank;
5668 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
5669 bool pflip_present = false;
5671 struct dc_surface_update surface_updates[MAX_SURFACES];
5672 struct dc_plane_info plane_infos[MAX_SURFACES];
5673 struct dc_scaling_info scaling_infos[MAX_SURFACES];
5674 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
5675 struct dc_stream_update stream_update;
5678 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
5681 dm_error("Failed to allocate update bundle\n");
5686 * Disable the cursor first if we're disabling all the planes.
5687 * It'll remain on the screen after the planes are re-enabled
5690 if (acrtc_state->active_planes == 0)
5691 amdgpu_dm_commit_cursors(state);
5693 /* update planes when needed */
5694 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
5695 struct drm_crtc *crtc = new_plane_state->crtc;
5696 struct drm_crtc_state *new_crtc_state;
5697 struct drm_framebuffer *fb = new_plane_state->fb;
5698 bool plane_needs_flip;
5699 struct dc_plane_state *dc_plane;
5700 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
5702 /* Cursor plane is handled after stream updates */
5703 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5706 if (!fb || !crtc || pcrtc != crtc)
5709 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
5710 if (!new_crtc_state->active)
5713 dc_plane = dm_new_plane_state->dc_state;
5715 bundle->surface_updates[planes_count].surface = dc_plane;
5716 if (new_pcrtc_state->color_mgmt_changed) {
5717 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
5718 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
5721 fill_dc_scaling_info(new_plane_state,
5722 &bundle->scaling_infos[planes_count]);
5724 bundle->surface_updates[planes_count].scaling_info =
5725 &bundle->scaling_infos[planes_count];
5727 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
5729 pflip_present = pflip_present || plane_needs_flip;
5731 if (!plane_needs_flip) {
5736 abo = gem_to_amdgpu_bo(fb->obj[0]);
5739 * Wait for all fences on this FB. Do limited wait to avoid
5740 * deadlock during GPU reset when this fence will not signal
5741 * but we hold reservation lock for the BO.
5743 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
5745 msecs_to_jiffies(5000));
5746 if (unlikely(r <= 0))
5747 DRM_ERROR("Waiting for fences timed out!");
5750 * TODO: This might fail and hence is better not used; wait
5751 * explicitly on fences instead, and in general this should
5752 * only be called for a blocking commit, as per the
5753 * framework helpers.
5755 r = amdgpu_bo_reserve(abo, true);
5756 if (unlikely(r != 0))
5757 DRM_ERROR("failed to reserve buffer before flip\n");
5759 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
5761 amdgpu_bo_unreserve(abo);
5763 fill_dc_plane_info_and_addr(
5764 dm->adev, new_plane_state, tiling_flags,
5765 &bundle->plane_infos[planes_count],
5766 &bundle->flip_addrs[planes_count].address);
5768 bundle->surface_updates[planes_count].plane_info =
5769 &bundle->plane_infos[planes_count];
5772 * Only allow immediate flips for fast updates that don't
5773 * change FB pitch, DCC state, rotation or mirroring.
5775 bundle->flip_addrs[planes_count].flip_immediate =
5776 (crtc->state->pageflip_flags &
5777 DRM_MODE_PAGE_FLIP_ASYNC) != 0 &&
5778 acrtc_state->update_type == UPDATE_TYPE_FAST;
5780 timestamp_ns = ktime_get_ns();
5781 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
5782 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
5783 bundle->surface_updates[planes_count].surface = dc_plane;
5785 if (!bundle->surface_updates[planes_count].surface) {
5786 DRM_ERROR("No surface for CRTC: id=%d\n",
5787 acrtc_attach->crtc_id);
5791 if (plane == pcrtc->primary)
5792 update_freesync_state_on_stream(
5795 acrtc_state->stream,
5797 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
5799 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
5801 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
5802 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
5808 if (pflip_present) {
5810 /* Use old throttling in non-vrr fixed refresh rate mode
5811 * to keep flip scheduling based on target vblank counts
5812 * working in a backwards compatible way, e.g., for
5813 * clients using the GLX_OML_sync_control extension or
5814 * DRI3/Present extension with defined target_msc.
5816 last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
5819 /* For variable refresh rate mode only:
5820 * Get vblank of last completed flip to avoid > 1 vrr
5821 * flips per video frame by use of throttling, but allow
5822 * flip programming anywhere in the possibly large
5823 * variable vrr vblank interval for fine-grained flip
5824 * timing control and more opportunity to avoid stutter
5825 * on late submission of flips.
5827 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5828 last_flip_vblank = acrtc_attach->last_flip_vblank;
5829 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5832 target_vblank = last_flip_vblank + wait_for_vblank;
5835 * Wait until we're out of the vertical blank period before the one
5836 * targeted by the flip
5838 while ((acrtc_attach->enabled &&
5839 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
5840 0, &vpos, &hpos, NULL,
5841 NULL, &pcrtc->hwmode)
5842 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
5843 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
5844 (int)(target_vblank -
5845 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
5846 usleep_range(1000, 1100);
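/*
 * Illustrative reading of the throttle above: with wait_for_vblank == true,
 * target_vblank is one vblank past the last completed flip, so the loop
 * polls in roughly 1 ms steps while the scanout is still inside a vblank
 * that precedes the target one, and falls through once the scanout leaves
 * vblank or the CRTC's vblank counter reaches target_vblank -- only then is
 * the new flip programmed.
 */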
5849 if (acrtc_attach->base.state->event) {
5850 drm_crtc_vblank_get(pcrtc);
5852 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5854 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
5855 prepare_flip_isr(acrtc_attach);
5857 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5860 if (acrtc_state->stream) {
5861 if (acrtc_state->freesync_vrr_info_changed)
5862 bundle->stream_update.vrr_infopacket =
5863 &acrtc_state->stream->vrr_infopacket;
5867 /* Update the planes if changed or disable if we don't have any. */
5868 if ((planes_count || acrtc_state->active_planes == 0) &&
5869 acrtc_state->stream) {
5870 bundle->stream_update.stream = acrtc_state->stream;
5871 if (new_pcrtc_state->mode_changed) {
5872 bundle->stream_update.src = acrtc_state->stream->src;
5873 bundle->stream_update.dst = acrtc_state->stream->dst;
5876 if (new_pcrtc_state->color_mgmt_changed) {
5878 * TODO: This isn't fully correct since we've actually
5879 * already modified the stream in place.
5881 bundle->stream_update.gamut_remap =
5882 &acrtc_state->stream->gamut_remap_matrix;
5883 bundle->stream_update.output_csc_transform =
5884 &acrtc_state->stream->csc_color_matrix;
5885 bundle->stream_update.out_transfer_func =
5886 acrtc_state->stream->out_transfer_func;
5889 acrtc_state->stream->abm_level = acrtc_state->abm_level;
5890 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
5891 bundle->stream_update.abm_level = &acrtc_state->abm_level;
5894 * If FreeSync state on the stream has changed then we need to
5895 * re-adjust the min/max bounds now that DC doesn't handle this
5896 * as part of commit.
5898 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
5899 amdgpu_dm_vrr_active(acrtc_state)) {
5900 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
5901 dc_stream_adjust_vmin_vmax(
5902 dm->dc, acrtc_state->stream,
5903 &acrtc_state->vrr_params.adjust);
5904 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
5907 mutex_lock(&dm->dc_lock);
5908 dc_commit_updates_for_stream(dm->dc,
5909 bundle->surface_updates,
5911 acrtc_state->stream,
5912 &bundle->stream_update,
5914 mutex_unlock(&dm->dc_lock);
5918 * Update cursor state *after* programming all the planes.
5919 * This avoids redundant programming in the case where we're going
5920 * to be disabling a single plane - those pipes are being disabled.
5922 if (acrtc_state->active_planes)
5923 amdgpu_dm_commit_cursors(state);
5929 static void amdgpu_dm_commit_audio(struct drm_device *dev,
5930 struct drm_atomic_state *state)
5932 struct amdgpu_device *adev = dev->dev_private;
5933 struct amdgpu_dm_connector *aconnector;
5934 struct drm_connector *connector;
5935 struct drm_connector_state *old_con_state, *new_con_state;
5936 struct drm_crtc_state *new_crtc_state;
5937 struct dm_crtc_state *new_dm_crtc_state;
5938 const struct dc_stream_status *status;
5941 /* Notify device removals. */
5942 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5943 if (old_con_state->crtc != new_con_state->crtc) {
5944 /* CRTC changes require notification. */
5948 if (!new_con_state->crtc)
5951 new_crtc_state = drm_atomic_get_new_crtc_state(
5952 state, new_con_state->crtc);
5954 if (!new_crtc_state)
5957 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5961 aconnector = to_amdgpu_dm_connector(connector);
5963 mutex_lock(&adev->dm.audio_lock);
5964 inst = aconnector->audio_inst;
5965 aconnector->audio_inst = -1;
5966 mutex_unlock(&adev->dm.audio_lock);
5968 amdgpu_dm_audio_eld_notify(adev, inst);
5971 /* Notify audio device additions. */
5972 for_each_new_connector_in_state(state, connector, new_con_state, i) {
5973 if (!new_con_state->crtc)
5976 new_crtc_state = drm_atomic_get_new_crtc_state(
5977 state, new_con_state->crtc);
5979 if (!new_crtc_state)
5982 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
5985 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
5986 if (!new_dm_crtc_state->stream)
5989 status = dc_stream_get_status(new_dm_crtc_state->stream);
5993 aconnector = to_amdgpu_dm_connector(connector);
5995 mutex_lock(&adev->dm.audio_lock);
5996 inst = status->audio_inst;
5997 aconnector->audio_inst = inst;
5998 mutex_unlock(&adev->dm.audio_lock);
6000 amdgpu_dm_audio_eld_notify(adev, inst);
6005 * Enable interrupts on CRTCs that are newly active, have undergone
6006 * a modeset, or have active planes again.
6008 * Done in two passes, based on the for_modeset flag:
6009 * Pass 1: For CRTCs going through modeset
6010 * Pass 2: For CRTCs going from 0 to n active planes
6012 * Interrupts can only be enabled after the planes are programmed,
6013 * so this requires a two-pass approach since we don't want to
6014 * just defer the interrupts until after commit planes every time.
6016 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6017 struct drm_atomic_state *state,
6020 struct amdgpu_device *adev = dev->dev_private;
6021 struct drm_crtc *crtc;
6022 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6024 enum amdgpu_dm_pipe_crc_source source;
6026 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6027 new_crtc_state, i) {
6028 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6029 struct dm_crtc_state *dm_new_crtc_state =
6030 to_dm_crtc_state(new_crtc_state);
6031 struct dm_crtc_state *dm_old_crtc_state =
6032 to_dm_crtc_state(old_crtc_state);
6033 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6036 run_pass = (for_modeset && modeset) ||
6037 (!for_modeset && !modeset &&
6038 !dm_old_crtc_state->interrupts_enabled);
6043 if (!dm_new_crtc_state->interrupts_enabled)
6046 manage_dm_interrupts(adev, acrtc, true);
6048 #ifdef CONFIG_DEBUG_FS
6049 /* The stream has changed so CRC capture needs to be re-enabled. */
6050 source = dm_new_crtc_state->crc_src;
6051 if (amdgpu_dm_is_valid_crc_source(source)) {
6052 amdgpu_dm_crtc_configure_crc_source(
6053 crtc, dm_new_crtc_state,
6054 dm_new_crtc_state->crc_src);
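/*
 * Illustrative example of the two-pass scheme above: a CRTC that received a
 * new mode in this commit is handled in the for_modeset == true pass, while
 * a CRTC whose mode is unchanged but which previously had its interrupts
 * disabled (no active planes) and now gains an active plane is picked up by
 * the for_modeset == false pass.
 */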
6061 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6062 * @crtc_state: the DRM CRTC state
6063 * @stream_state: the DC stream state.
6065 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6066 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6068 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6069 struct dc_stream_state *stream_state)
6071 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6074 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6075 struct drm_atomic_state *state,
6078 struct drm_crtc *crtc;
6079 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6080 struct amdgpu_device *adev = dev->dev_private;
6084 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6085 * a modeset, being disabled, or have no active planes.
6087 * It's done in atomic commit rather than commit tail for now since
6088 * some of these interrupt handlers access the current CRTC state and
6089 * potentially the stream pointer itself.
6091 * Since the atomic state is swapped within atomic commit and not within
6092 * commit tail, this would lead to the new state (that hasn't been committed
6093 * yet) being accessed from within the handlers.
6095 * TODO: Fix this so we can do this in commit tail and not have to block
6098 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6099 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6100 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6101 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6103 if (dm_old_crtc_state->interrupts_enabled &&
6104 (!dm_new_crtc_state->interrupts_enabled ||
6105 drm_atomic_crtc_needs_modeset(new_crtc_state)))
6106 manage_dm_interrupts(adev, acrtc, false);
6109 * Add a check here for SoCs that support a hardware cursor plane, to
6110 * unset legacy_cursor_update
6113 return drm_atomic_helper_commit(dev, state, nonblock);
6115 /*TODO Handle EINTR, reenable IRQ*/
6119 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
6120 * @state: The atomic state to commit
6122 * This will tell DC to commit the constructed DC state from atomic_check,
6123 * programming the hardware. Any failure here implies a hardware failure, since
6124 * atomic check should have filtered out anything non-kosher.
6126 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
6128 struct drm_device *dev = state->dev;
6129 struct amdgpu_device *adev = dev->dev_private;
6130 struct amdgpu_display_manager *dm = &adev->dm;
6131 struct dm_atomic_state *dm_state;
6132 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
6134 struct drm_crtc *crtc;
6135 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6136 unsigned long flags;
6137 bool wait_for_vblank = true;
6138 struct drm_connector *connector;
6139 struct drm_connector_state *old_con_state, *new_con_state;
6140 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6141 int crtc_disable_count = 0;
6143 drm_atomic_helper_update_legacy_modeset_state(dev, state);
6145 dm_state = dm_atomic_get_new_state(state);
6146 if (dm_state && dm_state->context) {
6147 dc_state = dm_state->context;
6149 /* No state changes, retain current state. */
6150 dc_state_temp = dc_create_state(dm->dc);
6151 ASSERT(dc_state_temp);
6152 dc_state = dc_state_temp;
6153 dc_resource_state_copy_construct_current(dm->dc, dc_state);
6156 /* update changed items */
6157 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6158 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6160 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6161 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6164 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6165 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6166 "connectors_changed:%d\n",
6168 new_crtc_state->enable,
6169 new_crtc_state->active,
6170 new_crtc_state->planes_changed,
6171 new_crtc_state->mode_changed,
6172 new_crtc_state->active_changed,
6173 new_crtc_state->connectors_changed);
6175 /* Copy all transient state flags into dc state */
6176 if (dm_new_crtc_state->stream) {
6177 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
6178 dm_new_crtc_state->stream);
6181 /* handles headless hotplug case, updating new_state and
6182 * aconnector as needed
6185 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
6187 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
6189 if (!dm_new_crtc_state->stream) {
6191 * This could happen because of issues with
6192 * userspace notification delivery.
6193 * In this case userspace tries to set a mode on
6194 * a display which is in fact disconnected.
6195 * dc_sink is NULL on the aconnector in this case.
6196 * We expect a mode reset to come soon.
6198 * This can also happen when an unplug occurs
6199 * during the resume sequence.
6201 * In this case, we want to pretend we still
6202 * have a sink to keep the pipe running so that
6203 * the hw state is consistent with the sw state.
6205 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6206 __func__, acrtc->base.base.id);
6210 if (dm_old_crtc_state->stream)
6211 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6213 pm_runtime_get_noresume(dev->dev);
6215 acrtc->enabled = true;
6216 acrtc->hw_mode = new_crtc_state->mode;
6217 crtc->hwmode = new_crtc_state->mode;
6218 } else if (modereset_required(new_crtc_state)) {
6219 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
6221 /* i.e. reset mode */
6222 if (dm_old_crtc_state->stream)
6223 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6225 } /* for_each_crtc_in_state() */
6228 dm_enable_per_frame_crtc_master_sync(dc_state);
6229 mutex_lock(&dm->dc_lock);
6230 WARN_ON(!dc_commit_state(dm->dc, dc_state));
6231 mutex_unlock(&dm->dc_lock);
6234 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
6235 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6237 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6239 if (dm_new_crtc_state->stream != NULL) {
6240 const struct dc_stream_status *status =
6241 dc_stream_get_status(dm_new_crtc_state->stream);
6244 status = dc_stream_get_status_from_state(dc_state,
6245 dm_new_crtc_state->stream);
6248 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
6250 acrtc->otg_inst = status->primary_otg_inst;
6254 /* Handle connector state changes */
6255 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6256 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
6257 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
6258 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
6259 struct dc_surface_update dummy_updates[MAX_SURFACES];
6260 struct dc_stream_update stream_update;
6261 struct dc_info_packet hdr_packet;
6262 struct dc_stream_status *status = NULL;
6263 bool abm_changed, hdr_changed, scaling_changed;
6265 memset(&dummy_updates, 0, sizeof(dummy_updates));
6266 memset(&stream_update, 0, sizeof(stream_update));
6269 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
6270 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
6273 /* Skip any modesets/resets */
6274 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
6277 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6278 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6280 scaling_changed = is_scaling_state_different(dm_new_con_state,
6283 abm_changed = dm_new_crtc_state->abm_level !=
6284 dm_old_crtc_state->abm_level;
6287 is_hdr_metadata_different(old_con_state, new_con_state);
6289 if (!scaling_changed && !abm_changed && !hdr_changed)
6292 stream_update.stream = dm_new_crtc_state->stream;
6293 if (scaling_changed) {
6294 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
6295 dm_new_con_state, dm_new_crtc_state->stream);
6297 stream_update.src = dm_new_crtc_state->stream->src;
6298 stream_update.dst = dm_new_crtc_state->stream->dst;
6302 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
6304 stream_update.abm_level = &dm_new_crtc_state->abm_level;
6308 fill_hdr_info_packet(new_con_state, &hdr_packet);
6309 stream_update.hdr_static_metadata = &hdr_packet;
6312 status = dc_stream_get_status(dm_new_crtc_state->stream);
6314 WARN_ON(!status->plane_count);
6317 * TODO: DC refuses to perform stream updates without a dc_surface_update.
6318 * Here we create an empty update on each plane.
6319 * To fix this, DC should permit updating only stream properties.
6321 for (j = 0; j < status->plane_count; j++)
6322 dummy_updates[j].surface = status->plane_states[0];
6325 mutex_lock(&dm->dc_lock);
6326 dc_commit_updates_for_stream(dm->dc,
6328 status->plane_count,
6329 dm_new_crtc_state->stream,
6332 mutex_unlock(&dm->dc_lock);
6335 /* Count number of newly disabled CRTCs for dropping PM refs later. */
6336 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6337 new_crtc_state, i) {
6338 if (old_crtc_state->active && !new_crtc_state->active)
6339 crtc_disable_count++;
6341 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6342 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6344 /* Update freesync active state. */
6345 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
6347 /* Handle vrr on->off / off->on transitions */
6348 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
6352 /* Enable interrupts for CRTCs going through a modeset. */
6353 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
6355 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
6356 if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
6357 wait_for_vblank = false;
6359 /* update planes when needed per crtc*/
6360 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
6361 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6363 if (dm_new_crtc_state->stream)
6364 amdgpu_dm_commit_planes(state, dc_state, dev,
6365 dm, crtc, wait_for_vblank);
6368 /* Enable interrupts for CRTCs going from 0 to n active planes. */
6369 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
6371 /* Update audio instances for each connector. */
6372 amdgpu_dm_commit_audio(dev, state);
6375 * Send a vblank event for every event not handled in the flip path, and
6376 * mark the event as consumed for drm_atomic_helper_commit_hw_done().
6378 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6379 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
6381 if (new_crtc_state->event)
6382 drm_send_event_locked(dev, &new_crtc_state->event->base);
6384 new_crtc_state->event = NULL;
6386 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6388 /* Signal HW programming completion */
6389 drm_atomic_helper_commit_hw_done(state);
6391 if (wait_for_vblank)
6392 drm_atomic_helper_wait_for_flip_done(dev, state);
6394 drm_atomic_helper_cleanup_planes(dev, state);
6397 * Finally, drop a runtime PM reference for each newly disabled CRTC,
6398 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
6401 for (i = 0; i < crtc_disable_count; i++)
6402 pm_runtime_put_autosuspend(dev->dev);
6403 pm_runtime_mark_last_busy(dev->dev);
6406 dc_release_state(dc_state_temp);
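/*
 * dm_force_atomic_commit - force a full modeset commit for one connector
 * @connector: DRM connector whose CRTC and primary plane should be restored
 *
 * Builds a minimal drm_atomic_state containing the connector, its CRTC and
 * the CRTC's primary plane, marks the CRTC state as mode_changed to force a
 * restore, and commits it. Used by dm_restore_drm_connector_state() below
 * when usermode cannot be relied upon to re-enable the display. As far as
 * the visible error handling goes, it returns 0 on success or the negative
 * error code reported by the atomic API.
 */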
6410 static int dm_force_atomic_commit(struct drm_connector *connector)
6413 struct drm_device *ddev = connector->dev;
6414 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
6415 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6416 struct drm_plane *plane = disconnected_acrtc->base.primary;
6417 struct drm_connector_state *conn_state;
6418 struct drm_crtc_state *crtc_state;
6419 struct drm_plane_state *plane_state;
6424 state->acquire_ctx = ddev->mode_config.acquire_ctx;
6426 /* Construct an atomic state to restore previous display settings */
6429 * Attach connectors to drm_atomic_state
6431 conn_state = drm_atomic_get_connector_state(state, connector);
6433 ret = PTR_ERR_OR_ZERO(conn_state);
6437 /* Attach crtc to drm_atomic_state*/
6438 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
6440 ret = PTR_ERR_OR_ZERO(crtc_state);
6444 /* force a restore */
6445 crtc_state->mode_changed = true;
6447 /* Attach plane to drm_atomic_state */
6448 plane_state = drm_atomic_get_plane_state(state, plane);
6450 ret = PTR_ERR_OR_ZERO(plane_state);
6455 /* Call commit internally with the state we just constructed */
6456 ret = drm_atomic_commit(state);
6461 DRM_ERROR("Restoring old state failed with %i\n", ret);
6462 drm_atomic_state_put(state);
6468 * This function handles all cases where a mode set is not triggered on hotplug.
6469 * This includes when a display is unplugged and then plugged back into the
6470 * same port, and when running without usermode desktop manager support.
6472 void dm_restore_drm_connector_state(struct drm_device *dev,
6473 struct drm_connector *connector)
6475 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6476 struct amdgpu_crtc *disconnected_acrtc;
6477 struct dm_crtc_state *acrtc_state;
6479 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
6482 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
6483 if (!disconnected_acrtc)
6486 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
6487 if (!acrtc_state->stream)
6491 * If the previous sink has not been released and is different from the
6492 * current one, we deduce that we cannot rely on a usermode call to turn
6493 * on the display, so we do it here.
6495 if (acrtc_state->stream->sink != aconnector->dc_sink)
6496 dm_force_atomic_commit(&aconnector->base);
6500 * Grabs all modesetting locks to serialize against any blocking commits,
6501 * and waits for completion of all non-blocking commits.
6503 static int do_aquire_global_lock(struct drm_device *dev,
6504 struct drm_atomic_state *state)
6506 struct drm_crtc *crtc;
6507 struct drm_crtc_commit *commit;
6511 * Adding all modeset locks to acquire_ctx ensures
6512 * that when the framework releases it, the extra
6513 * locks we are taking here will also get released.
6515 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
6519 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6520 spin_lock(&crtc->commit_lock);
6521 commit = list_first_entry_or_null(&crtc->commit_list,
6522 struct drm_crtc_commit, commit_entry);
6524 drm_crtc_commit_get(commit);
6525 spin_unlock(&crtc->commit_lock);
6531 * Make sure all pending HW programming has completed and page flips are done.
6534 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
6537 ret = wait_for_completion_interruptible_timeout(
6538 &commit->flip_done, 10*HZ);
6541 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
6542 "timed out\n", crtc->base.id, crtc->name);
6544 drm_crtc_commit_put(commit);
6547 return ret < 0 ? ret : 0;
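/*
 * get_freesync_config_for_crtc - derive the FreeSync/VRR config for a CRTC
 * @new_crtc_state: DM CRTC state being checked
 * @new_con_state: DM connector state driving this CRTC
 *
 * VRR is considered supported when the connector reports freesync_capable
 * and the mode's nominal refresh rate falls within the monitor's
 * [min_vfreq, max_vfreq] range. The range is handed to the freesync module
 * in micro-Hertz, e.g. a 40 Hz minimum becomes
 * min_refresh_in_uhz = 40 * 1000000 = 40000000.
 */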
6550 static void get_freesync_config_for_crtc(
6551 struct dm_crtc_state *new_crtc_state,
6552 struct dm_connector_state *new_con_state)
6554 struct mod_freesync_config config = {0};
6555 struct amdgpu_dm_connector *aconnector =
6556 to_amdgpu_dm_connector(new_con_state->base.connector);
6557 struct drm_display_mode *mode = &new_crtc_state->base.mode;
6558 int vrefresh = drm_mode_vrefresh(mode);
6560 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
6561 vrefresh >= aconnector->min_vfreq &&
6562 vrefresh <= aconnector->max_vfreq;
6564 if (new_crtc_state->vrr_supported) {
6565 new_crtc_state->stream->ignore_msa_timing_param = true;
6566 config.state = new_crtc_state->base.vrr_enabled ?
6567 VRR_STATE_ACTIVE_VARIABLE : VRR_STATE_INACTIVE;
6569 config.min_refresh_in_uhz =
6570 aconnector->min_vfreq * 1000000;
6571 config.max_refresh_in_uhz =
6572 aconnector->max_vfreq * 1000000;
6573 config.vsif_supported = true;
6577 new_crtc_state->freesync_config = config;
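/*
 * reset_freesync_config_for_crtc - clear any previously derived VRR state,
 * presumably so that a stale configuration is not carried over once the
 * stream backing the CRTC has been removed or replaced.
 */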
6580 static void reset_freesync_config_for_crtc(
6581 struct dm_crtc_state *new_crtc_state)
6583 new_crtc_state->vrr_supported = false;
6585 memset(&new_crtc_state->vrr_params, 0,
6586 sizeof(new_crtc_state->vrr_params));
6587 memset(&new_crtc_state->vrr_infopacket, 0,
6588 sizeof(new_crtc_state->vrr_infopacket));
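/*
 * dm_update_crtc_state - add or remove the DC stream backing a CRTC
 *
 * Called twice from atomic check: first with enable == false to remove the
 * stream of every changed/disabled CRTC from the DC context, then with
 * enable == true to (re)create and add streams for updated/enabled CRTCs.
 * It also applies the stream updates that do not require a full modeset
 * (scaling, ABM level, color management, FreeSync config) and sets
 * *lock_and_validation_needed whenever the DC context is modified.
 */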
6591 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
6592 struct drm_atomic_state *state,
6593 struct drm_crtc *crtc,
6594 struct drm_crtc_state *old_crtc_state,
6595 struct drm_crtc_state *new_crtc_state,
6596 bool enable,
6597 bool *lock_and_validation_needed)
6599 struct dm_atomic_state *dm_state = NULL;
6600 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6601 struct dc_stream_state *new_stream;
6605 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
6606 * update changed items
6608 struct amdgpu_crtc *acrtc = NULL;
6609 struct amdgpu_dm_connector *aconnector = NULL;
6610 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
6611 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
6615 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6616 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6617 acrtc = to_amdgpu_crtc(crtc);
6618 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
6620 /* TODO This hack should go away */
6621 if (aconnector && enable) {
6622 /* Make sure fake sink is created in plug-in scenario */
6623 drm_new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
6625 drm_old_conn_state = drm_atomic_get_old_connector_state(state, &aconnector->base);
6628 if (IS_ERR(drm_new_conn_state)) {
6629 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
6633 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
6634 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
6636 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6639 new_stream = create_stream_for_sink(aconnector,
6640 &new_crtc_state->mode,
6641 dm_new_conn_state,
6642 dm_old_crtc_state->stream);
6645 * We can have no stream on ACTION_SET if a display
6646 * was disconnected during S3. In this case it is not an
6647 * error: the OS will be updated after detection and
6648 * will do the right thing on the next atomic commit.
6652 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6653 __func__, acrtc->base.base.id);
6658 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
6660 ret = fill_hdr_info_packet(drm_new_conn_state,
6661 &new_stream->hdr_static_metadata);
6666 * If we already removed the old stream from the context
6667 * (and set the new stream to NULL) then we can't reuse
6668 * the old stream even if the stream and scaling are unchanged.
6669 * We'll hit the BUG_ON and get a black screen.
6671 * TODO: Refactor this function to allow this check to work
6672 * in all conditions.
6674 if (dm_new_crtc_state->stream &&
6675 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
6676 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
6677 new_crtc_state->mode_changed = false;
6678 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
6679 new_crtc_state->mode_changed);
6683 /* mode_changed flag may get updated above, need to check again */
6684 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6688 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6689 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6690 "connectors_changed:%d\n",
6692 new_crtc_state->enable,
6693 new_crtc_state->active,
6694 new_crtc_state->planes_changed,
6695 new_crtc_state->mode_changed,
6696 new_crtc_state->active_changed,
6697 new_crtc_state->connectors_changed);
6699 /* Remove stream for any changed/disabled CRTC */
6702 if (!dm_old_crtc_state->stream)
6705 ret = dm_atomic_get_state(state, &dm_state);
6709 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", crtc->base.id);
6712 /* i.e. reset mode */
6713 if (dc_remove_stream_from_ctx(
6714 dm->dc,
6715 dm_state->context,
6716 dm_old_crtc_state->stream) != DC_OK) {
6721 dc_stream_release(dm_old_crtc_state->stream);
6722 dm_new_crtc_state->stream = NULL;
6724 reset_freesync_config_for_crtc(dm_new_crtc_state);
6726 *lock_and_validation_needed = true;
6728 } else {/* Add stream for any updated/enabled CRTC */
6730 * Quick fix to prevent a NULL pointer dereference on new_stream when
6731 * newly added MST connectors are not found in the existing crtc_state in chained mode.
6732 * TODO: need to dig out the root cause of this.
6734 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
6737 if (modereset_required(new_crtc_state))
6740 if (modeset_required(new_crtc_state, new_stream,
6741 dm_old_crtc_state->stream)) {
6743 WARN_ON(dm_new_crtc_state->stream);
6745 ret = dm_atomic_get_state(state, &dm_state);
6749 dm_new_crtc_state->stream = new_stream;
6751 dc_stream_retain(new_stream);
6753 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n", crtc->base.id);
6756 if (dc_add_stream_to_ctx(
6757 dm->dc,
6758 dm_state->context,
6759 dm_new_crtc_state->stream) != DC_OK) {
6764 *lock_and_validation_needed = true;
6769 /* Release extra reference */
6771 dc_stream_release(new_stream);
6774 * We want to do dc stream updates that do not require a
6775 * full modeset below.
6777 if (!(enable && aconnector && new_crtc_state->enable &&
6778 new_crtc_state->active))
6781 * Given the above conditions, the dc state cannot be NULL because:
6782 * 1. We're in the process of enabling CRTCs (the stream has just been
6783 *    added to the dc context, or is already in the context),
6784 * 2. the CRTC has a valid connector attached, and
6785 * 3. the CRTC is currently active and enabled.
6786 * => The dc stream state currently exists.
6788 BUG_ON(dm_new_crtc_state->stream == NULL);
6790 /* Scaling or underscan settings */
6791 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
6792 update_stream_scaling_settings(
6793 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
6796 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
6799 * Color management settings. We also update color properties
6800 * when a modeset is needed, to ensure they get reprogrammed.
6802 if (dm_new_crtc_state->base.color_mgmt_changed ||
6803 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
6804 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
6809 /* Update Freesync settings. */
6810 get_freesync_config_for_crtc(dm_new_crtc_state, dm_new_conn_state);
6817 dc_stream_release(new_stream);
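/*
 * should_reset_plane - decide whether a plane must be removed and re-added
 *
 * Returns true when the plane cannot be updated in place: when allow_modeset
 * is set, on plane add/remove, on modesets or CRTC degamma changes, when
 * other non-cursor planes on the same CRTC change the z-order, or when a
 * framebuffer format change is detected.
 */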
6821 static bool should_reset_plane(struct drm_atomic_state *state,
6822 struct drm_plane *plane,
6823 struct drm_plane_state *old_plane_state,
6824 struct drm_plane_state *new_plane_state)
6826 struct drm_plane *other;
6827 struct drm_plane_state *old_other_state, *new_other_state;
6828 struct drm_crtc_state *new_crtc_state;
6832 * TODO: Remove this hack once the checks below are sufficient
6833 * to determine when we need to reset all the planes on the CRTC.
6836 if (state->allow_modeset)
6839 /* Exit early if we know that we're adding or removing the plane. */
6840 if (old_plane_state->crtc != new_plane_state->crtc)
6843 /* old crtc == new_crtc == NULL, plane not in context. */
6844 if (!new_plane_state->crtc)
6848 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
6850 if (!new_crtc_state)
6853 /* CRTC Degamma changes currently require us to recreate planes. */
6854 if (new_crtc_state->color_mgmt_changed)
6857 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
6861 * If there are any new primary or overlay planes being added or
6862 * removed then the z-order can potentially change. To ensure
6863 * correct z-order and pipe acquisition the current DC architecture
6864 * requires us to remove and recreate all existing planes.
6866 * TODO: Come up with a more elegant solution for this.
6868 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
6869 if (other->type == DRM_PLANE_TYPE_CURSOR)
6872 if (old_other_state->crtc != new_plane_state->crtc &&
6873 new_other_state->crtc != new_plane_state->crtc)
6876 if (old_other_state->crtc != new_other_state->crtc)
6879 /* TODO: Remove this once we can handle fast format changes. */
6880 if (old_other_state->fb && new_other_state->fb &&
6881 old_other_state->fb->format != new_other_state->fb->format)
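/*
 * dm_update_plane_state - add or remove the dc_plane_state behind a plane
 *
 * Mirrors dm_update_crtc_state(): with enable == false it removes changed
 * or disabled planes from the DC context, with enable == true it creates a
 * new dc_plane_state, fills in its attributes and adds it to the stream's
 * context. Cursor planes are skipped since no atomic check is implemented
 * for them yet. Sets *lock_and_validation_needed whenever the DC context
 * is modified.
 */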
6888 static int dm_update_plane_state(struct dc *dc,
6889 struct drm_atomic_state *state,
6890 struct drm_plane *plane,
6891 struct drm_plane_state *old_plane_state,
6892 struct drm_plane_state *new_plane_state,
6893 bool enable,
6894 bool *lock_and_validation_needed)
6897 struct dm_atomic_state *dm_state = NULL;
6898 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
6899 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6900 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
6901 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
6906 new_plane_crtc = new_plane_state->crtc;
6907 old_plane_crtc = old_plane_state->crtc;
6908 dm_new_plane_state = to_dm_plane_state(new_plane_state);
6909 dm_old_plane_state = to_dm_plane_state(old_plane_state);
6911 /*TODO Implement atomic check for cursor plane */
6912 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6915 needs_reset = should_reset_plane(state, plane, old_plane_state,
6918 /* Remove any changed/removed planes */
6923 if (!old_plane_crtc)
6926 old_crtc_state = drm_atomic_get_old_crtc_state(
6927 state, old_plane_crtc);
6928 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6930 if (!dm_old_crtc_state->stream)
6933 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
6934 plane->base.id, old_plane_crtc->base.id);
6936 ret = dm_atomic_get_state(state, &dm_state);
6940 if (!dc_remove_plane_from_context(
6941 dc,
6942 dm_old_crtc_state->stream,
6943 dm_old_plane_state->dc_state,
6944 dm_state->context)) {
6951 dc_plane_state_release(dm_old_plane_state->dc_state);
6952 dm_new_plane_state->dc_state = NULL;
6954 *lock_and_validation_needed = true;
6956 } else { /* Add new planes */
6957 struct dc_plane_state *dc_new_plane_state;
6959 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
6962 if (!new_plane_crtc)
6965 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
6966 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6968 if (!dm_new_crtc_state->stream)
6974 WARN_ON(dm_new_plane_state->dc_state);
6976 dc_new_plane_state = dc_create_plane_state(dc);
6977 if (!dc_new_plane_state)
6980 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
6981 plane->base.id, new_plane_crtc->base.id);
6983 ret = fill_dc_plane_attributes(
6984 new_plane_crtc->dev->dev_private,
6989 dc_plane_state_release(dc_new_plane_state);
6993 ret = dm_atomic_get_state(state, &dm_state);
6995 dc_plane_state_release(dc_new_plane_state);
7000 * Any atomic check errors that occur after this will
7001 * not need a release. The plane state will be attached
7002 * to the stream, and therefore part of the atomic
7003 * state. It'll be released when the atomic state is cleaned up.
7006 if (!dc_add_plane_to_context(
7007 dc,
7008 dm_new_crtc_state->stream,
7009 dc_new_plane_state,
7010 dm_state->context)) {
7012 dc_plane_state_release(dc_new_plane_state);
7016 dm_new_plane_state->dc_state = dc_new_plane_state;
7018 /* Tell DC to do a full surface update every time there
7019 * is a plane change. Inefficient, but works for now.
7021 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7023 *lock_and_validation_needed = true;
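/*
 * dm_determine_update_type_for_commit - classify the commit for DC
 *
 * Walks every CRTC and plane in the atomic state, builds dc_surface_update
 * and dc_stream_update descriptors for streams that are kept, and asks DC
 * via dc_check_update_surfaces_for_stream() whether the change can be
 * programmed as a FAST, MED or FULL update. A stream swap or a changed
 * dc_plane_state forces UPDATE_TYPE_FULL directly. The result is returned
 * through @out_type.
 */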
7031 static int dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7032 struct drm_atomic_state *state,
7033 enum surface_update_type *out_type)
7035 struct dc *dc = dm->dc;
7036 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7037 int i, j, num_plane, ret = 0;
7038 struct drm_plane_state *old_plane_state, *new_plane_state;
7039 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7040 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7041 struct drm_plane *plane;
7043 struct drm_crtc *crtc;
7044 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7045 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7046 struct dc_stream_status *status = NULL;
7048 struct dc_surface_update *updates;
7049 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7051 updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
7054 DRM_ERROR("Failed to allocate plane updates\n");
7055 /* Set type to FULL to avoid crashing in DC*/
7056 update_type = UPDATE_TYPE_FULL;
7060 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7061 struct dc_scaling_info scaling_info;
7062 struct dc_stream_update stream_update;
7064 memset(&stream_update, 0, sizeof(stream_update));
7066 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7067 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
7070 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
7071 update_type = UPDATE_TYPE_FULL;
7075 if (!new_dm_crtc_state->stream)
7078 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
7079 const struct amdgpu_framebuffer *amdgpu_fb =
7080 to_amdgpu_framebuffer(new_plane_state->fb);
7081 struct dc_plane_info plane_info;
7082 struct dc_flip_addrs flip_addr;
7083 uint64_t tiling_flags;
7085 new_plane_crtc = new_plane_state->crtc;
7086 old_plane_crtc = old_plane_state->crtc;
7087 new_dm_plane_state = to_dm_plane_state(new_plane_state);
7088 old_dm_plane_state = to_dm_plane_state(old_plane_state);
7090 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7093 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
7094 update_type = UPDATE_TYPE_FULL;
7098 if (crtc != new_plane_crtc)
7101 updates[num_plane].surface = new_dm_plane_state->dc_state;
7103 if (new_crtc_state->mode_changed) {
7104 stream_update.dst = new_dm_crtc_state->stream->dst;
7105 stream_update.src = new_dm_crtc_state->stream->src;
7108 if (new_crtc_state->color_mgmt_changed) {
7109 updates[num_plane].gamma =
7110 new_dm_plane_state->dc_state->gamma_correction;
7111 updates[num_plane].in_transfer_func =
7112 new_dm_plane_state->dc_state->in_transfer_func;
7113 stream_update.gamut_remap =
7114 &new_dm_crtc_state->stream->gamut_remap_matrix;
7115 stream_update.output_csc_transform =
7116 &new_dm_crtc_state->stream->csc_color_matrix;
7117 stream_update.out_transfer_func =
7118 new_dm_crtc_state->stream->out_transfer_func;
7121 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7126 updates[num_plane].scaling_info = &scaling_info;
7129 ret = get_fb_info(amdgpu_fb, &tiling_flags);
7133 memset(&flip_addr, 0, sizeof(flip_addr));
7135 ret = fill_dc_plane_info_and_addr(
7136 dm->adev, new_plane_state, tiling_flags,
7137 &plane_info,
7138 &flip_addr.address);
7142 updates[num_plane].plane_info = &plane_info;
7143 updates[num_plane].flip_addr = &flip_addr;
7152 ret = dm_atomic_get_state(state, &dm_state);
7156 old_dm_state = dm_atomic_get_old_state(state);
7157 if (!old_dm_state) {
7162 status = dc_stream_get_status_from_state(old_dm_state->context,
7163 new_dm_crtc_state->stream);
7164 stream_update.stream = new_dm_crtc_state->stream;
7166 * TODO: DC modifies the surface during this call so we need
7167 * to lock here - find a way to do this without locking.
7169 mutex_lock(&dm->dc_lock);
7170 update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
7171 &stream_update, status);
7172 mutex_unlock(&dm->dc_lock);
7174 if (update_type > UPDATE_TYPE_MED) {
7175 update_type = UPDATE_TYPE_FULL;
7183 *out_type = update_type;
7188 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
7189 * @dev: The DRM device
7190 * @state: The atomic state to commit
7192 * Validate that the given atomic state is programmable by DC into hardware.
7193 * This involves constructing a &struct dc_state reflecting the new hardware
7194 * state we wish to commit, then querying DC to see if it is programmable. It's
7195 * important not to modify the existing DC state. Otherwise, atomic_check
7196 * may unexpectedly commit hardware changes.
7198 * When validating the DC state, it's important that the right locks are
7199 * acquired. For the full-update case, which removes/adds/updates streams on
7200 * one CRTC while flipping on another CRTC, acquiring the global lock
7201 * guarantees that any such full-update commit will wait for completion of
7202 * any outstanding flip using DRM's synchronization events. See
7203 * dm_determine_update_type_for_commit().
7205 * Note that DM adds the affected connectors for all CRTCs in the state, even
7206 * when that might not seem necessary. This is because DC stream creation
7207 * requires the DC sink, which is tied to the DRM connector state. Cleaning
7208 * this up should be possible but is non-trivial - a possible TODO item.
7210 * Return: 0 on success, or a negative error code if validation failed.
7212 static int amdgpu_dm_atomic_check(struct drm_device *dev,
7213 struct drm_atomic_state *state)
7215 struct amdgpu_device *adev = dev->dev_private;
7216 struct dm_atomic_state *dm_state = NULL;
7217 struct dc *dc = adev->dm.dc;
7218 struct drm_connector *connector;
7219 struct drm_connector_state *old_con_state, *new_con_state;
7220 struct drm_crtc *crtc;
7221 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7222 struct drm_plane *plane;
7223 struct drm_plane_state *old_plane_state, *new_plane_state;
7224 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7225 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
7230 * This bool will be set to true for any modeset/reset
7231 * or plane update which implies a non-fast surface update.
7233 bool lock_and_validation_needed = false;
7235 ret = drm_atomic_helper_check_modeset(dev, state);
7239 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7240 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
7241 !new_crtc_state->color_mgmt_changed &&
7242 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
7245 if (!new_crtc_state->enable)
7248 ret = drm_atomic_add_affected_connectors(state, crtc);
7252 ret = drm_atomic_add_affected_planes(state, crtc);
7258 * Add all primary and overlay planes on the CRTC to the state
7259 * whenever a plane is enabled to maintain correct z-ordering
7260 * and to enable fast surface updates.
7262 drm_for_each_crtc(crtc, dev) {
7263 bool modified = false;
7265 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7266 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7269 if (new_plane_state->crtc == crtc ||
7270 old_plane_state->crtc == crtc) {
7279 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
7280 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7284 new_plane_state = drm_atomic_get_plane_state(state, plane);
7286 if (IS_ERR(new_plane_state)) {
7287 ret = PTR_ERR(new_plane_state);
7293 /* Remove existing planes if they are modified */
7294 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7295 ret = dm_update_plane_state(dc, state, plane,
7296 old_plane_state,
7297 new_plane_state,
7298 false,
7299 &lock_and_validation_needed);
7304 /* Disable all crtcs which require disable */
7305 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7306 ret = dm_update_crtc_state(&adev->dm, state, crtc,
7307 old_crtc_state,
7308 new_crtc_state,
7309 false,
7310 &lock_and_validation_needed);
7315 /* Enable all crtcs which require enable */
7316 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7317 ret = dm_update_crtc_state(&adev->dm, state, crtc,
7318 old_crtc_state,
7319 new_crtc_state,
7320 true,
7321 &lock_and_validation_needed);
7326 /* Add new/modified planes */
7327 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7328 ret = dm_update_plane_state(dc, state, plane,
7329 old_plane_state,
7330 new_plane_state,
7331 true,
7332 &lock_and_validation_needed);
7337 /* Run this here since we want to validate the streams we created */
7338 ret = drm_atomic_helper_check_planes(dev, state);
7342 if (state->legacy_cursor_update) {
7344 * This is a fast cursor update coming from the plane update
7345 * helper, check if it can be done asynchronously for better performance.
7348 state->async_update =
7349 !drm_atomic_helper_async_check(dev, state);
7352 * Skip the remaining global validation if this is an async
7353 * update. Cursor updates can be done without affecting
7354 * state or bandwidth calcs and this avoids the performance
7355 * penalty of locking the private state object and
7356 * allocating a new dc_state.
7358 if (state->async_update)
7362 /* Check scaling and underscan changes */
7363 /* TODO Removed scaling changes validation due to inability to commit
7364 * a new stream into the context without causing a full reset. Need to
7365 * decide how to handle this.
7367 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7368 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7369 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7370 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7372 /* Skip any modesets/resets */
7373 if (!acrtc || drm_atomic_crtc_needs_modeset(
7374 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
7377 /* Skip anything that is not a scaling or underscan change */
7378 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
7381 overall_update_type = UPDATE_TYPE_FULL;
7382 lock_and_validation_needed = true;
7385 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
7389 if (overall_update_type < update_type)
7390 overall_update_type = update_type;
7393 * lock_and_validation_needed was an older way to determine whether we need
7394 * to take the global lock. It is left in to check that no corner case broke:
7395 * lock_and_validation_needed == true  => UPDATE_TYPE_FULL or UPDATE_TYPE_MED
7396 * lock_and_validation_needed == false => UPDATE_TYPE_FAST
7398 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
7399 WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
7401 if (overall_update_type > UPDATE_TYPE_FAST) {
7402 ret = dm_atomic_get_state(state, &dm_state);
7406 ret = do_aquire_global_lock(dev, state);
7410 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
7416 * The commit is a fast update. Fast updates shouldn't change
7417 * the DC context or affect global validation, and they can have their
7418 * commit work done in parallel with other commits not touching
7419 * the same resource. If we have a new DC context as part of
7420 * the DM atomic state from validation, we need to free it and
7421 * retain the existing one instead.
7423 struct dm_atomic_state *new_dm_state, *old_dm_state;
7425 new_dm_state = dm_atomic_get_new_state(state);
7426 old_dm_state = dm_atomic_get_old_state(state);
7428 if (new_dm_state && old_dm_state) {
7429 if (new_dm_state->context)
7430 dc_release_state(new_dm_state->context);
7432 new_dm_state->context = old_dm_state->context;
7434 if (old_dm_state->context)
7435 dc_retain_state(old_dm_state->context);
7439 /* Store the overall update type for use later in atomic check. */
7440 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
7441 struct dm_crtc_state *dm_new_crtc_state =
7442 to_dm_crtc_state(new_crtc_state);
7444 dm_new_crtc_state->update_type = (int)overall_update_type;
7447 /* Must be success */
7452 if (ret == -EDEADLK)
7453 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
7454 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
7455 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
7457 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
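/*
 * is_dp_capable_without_timing_msa - check whether the sink ignores MSA
 *
 * Reads DP_DOWN_STREAM_PORT_COUNT over DPCD and tests the
 * DP_MSA_TIMING_PAR_IGNORED bit. Presumably a sink that ignores the MSA
 * timing parameters can vary its refresh rate, which is why the EDID
 * range-limits check below is only performed for such DP/eDP sinks.
 */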
7462 static bool is_dp_capable_without_timing_msa(struct dc *dc,
7463 struct amdgpu_dm_connector *amdgpu_dm_connector)
7465 uint8_t dpcd_data;
7466 bool capable = false;
7468 if (amdgpu_dm_connector->dc_link &&
7469 dm_helpers_dp_read_dpcd(
7471 amdgpu_dm_connector->dc_link,
7472 DP_DOWN_STREAM_PORT_COUNT,
7473 &dpcd_data,
7474 sizeof(dpcd_data))) {
7475 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
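/*
 * amdgpu_dm_update_freesync_caps - refresh FreeSync capability from the EDID
 *
 * Parses the EDID detailed timing descriptors (EDID 1.2 or newer) for a
 * monitor range-limits block and caches min_vfreq, max_vfreq and
 * pixel_clock_mhz on the connector. The connector is reported as
 * freesync_capable when the usable refresh range is wider than 10 Hz, and
 * the result is mirrored into the DRM vrr_capable property.
 */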
7480 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
7481 struct edid *edid)
7484 bool edid_check_required;
7485 struct detailed_timing *timing;
7486 struct detailed_non_pixel *data;
7487 struct detailed_data_monitor_range *range;
7488 struct amdgpu_dm_connector *amdgpu_dm_connector =
7489 to_amdgpu_dm_connector(connector);
7490 struct dm_connector_state *dm_con_state = NULL;
7492 struct drm_device *dev = connector->dev;
7493 struct amdgpu_device *adev = dev->dev_private;
7494 bool freesync_capable = false;
7496 if (!connector->state) {
7497 DRM_ERROR("%s - Connector has no state", __func__);
7502 dm_con_state = to_dm_connector_state(connector->state);
7504 amdgpu_dm_connector->min_vfreq = 0;
7505 amdgpu_dm_connector->max_vfreq = 0;
7506 amdgpu_dm_connector->pixel_clock_mhz = 0;
7511 dm_con_state = to_dm_connector_state(connector->state);
7513 edid_check_required = false;
7514 if (!amdgpu_dm_connector->dc_sink) {
7515 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
7518 if (!adev->dm.freesync_module)
7521 * If the EDID is non-zero, restrict FreeSync support to DP and eDP only.
7524 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
7525 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
7526 edid_check_required = is_dp_capable_without_timing_msa(
7528 amdgpu_dm_connector);
7531 if (edid_check_required == true && (edid->version > 1 ||
7532 (edid->version == 1 && edid->revision > 1))) {
7533 for (i = 0; i < 4; i++) {
7535 timing = &edid->detailed_timings[i];
7536 data = &timing->data.other_data;
7537 range = &data->data.range;
7539 * Check if monitor has continuous frequency mode
7541 if (data->type != EDID_DETAIL_MONITOR_RANGE)
7544 * Check for flag range limits only. If flag == 1 then
7545 * no additional timing information is provided.
7546 * Default GTF, GTF Secondary curve and CVT are not supported.
7549 if (range->flags != 1)
7552 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
7553 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
7554 amdgpu_dm_connector->pixel_clock_mhz =
7555 range->pixel_clock_mhz * 10;
7559 if (amdgpu_dm_connector->max_vfreq -
7560 amdgpu_dm_connector->min_vfreq > 10) {
7562 freesync_capable = true;
7568 dm_con_state->freesync_capable = freesync_capable;
7570 if (connector->vrr_capable_property)
7571 drm_connector_set_vrr_capable_property(connector, freesync_capable);