2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
101 /* Number of bytes in PSP header for firmware. */
102 #define PSP_HEADER_BYTES 0x100
104 /* Number of bytes in PSP footer for firmware. */
105 #define PSP_FOOTER_BYTES 0x100
110 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
111 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
112 * requests into DC requests, and DC responses into DRM responses.
114 * The root control structure is &struct amdgpu_display_manager.
117 /* basic init/fini API */
118 static int amdgpu_dm_init(struct amdgpu_device *adev);
119 static void amdgpu_dm_fini(struct amdgpu_device *adev);
122 * initializes drm_device display related structures, based on the information
123 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
124 * drm_encoder, drm_mode_config
126 * Returns 0 on success
128 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
129 /* removes and deallocates the drm structures, created by the above function */
130 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
133 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 struct drm_plane *plane,
137 unsigned long possible_crtcs,
138 const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 struct drm_plane *plane,
141 uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 struct amdgpu_dm_connector *amdgpu_dm_connector,
145 struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 struct amdgpu_encoder *aencoder,
148 uint32_t link_index);
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 struct drm_atomic_state *state,
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 struct drm_atomic_state *state);
161 static void handle_cursor_update(struct drm_plane *plane,
162 struct drm_plane_state *old_plane_state);
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
171 * dm_vblank_get_counter
174 * Get counter for number of vertical blanks
177 * struct amdgpu_device *adev - [in] desired amdgpu device
178 * int crtc - [in] which CRTC to get the counter from
181 * Counter for vertical blanks
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
185 if (crtc >= adev->mode_info.num_crtc)
188 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
193 if (acrtc_state->stream == NULL) {
194 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
199 return dc_stream_get_vblank_counter(acrtc_state->stream);
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 u32 *vbl, u32 *position)
206 uint32_t v_blank_start, v_blank_end, h_position, v_position;
208 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
211 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
215 if (acrtc_state->stream == NULL) {
216 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
222 * TODO: rework base driver to use values directly.
223 * For now parse it back into reg-format
225 dc_stream_get_scanoutpos(acrtc_state->stream,
231 *position = v_position | (h_position << 16);
232 *vbl = v_blank_start | (v_blank_end << 16);
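/*
 * Illustrative sketch (for documentation only, not part of the driver):
 * a consumer of the reg-format values built above would unpack them as
 *
 *	u32 vbl, position;
 *
 *	dm_crtc_get_scanoutpos(adev, crtc, &vbl, &position);
 *	v_position    = position & 0xffff;
 *	h_position    = position >> 16;
 *	v_blank_start = vbl & 0xffff;
 *	v_blank_end   = vbl >> 16;
 */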
238 static bool dm_is_idle(void *handle)
244 static int dm_wait_for_idle(void *handle)
250 static bool dm_check_soft_reset(void *handle)
255 static int dm_soft_reset(void *handle)
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
265 struct drm_device *dev = adev->ddev;
266 struct drm_crtc *crtc;
267 struct amdgpu_crtc *amdgpu_crtc;
269 if (otg_inst == -1) {
271 return adev->mode_info.crtcs[0];
274 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 amdgpu_crtc = to_amdgpu_crtc(crtc);
277 if (amdgpu_crtc->otg_inst == otg_inst)
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
286 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
291 * dm_pflip_high_irq() - Handle pageflip interrupt
292 * @interrupt_params: ignored
294 * Handles the pageflip interrupt by notifying all interested parties
295 * that the pageflip has been completed.
297 static void dm_pflip_high_irq(void *interrupt_params)
299 struct amdgpu_crtc *amdgpu_crtc;
300 struct common_irq_params *irq_params = interrupt_params;
301 struct amdgpu_device *adev = irq_params->adev;
303 struct drm_pending_vblank_event *e;
304 struct dm_crtc_state *acrtc_state;
305 uint32_t vpos, hpos, v_blank_start, v_blank_end;
308 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
310 /* IRQ could occur when in initial stage */
311 /* TODO work and BO cleanup */
312 if (amdgpu_crtc == NULL) {
313 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
317 spin_lock_irqsave(&adev->ddev->event_lock, flags);
319 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
320 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
321 amdgpu_crtc->pflip_status,
322 AMDGPU_FLIP_SUBMITTED,
323 amdgpu_crtc->crtc_id,
325 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
329 /* page flip completed. */
330 e = amdgpu_crtc->event;
331 amdgpu_crtc->event = NULL;
336 acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 vrr_active = amdgpu_dm_vrr_active(acrtc_state);
339 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
341 !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 &v_blank_end, &hpos, &vpos) ||
343 (vpos < v_blank_start)) {
344 /* Update to correct count and vblank timestamp if racing with
345 * vblank irq. This also updates to the correct vblank timestamp
346 * even in VRR mode, as scanout is past the front-porch atm.
348 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
350 /* Wake up userspace by sending the pageflip event with proper
351 * count and timestamp of vblank of flip completion.
354 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
356 /* Event sent, so done with vblank for this flip */
357 drm_crtc_vblank_put(&amdgpu_crtc->base);
360 /* VRR active and inside front-porch: vblank count and
361 * timestamp for pageflip event will only be up to date after
362 * drm_crtc_handle_vblank() has been executed from late vblank
363 * irq handler after start of back-porch (vline 0). We queue the
364 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 * updated timestamp and count, once it runs after us.
367 * We need to open-code this instead of using the helper
368 * drm_crtc_arm_vblank_event(), as that helper would
369 * call drm_crtc_accurate_vblank_count(), which we must
370 * not call in VRR mode while we are in front-porch!
373 /* sequence will be replaced by real count during send-out. */
374 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 e->pipe = amdgpu_crtc->crtc_id;
377 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
381 /* Keep track of vblank of this flip for flip throttling. We use the
382 * cooked hw counter, as that one is incremented at the start of this vblank
383 * of pageflip completion, so last_flip_vblank is the forbidden count
384 * for queueing new pageflips if vsync + VRR is enabled.
386 amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
387 amdgpu_crtc->crtc_id);
389 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
392 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 vrr_active, (int) !e);
397 static void dm_vupdate_high_irq(void *interrupt_params)
399 struct common_irq_params *irq_params = interrupt_params;
400 struct amdgpu_device *adev = irq_params->adev;
401 struct amdgpu_crtc *acrtc;
402 struct dm_crtc_state *acrtc_state;
405 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
408 acrtc_state = to_dm_crtc_state(acrtc->base.state);
410 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
411 amdgpu_dm_vrr_active(acrtc_state));
413 /* Core vblank handling is done here after end of front-porch in
414 * vrr mode, as vblank timestamping only gives valid results
415 * now that we are past the front-porch. This will also deliver
416 * page-flip completion events that have been queued to us
417 * if a pageflip happened inside front-porch.
419 if (amdgpu_dm_vrr_active(acrtc_state)) {
420 drm_crtc_handle_vblank(&acrtc->base);
422 /* BTR processing for pre-DCE12 ASICs */
423 if (acrtc_state->stream &&
424 adev->family < AMDGPU_FAMILY_AI) {
425 spin_lock_irqsave(&adev->ddev->event_lock, flags);
426 mod_freesync_handle_v_update(
427 adev->dm.freesync_module,
429 &acrtc_state->vrr_params);
431 dc_stream_adjust_vmin_vmax(
434 &acrtc_state->vrr_params.adjust);
435 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
442 * dm_crtc_high_irq() - Handles CRTC interrupt
443 * @interrupt_params: ignored
445 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK event handler.
448 static void dm_crtc_high_irq(void *interrupt_params)
450 struct common_irq_params *irq_params = interrupt_params;
451 struct amdgpu_device *adev = irq_params->adev;
452 struct amdgpu_crtc *acrtc;
453 struct dm_crtc_state *acrtc_state;
456 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
459 acrtc_state = to_dm_crtc_state(acrtc->base.state);
461 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
462 amdgpu_dm_vrr_active(acrtc_state));
464 /* Core vblank handling at start of front-porch is only possible
465 * in non-vrr mode, as only then does vblank timestamping give
466 * valid results while inside the front-porch. Otherwise defer it
467 * to dm_vupdate_high_irq after end of front-porch.
469 if (!amdgpu_dm_vrr_active(acrtc_state))
470 drm_crtc_handle_vblank(&acrtc->base);
472 * The following must happen at start of vblank, for crc
473 * computation and below-the-range btr support in vrr mode.
475 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
477 if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
478 acrtc_state->vrr_params.supported &&
479 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
480 spin_lock_irqsave(&adev->ddev->event_lock, flags);
481 mod_freesync_handle_v_update(
482 adev->dm.freesync_module,
484 &acrtc_state->vrr_params);
486 dc_stream_adjust_vmin_vmax(
489 &acrtc_state->vrr_params.adjust);
490 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
495 #if defined(CONFIG_DRM_AMD_DC_DCN)
497 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
498 * @interrupt_params: interrupt parameters
500 * Notify DRM's vblank event handler at VSTARTUP
502 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
503 * * We are close enough to VUPDATE - the point of no return for hw
504 * * We are in the fixed portion of variable front porch when vrr is enabled
505 * * We are before VUPDATE, where double-buffered vrr registers are swapped
507 * It is therefore the correct place to signal vblank, send user flip events,
510 static void dm_dcn_crtc_high_irq(void *interrupt_params)
512 struct common_irq_params *irq_params = interrupt_params;
513 struct amdgpu_device *adev = irq_params->adev;
514 struct amdgpu_crtc *acrtc;
515 struct dm_crtc_state *acrtc_state;
518 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
523 acrtc_state = to_dm_crtc_state(acrtc->base.state);
525 DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
526 amdgpu_dm_vrr_active(acrtc_state),
527 acrtc_state->active_planes);
529 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
530 drm_crtc_handle_vblank(&acrtc->base);
532 spin_lock_irqsave(&adev->ddev->event_lock, flags);
534 if (acrtc_state->vrr_params.supported &&
535 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
536 mod_freesync_handle_v_update(
537 adev->dm.freesync_module,
539 &acrtc_state->vrr_params);
541 dc_stream_adjust_vmin_vmax(
544 &acrtc_state->vrr_params.adjust);
548 * If there aren't any active_planes then DCN HUBP may be clock-gated.
549 * In that case, pageflip completion interrupts won't fire and pageflip
550 * completion events won't get delivered. Prevent this by sending
551 * pending pageflip events from here if a flip is still pending.
553 * If any planes are enabled, use dm_pflip_high_irq() instead, to
554 * avoid race conditions between flip programming and completion,
555 * which could cause too early flip completion events.
557 if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
558 acrtc_state->active_planes == 0) {
560 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
562 drm_crtc_vblank_put(&acrtc->base);
564 acrtc->pflip_status = AMDGPU_FLIP_NONE;
567 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
571 static int dm_set_clockgating_state(void *handle,
572 enum amd_clockgating_state state)
577 static int dm_set_powergating_state(void *handle,
578 enum amd_powergating_state state)
583 /* Prototypes of private functions */
584 static int dm_early_init(void* handle);
586 /* Allocate memory for FBC compressed data */
587 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
589 struct drm_device *dev = connector->dev;
590 struct amdgpu_device *adev = dev->dev_private;
591 struct dm_comressor_info *compressor = &adev->dm.compressor;
592 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
593 struct drm_display_mode *mode;
594 unsigned long max_size = 0;
596 if (adev->dm.dc->fbc_compressor == NULL)
599 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
602 if (compressor->bo_ptr)
606 list_for_each_entry(mode, &connector->modes, head) {
607 if (max_size < mode->htotal * mode->vtotal)
608 max_size = mode->htotal * mode->vtotal;
612 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
613 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
614 &compressor->gpu_addr, &compressor->cpu_addr);
617 DRM_ERROR("DM: Failed to initialize FBC\n");
619 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
620 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
627 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
628 int pipe, bool *enabled,
629 unsigned char *buf, int max_bytes)
631 struct drm_device *dev = dev_get_drvdata(kdev);
632 struct amdgpu_device *adev = dev->dev_private;
633 struct drm_connector *connector;
634 struct drm_connector_list_iter conn_iter;
635 struct amdgpu_dm_connector *aconnector;
640 mutex_lock(&adev->dm.audio_lock);
642 drm_connector_list_iter_begin(dev, &conn_iter);
643 drm_for_each_connector_iter(connector, &conn_iter) {
644 aconnector = to_amdgpu_dm_connector(connector);
645 if (aconnector->audio_inst != port)
649 ret = drm_eld_size(connector->eld);
650 memcpy(buf, connector->eld, min(max_bytes, ret));
654 drm_connector_list_iter_end(&conn_iter);
656 mutex_unlock(&adev->dm.audio_lock);
658 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
663 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
664 .get_eld = amdgpu_dm_audio_component_get_eld,
667 static int amdgpu_dm_audio_component_bind(struct device *kdev,
668 struct device *hda_kdev, void *data)
670 struct drm_device *dev = dev_get_drvdata(kdev);
671 struct amdgpu_device *adev = dev->dev_private;
672 struct drm_audio_component *acomp = data;
674 acomp->ops = &amdgpu_dm_audio_component_ops;
676 adev->dm.audio_component = acomp;
681 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
682 struct device *hda_kdev, void *data)
684 struct drm_device *dev = dev_get_drvdata(kdev);
685 struct amdgpu_device *adev = dev->dev_private;
686 struct drm_audio_component *acomp = data;
690 adev->dm.audio_component = NULL;
693 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
694 .bind = amdgpu_dm_audio_component_bind,
695 .unbind = amdgpu_dm_audio_component_unbind,
698 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
705 adev->mode_info.audio.enabled = true;
707 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
709 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
710 adev->mode_info.audio.pin[i].channels = -1;
711 adev->mode_info.audio.pin[i].rate = -1;
712 adev->mode_info.audio.pin[i].bits_per_sample = -1;
713 adev->mode_info.audio.pin[i].status_bits = 0;
714 adev->mode_info.audio.pin[i].category_code = 0;
715 adev->mode_info.audio.pin[i].connected = false;
716 adev->mode_info.audio.pin[i].id =
717 adev->dm.dc->res_pool->audios[i]->inst;
718 adev->mode_info.audio.pin[i].offset = 0;
721 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
725 adev->dm.audio_registered = true;
730 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
735 if (!adev->mode_info.audio.enabled)
738 if (adev->dm.audio_registered) {
739 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
740 adev->dm.audio_registered = false;
743 /* TODO: Disable audio? */
745 adev->mode_info.audio.enabled = false;
748 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
750 struct drm_audio_component *acomp = adev->dm.audio_component;
752 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
753 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
755 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
760 static int dm_dmub_hw_init(struct amdgpu_device *adev)
762 const struct dmcub_firmware_header_v1_0 *hdr;
763 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
764 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
765 const struct firmware *dmub_fw = adev->dm.dmub_fw;
766 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
767 struct abm *abm = adev->dm.dc->res_pool->abm;
768 struct dmub_srv_hw_params hw_params;
769 enum dmub_status status;
770 const unsigned char *fw_inst_const, *fw_bss_data;
771 uint32_t i, fw_inst_const_size, fw_bss_data_size;
775 /* DMUB isn't supported on the ASIC. */
779 DRM_ERROR("No framebuffer info for DMUB service.\n");
784 /* Firmware required for DMUB support. */
785 DRM_ERROR("No firmware provided for DMUB.\n");
789 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
790 if (status != DMUB_STATUS_OK) {
791 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
795 if (!has_hw_support) {
796 DRM_INFO("DMUB unsupported on ASIC\n");
800 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
802 fw_inst_const = dmub_fw->data +
803 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 fw_bss_data = dmub_fw->data +
807 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
808 le32_to_cpu(hdr->inst_const_bytes);
810 /* Copy firmware and bios info into FB memory. */
811 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
812 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
814 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
816 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
818 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
820 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
823 /* Reset regions that need to be reset. */
824 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
825 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
827 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
828 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
830 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
831 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
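/*
 * Summary of the window population above, for illustration only (window
 * indices as defined by the DMUB service):
 *
 *	DMUB_WINDOW_0_INST_CONST  <- firmware code (minus PSP header/footer)
 *	DMUB_WINDOW_2_BSS_DATA    <- firmware bss/data
 *	DMUB_WINDOW_3_VBIOS       <- copy of the system vbios
 *	DMUB_WINDOW_4_MAILBOX     <- zero-initialized
 *	DMUB_WINDOW_5_TRACEBUFF   <- zero-initialized
 *	DMUB_WINDOW_6_FW_STATE    <- zero-initialized
 */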
833 /* Initialize hardware. */
834 memset(&hw_params, 0, sizeof(hw_params));
835 hw_params.fb_base = adev->gmc.fb_start;
836 hw_params.fb_offset = adev->gmc.aper_base;
839 hw_params.psp_version = dmcu->psp_version;
841 for (i = 0; i < fb_info->num_fb; ++i)
842 hw_params.fb[i] = &fb_info->fb[i];
844 status = dmub_srv_hw_init(dmub_srv, &hw_params);
845 if (status != DMUB_STATUS_OK) {
846 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
850 /* Wait for firmware load to finish. */
851 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
852 if (status != DMUB_STATUS_OK)
853 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
855 /* Init DMCU and ABM if available. */
857 dmcu->funcs->dmcu_init(dmcu);
858 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
861 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
862 if (!adev->dm.dc->ctx->dmub_srv) {
863 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
867 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
868 adev->dm.dmcub_fw_version);
873 static int amdgpu_dm_init(struct amdgpu_device *adev)
875 struct dc_init_data init_data;
876 #ifdef CONFIG_DRM_AMD_DC_HDCP
877 struct dc_callback_init init_params;
881 adev->dm.ddev = adev->ddev;
882 adev->dm.adev = adev;
884 /* Zero all the fields */
885 memset(&init_data, 0, sizeof(init_data));
886 #ifdef CONFIG_DRM_AMD_DC_HDCP
887 memset(&init_params, 0, sizeof(init_params));
890 mutex_init(&adev->dm.dc_lock);
891 mutex_init(&adev->dm.audio_lock);
893 if (amdgpu_dm_irq_init(adev)) {
894 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
898 init_data.asic_id.chip_family = adev->family;
900 init_data.asic_id.pci_revision_id = adev->rev_id;
901 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
903 init_data.asic_id.vram_width = adev->gmc.vram_width;
904 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
905 init_data.asic_id.atombios_base_address =
906 adev->mode_info.atom_context->bios;
908 init_data.driver = adev;
910 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
912 if (!adev->dm.cgs_device) {
913 DRM_ERROR("amdgpu: failed to create cgs device.\n");
917 init_data.cgs_device = adev->dm.cgs_device;
919 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
921 switch (adev->asic_type) {
926 init_data.flags.gpu_vm_support = true;
932 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
933 init_data.flags.fbc_support = true;
935 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
936 init_data.flags.multi_mon_pp_mclk_switch = true;
938 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
939 init_data.flags.disable_fractional_pwm = true;
941 init_data.flags.power_down_display_on_boot = true;
943 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
945 /* Display Core create. */
946 adev->dm.dc = dc_create(&init_data);
949 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
951 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
955 r = dm_dmub_hw_init(adev);
957 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
961 dc_hardware_init(adev->dm.dc);
963 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
964 if (!adev->dm.freesync_module) {
966 "amdgpu: failed to initialize freesync_module.\n");
968 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
969 adev->dm.freesync_module);
971 amdgpu_dm_init_color_mod();
973 #ifdef CONFIG_DRM_AMD_DC_HDCP
974 if (adev->asic_type >= CHIP_RAVEN) {
975 adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
977 if (!adev->dm.hdcp_workqueue)
978 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
980 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
982 dc_init_callbacks(adev->dm.dc, &init_params);
985 if (amdgpu_dm_initialize_drm_device(adev)) {
987 "amdgpu: failed to initialize sw for display support.\n");
991 /* Update the actual number of crtcs used */
992 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
994 /* TODO: Add_display_info? */
996 /* TODO use dynamic cursor width */
997 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
998 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1000 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1002 "amdgpu: failed to initialize sw for display support.\n");
1006 #if defined(CONFIG_DEBUG_FS)
1007 if (dtn_debugfs_init(adev))
1008 DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
1011 DRM_DEBUG_DRIVER("KMS initialized.\n");
1015 amdgpu_dm_fini(adev);
1020 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1022 amdgpu_dm_audio_fini(adev);
1024 amdgpu_dm_destroy_drm_device(&adev->dm);
1026 #ifdef CONFIG_DRM_AMD_DC_HDCP
1027 if (adev->dm.hdcp_workqueue) {
1028 hdcp_destroy(adev->dm.hdcp_workqueue);
1029 adev->dm.hdcp_workqueue = NULL;
1033 dc_deinit_callbacks(adev->dm.dc);
1035 if (adev->dm.dc->ctx->dmub_srv) {
1036 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1037 adev->dm.dc->ctx->dmub_srv = NULL;
1040 if (adev->dm.dmub_bo)
1041 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1042 &adev->dm.dmub_bo_gpu_addr,
1043 &adev->dm.dmub_bo_cpu_addr);
1045 /* DC Destroy TODO: Replace destroy DAL */
1047 dc_destroy(&adev->dm.dc);
1049 * TODO: pageflip, vblank interrupt
1051 * amdgpu_dm_irq_fini(adev);
1054 if (adev->dm.cgs_device) {
1055 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1056 adev->dm.cgs_device = NULL;
1058 if (adev->dm.freesync_module) {
1059 mod_freesync_destroy(adev->dm.freesync_module);
1060 adev->dm.freesync_module = NULL;
1063 mutex_destroy(&adev->dm.audio_lock);
1064 mutex_destroy(&adev->dm.dc_lock);
1069 static int load_dmcu_fw(struct amdgpu_device *adev)
1071 const char *fw_name_dmcu = NULL;
1073 const struct dmcu_firmware_header_v1_0 *hdr;
1075 switch (adev->asic_type) {
1085 case CHIP_POLARIS11:
1086 case CHIP_POLARIS10:
1087 case CHIP_POLARIS12:
1098 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1099 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1100 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1101 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1106 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1110 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1111 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1115 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1117 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1118 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1119 adev->dm.fw_dmcu = NULL;
1123 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1128 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1130 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1132 release_firmware(adev->dm.fw_dmcu);
1133 adev->dm.fw_dmcu = NULL;
1137 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1138 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1139 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1140 adev->firmware.fw_size +=
1141 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1143 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1144 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1145 adev->firmware.fw_size +=
1146 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1148 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1150 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1155 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1157 struct amdgpu_device *adev = ctx;
1159 return dm_read_reg(adev->dm.dc->ctx, address);
1162 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1165 struct amdgpu_device *adev = ctx;
1167 return dm_write_reg(adev->dm.dc->ctx, address, value);
1170 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1172 struct dmub_srv_create_params create_params;
1173 struct dmub_srv_region_params region_params;
1174 struct dmub_srv_region_info region_info;
1175 struct dmub_srv_fb_params fb_params;
1176 struct dmub_srv_fb_info *fb_info;
1177 struct dmub_srv *dmub_srv;
1178 const struct dmcub_firmware_header_v1_0 *hdr;
1179 const char *fw_name_dmub;
1180 enum dmub_asic dmub_asic;
1181 enum dmub_status status;
1184 switch (adev->asic_type) {
1186 dmub_asic = DMUB_ASIC_DCN21;
1187 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1191 /* ASIC doesn't support DMUB. */
1195 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1197 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1201 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1203 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1207 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1208 DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
1212 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1213 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1214 AMDGPU_UCODE_ID_DMCUB;
1215 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
1216 adev->firmware.fw_size +=
1217 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1219 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1221 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1222 adev->dm.dmcub_fw_version);
1224 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1225 dmub_srv = adev->dm.dmub_srv;
1228 DRM_ERROR("Failed to allocate DMUB service!\n");
1232 memset(&create_params, 0, sizeof(create_params));
1233 create_params.user_ctx = adev;
1234 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1235 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1236 create_params.asic = dmub_asic;
1238 /* Create the DMUB service. */
1239 status = dmub_srv_create(dmub_srv, &create_params);
1240 if (status != DMUB_STATUS_OK) {
1241 DRM_ERROR("Error creating DMUB service: %d\n", status);
1245 /* Calculate the size of all the regions for the DMUB service. */
1246 memset(&region_params, 0, sizeof(region_params));
1248 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1249 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1250 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1251 region_params.vbios_size = adev->bios_size;
1252 region_params.fw_bss_data =
1253 adev->dm.dmub_fw->data +
1254 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1255 le32_to_cpu(hdr->inst_const_bytes);
1257 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1260 if (status != DMUB_STATUS_OK) {
1261 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1266 * Allocate a framebuffer based on the total size of all the regions.
1267 * TODO: Move this into GART.
1269 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1270 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1271 &adev->dm.dmub_bo_gpu_addr,
1272 &adev->dm.dmub_bo_cpu_addr);
1276 /* Rebase the regions on the framebuffer address. */
1277 memset(&fb_params, 0, sizeof(fb_params));
1278 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1279 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1280 fb_params.region_info = &region_info;
1282 adev->dm.dmub_fb_info =
1283 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1284 fb_info = adev->dm.dmub_fb_info;
1288 "Failed to allocate framebuffer info for DMUB service!\n");
1292 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1293 if (status != DMUB_STATUS_OK) {
1294 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1301 static int dm_sw_init(void *handle)
1303 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1306 r = dm_dmub_sw_init(adev);
1310 return load_dmcu_fw(adev);
1313 static int dm_sw_fini(void *handle)
1315 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1317 kfree(adev->dm.dmub_fb_info);
1318 adev->dm.dmub_fb_info = NULL;
1320 if (adev->dm.dmub_srv) {
1321 dmub_srv_destroy(adev->dm.dmub_srv);
1322 adev->dm.dmub_srv = NULL;
1325 if (adev->dm.dmub_fw) {
1326 release_firmware(adev->dm.dmub_fw);
1327 adev->dm.dmub_fw = NULL;
1330 if (adev->dm.fw_dmcu) {
1331 release_firmware(adev->dm.fw_dmcu);
1332 adev->dm.fw_dmcu = NULL;
1338 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1340 struct amdgpu_dm_connector *aconnector;
1341 struct drm_connector *connector;
1342 struct drm_connector_list_iter iter;
1345 drm_connector_list_iter_begin(dev, &iter);
1346 drm_for_each_connector_iter(connector, &iter) {
1347 aconnector = to_amdgpu_dm_connector(connector);
1348 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1349 aconnector->mst_mgr.aux) {
1350 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1352 aconnector->base.base.id);
1354 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1356 DRM_ERROR("DM_MST: Failed to start MST\n");
1357 aconnector->dc_link->type =
1358 dc_connection_single;
1363 drm_connector_list_iter_end(&iter);
1368 static int dm_late_init(void *handle)
1370 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1372 struct dmcu_iram_parameters params;
1373 unsigned int linear_lut[16];
1375 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1378 for (i = 0; i < 16; i++)
1379 linear_lut[i] = 0xFFFF * i / 15;
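/*
 * For illustration: this builds an evenly spaced 16-entry identity LUT,
 * e.g. linear_lut[0] = 0x0000, linear_lut[1] = 0x1111, ...,
 * linear_lut[15] = 0xFFFF.
 */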
1382 params.backlight_ramping_start = 0xCCCC;
1383 params.backlight_ramping_reduction = 0xCCCCCCCC;
1384 params.backlight_lut_array_size = 16;
1385 params.backlight_lut_array = linear_lut;
1387 /* Min backlight level after ABM reduction; don't allow below 1%:
1388 * 0xFFFF x 0.01 = 0x28F
1390 params.min_abm_backlight = 0x28F;
1392 /* todo will enable for navi10 */
1393 if (adev->asic_type <= CHIP_RAVEN) {
1394 ret = dmcu_load_iram(dmcu, params);
1400 return detect_mst_link_for_all_connectors(adev->ddev);
1403 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1405 struct amdgpu_dm_connector *aconnector;
1406 struct drm_connector *connector;
1407 struct drm_connector_list_iter iter;
1408 struct drm_dp_mst_topology_mgr *mgr;
1410 bool need_hotplug = false;
1412 drm_connector_list_iter_begin(dev, &iter);
1413 drm_for_each_connector_iter(connector, &iter) {
1414 aconnector = to_amdgpu_dm_connector(connector);
1415 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1416 aconnector->mst_port)
1419 mgr = &aconnector->mst_mgr;
1422 drm_dp_mst_topology_mgr_suspend(mgr);
1424 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1426 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1427 need_hotplug = true;
1431 drm_connector_list_iter_end(&iter);
1434 drm_kms_helper_hotplug_event(dev);
1437 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1439 struct smu_context *smu = &adev->smu;
1442 if (!is_support_sw_smu(adev))
1445 * This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1446 * on the Windows driver dc implementation.
1447 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
1448 * should be passed to smu during boot up and resume from s3.
1449 * boot up: dc calculates dcn watermark clock settings within dc_create,
1450 * dcn20_resource_construct
1451 * then call pplib functions below to pass the settings to smu:
1452 * smu_set_watermarks_for_clock_ranges
1453 * smu_set_watermarks_table
1454 * navi10_set_watermarks_table
1455 * smu_write_watermarks_table
1457 * For Renoir, clock settings of dcn watermark are also fixed values.
1458 * dc has implemented a different flow for the Windows driver:
1459 * dc_hardware_init / dc_set_power_state
1464 * smu_set_watermarks_for_clock_ranges
1465 * renoir_set_watermarks_table
1466 * smu_write_watermarks_table
1469 * dc_hardware_init -> amdgpu_dm_init
1470 * dc_set_power_state --> dm_resume
1472 * therefore, this function applies to navi10/12/14 but not Renoir
1475 switch (adev->asic_type) {
1484 mutex_lock(&smu->mutex);
1486 /* pass data to smu controller */
1487 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1488 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1489 ret = smu_write_watermarks_table(smu);
1492 mutex_unlock(&smu->mutex);
1493 DRM_ERROR("Failed to update WMTABLE!\n");
1496 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1499 mutex_unlock(&smu->mutex);
1505 * dm_hw_init() - Initialize DC device
1506 * @handle: The base driver device containing the amdgpu_dm device.
1508 * Initialize the &struct amdgpu_display_manager device. This involves calling
1509 * the initializers of each DM component, then populating the struct with them.
1511 * Although the function implies hardware initialization, both hardware and
1512 * software are initialized here. Splitting them out to their relevant init
1513 * hooks is a future TODO item.
1515 * Some notable things that are initialized here:
1517 * - Display Core, both software and hardware
1518 * - DC modules that we need (freesync and color management)
1519 * - DRM software states
1520 * - Interrupt sources and handlers
1522 * - Debug FS entries, if enabled
1524 static int dm_hw_init(void *handle)
1526 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1527 /* Create DAL display manager */
1528 amdgpu_dm_init(adev);
1529 amdgpu_dm_hpd_init(adev);
1535 * dm_hw_fini() - Teardown DC device
1536 * @handle: The base driver device containing the amdgpu_dm device.
1538 * Teardown components within &struct amdgpu_display_manager that require
1539 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1540 * were loaded. Also flush IRQ workqueues and disable them.
1542 static int dm_hw_fini(void *handle)
1544 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1546 amdgpu_dm_hpd_fini(adev);
1548 amdgpu_dm_irq_fini(adev);
1549 amdgpu_dm_fini(adev);
1553 static int dm_suspend(void *handle)
1555 struct amdgpu_device *adev = handle;
1556 struct amdgpu_display_manager *dm = &adev->dm;
1559 WARN_ON(adev->dm.cached_state);
1560 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1562 s3_handle_mst(adev->ddev, true);
1564 amdgpu_dm_irq_suspend(adev);
1567 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1572 static struct amdgpu_dm_connector *
1573 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1574 struct drm_crtc *crtc)
1577 struct drm_connector_state *new_con_state;
1578 struct drm_connector *connector;
1579 struct drm_crtc *crtc_from_state;
1581 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1582 crtc_from_state = new_con_state->crtc;
1584 if (crtc_from_state == crtc)
1585 return to_amdgpu_dm_connector(connector);
1591 static void emulated_link_detect(struct dc_link *link)
1593 struct dc_sink_init_data sink_init_data = { 0 };
1594 struct display_sink_capability sink_caps = { 0 };
1595 enum dc_edid_status edid_status;
1596 struct dc_context *dc_ctx = link->ctx;
1597 struct dc_sink *sink = NULL;
1598 struct dc_sink *prev_sink = NULL;
1600 link->type = dc_connection_none;
1601 prev_sink = link->local_sink;
1603 if (prev_sink != NULL)
1604 dc_sink_retain(prev_sink);
1606 switch (link->connector_signal) {
1607 case SIGNAL_TYPE_HDMI_TYPE_A: {
1608 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1609 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1613 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1614 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1615 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1619 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1620 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1621 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1625 case SIGNAL_TYPE_LVDS: {
1626 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1627 sink_caps.signal = SIGNAL_TYPE_LVDS;
1631 case SIGNAL_TYPE_EDP: {
1632 sink_caps.transaction_type =
1633 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1634 sink_caps.signal = SIGNAL_TYPE_EDP;
1638 case SIGNAL_TYPE_DISPLAY_PORT: {
1639 sink_caps.transaction_type =
1640 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1641 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1646 DC_ERROR("Invalid connector type! signal:%d\n",
1647 link->connector_signal);
1651 sink_init_data.link = link;
1652 sink_init_data.sink_signal = sink_caps.signal;
1654 sink = dc_sink_create(&sink_init_data);
1656 DC_ERROR("Failed to create sink!\n");
1660 /* dc_sink_create returns a new reference */
1661 link->local_sink = sink;
1663 edid_status = dm_helpers_read_local_edid(
1668 if (edid_status != EDID_OK)
1669 DC_ERROR("Failed to read EDID");
1673 static int dm_resume(void *handle)
1675 struct amdgpu_device *adev = handle;
1676 struct drm_device *ddev = adev->ddev;
1677 struct amdgpu_display_manager *dm = &adev->dm;
1678 struct amdgpu_dm_connector *aconnector;
1679 struct drm_connector *connector;
1680 struct drm_connector_list_iter iter;
1681 struct drm_crtc *crtc;
1682 struct drm_crtc_state *new_crtc_state;
1683 struct dm_crtc_state *dm_new_crtc_state;
1684 struct drm_plane *plane;
1685 struct drm_plane_state *new_plane_state;
1686 struct dm_plane_state *dm_new_plane_state;
1687 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1688 enum dc_connection_type new_connection_type = dc_connection_none;
1691 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1692 dc_release_state(dm_state->context);
1693 dm_state->context = dc_create_state(dm->dc);
1694 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1695 dc_resource_state_construct(dm->dc, dm_state->context);
1697 /* Before powering on DC we need to re-initialize DMUB. */
1698 r = dm_dmub_hw_init(adev);
1700 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1702 /* power on hardware */
1703 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1705 /* program HPD filter */
1709 * early enable HPD Rx IRQ, which should be done before setting the mode,
1710 * as short pulse interrupts are used for MST
1712 amdgpu_dm_irq_resume_early(adev);
1714 /* On resume we need to rewrite the MSTM control bits to enable MST */
1715 s3_handle_mst(ddev, false);
1718 drm_connector_list_iter_begin(ddev, &iter);
1719 drm_for_each_connector_iter(connector, &iter) {
1720 aconnector = to_amdgpu_dm_connector(connector);
1723 * this is the case when traversing through already created
1724 * MST connectors; they should be skipped
1726 if (aconnector->mst_port)
1729 mutex_lock(&aconnector->hpd_lock);
1730 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1731 DRM_ERROR("KMS: Failed to detect connector\n");
1733 if (aconnector->base.force && new_connection_type == dc_connection_none)
1734 emulated_link_detect(aconnector->dc_link);
1736 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1738 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1739 aconnector->fake_enable = false;
1741 if (aconnector->dc_sink)
1742 dc_sink_release(aconnector->dc_sink);
1743 aconnector->dc_sink = NULL;
1744 amdgpu_dm_update_connector_after_detect(aconnector);
1745 mutex_unlock(&aconnector->hpd_lock);
1747 drm_connector_list_iter_end(&iter);
1749 /* Force mode set in atomic commit */
1750 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1751 new_crtc_state->active_changed = true;
1754 * atomic_check is expected to create the dc states. We need to release
1755 * them here, since they were duplicated as part of the suspend procedure.
1758 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1759 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1760 if (dm_new_crtc_state->stream) {
1761 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1762 dc_stream_release(dm_new_crtc_state->stream);
1763 dm_new_crtc_state->stream = NULL;
1767 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1768 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1769 if (dm_new_plane_state->dc_state) {
1770 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1771 dc_plane_state_release(dm_new_plane_state->dc_state);
1772 dm_new_plane_state->dc_state = NULL;
1776 drm_atomic_helper_resume(ddev, dm->cached_state);
1778 dm->cached_state = NULL;
1780 amdgpu_dm_irq_resume_late(adev);
1782 amdgpu_dm_smu_write_watermarks_table(adev);
1790 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1791 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1792 * the base driver's device list to be initialized and torn down accordingly.
1794 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1797 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1799 .early_init = dm_early_init,
1800 .late_init = dm_late_init,
1801 .sw_init = dm_sw_init,
1802 .sw_fini = dm_sw_fini,
1803 .hw_init = dm_hw_init,
1804 .hw_fini = dm_hw_fini,
1805 .suspend = dm_suspend,
1806 .resume = dm_resume,
1807 .is_idle = dm_is_idle,
1808 .wait_for_idle = dm_wait_for_idle,
1809 .check_soft_reset = dm_check_soft_reset,
1810 .soft_reset = dm_soft_reset,
1811 .set_clockgating_state = dm_set_clockgating_state,
1812 .set_powergating_state = dm_set_powergating_state,
1815 const struct amdgpu_ip_block_version dm_ip_block =
1817 .type = AMD_IP_BLOCK_TYPE_DCE,
1821 .funcs = &amdgpu_dm_funcs,
1831 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1832 .fb_create = amdgpu_display_user_framebuffer_create,
1833 .output_poll_changed = drm_fb_helper_output_poll_changed,
1834 .atomic_check = amdgpu_dm_atomic_check,
1835 .atomic_commit = amdgpu_dm_atomic_commit,
1838 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1839 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1843 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1845 struct drm_connector *connector = &aconnector->base;
1846 struct drm_device *dev = connector->dev;
1847 struct dc_sink *sink;
1849 /* MST handled by drm_mst framework */
1850 if (aconnector->mst_mgr.mst_state)
1854 sink = aconnector->dc_link->local_sink;
1856 dc_sink_retain(sink);
1859 * Edid mgmt connector gets first update only in mode_valid hook and then
1860 * the connector sink is set to either a fake or a physical sink depending on link status.
1861 * Skip if already done during boot.
1863 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1864 && aconnector->dc_em_sink) {
1867 * For S3 resume with headless, use the emulated sink to fake the stream
1868 * because on resume connector->sink is set to NULL
1870 mutex_lock(&dev->mode_config.mutex);
1873 if (aconnector->dc_sink) {
1874 amdgpu_dm_update_freesync_caps(connector, NULL);
1876 * retain and release below are used to
1877 * bump up the refcount for the sink because the link doesn't point
1878 * to it anymore after disconnect, so on the next crtc-to-connector
1879 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
1881 dc_sink_release(aconnector->dc_sink);
1883 aconnector->dc_sink = sink;
1884 dc_sink_retain(aconnector->dc_sink);
1885 amdgpu_dm_update_freesync_caps(connector,
1888 amdgpu_dm_update_freesync_caps(connector, NULL);
1889 if (!aconnector->dc_sink) {
1890 aconnector->dc_sink = aconnector->dc_em_sink;
1891 dc_sink_retain(aconnector->dc_sink);
1895 mutex_unlock(&dev->mode_config.mutex);
1898 dc_sink_release(sink);
1903 * TODO: temporary guard to look for proper fix
1904 * if this sink is MST sink, we should not do anything
1906 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1907 dc_sink_release(sink);
1911 if (aconnector->dc_sink == sink) {
1913 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1916 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1917 aconnector->connector_id);
1919 dc_sink_release(sink);
1923 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1924 aconnector->connector_id, aconnector->dc_sink, sink);
1926 mutex_lock(&dev->mode_config.mutex);
1929 * 1. Update status of the drm connector
1930 * 2. Send an event and let userspace tell us what to do
1934 * TODO: check if we still need the S3 mode update workaround.
1935 * If yes, put it here.
1937 if (aconnector->dc_sink)
1938 amdgpu_dm_update_freesync_caps(connector, NULL);
1940 aconnector->dc_sink = sink;
1941 dc_sink_retain(aconnector->dc_sink);
1942 if (sink->dc_edid.length == 0) {
1943 aconnector->edid = NULL;
1944 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1947 (struct edid *) sink->dc_edid.raw_edid;
1950 drm_connector_update_edid_property(connector,
1952 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1955 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1958 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1959 amdgpu_dm_update_freesync_caps(connector, NULL);
1960 drm_connector_update_edid_property(connector, NULL);
1961 aconnector->num_modes = 0;
1962 dc_sink_release(aconnector->dc_sink);
1963 aconnector->dc_sink = NULL;
1964 aconnector->edid = NULL;
1965 #ifdef CONFIG_DRM_AMD_DC_HDCP
1966 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1967 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1968 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1972 mutex_unlock(&dev->mode_config.mutex);
1975 dc_sink_release(sink);
1978 static void handle_hpd_irq(void *param)
1980 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1981 struct drm_connector *connector = &aconnector->base;
1982 struct drm_device *dev = connector->dev;
1983 enum dc_connection_type new_connection_type = dc_connection_none;
1984 #ifdef CONFIG_DRM_AMD_DC_HDCP
1985 struct amdgpu_device *adev = dev->dev_private;
1989 * In case of failure or MST, there is no need to update the connector status or notify the OS
1990 * since (in the MST case) MST does this in its own context.
1992 mutex_lock(&aconnector->hpd_lock);
1994 #ifdef CONFIG_DRM_AMD_DC_HDCP
1995 if (adev->dm.hdcp_workqueue)
1996 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
1998 if (aconnector->fake_enable)
1999 aconnector->fake_enable = false;
2001 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2002 DRM_ERROR("KMS: Failed to detect connector\n");
2004 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2005 emulated_link_detect(aconnector->dc_link);
2008 drm_modeset_lock_all(dev);
2009 dm_restore_drm_connector_state(dev, connector);
2010 drm_modeset_unlock_all(dev);
2012 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2013 drm_kms_helper_hotplug_event(dev);
2015 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2016 amdgpu_dm_update_connector_after_detect(aconnector);
2019 drm_modeset_lock_all(dev);
2020 dm_restore_drm_connector_state(dev, connector);
2021 drm_modeset_unlock_all(dev);
2023 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2024 drm_kms_helper_hotplug_event(dev);
2026 mutex_unlock(&aconnector->hpd_lock);
2030 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2032 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2034 bool new_irq_handled = false;
2036 int dpcd_bytes_to_read;
2038 const int max_process_count = 30;
2039 int process_count = 0;
2041 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2043 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2044 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2045 /* DPCD 0x200 - 0x201 for downstream IRQ */
2046 dpcd_addr = DP_SINK_COUNT;
2048 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2049 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2050 dpcd_addr = DP_SINK_COUNT_ESI;
2053 dret = drm_dp_dpcd_read(
2054 &aconnector->dm_dp_aux.aux,
2057 dpcd_bytes_to_read);
2059 while (dret == dpcd_bytes_to_read &&
2060 process_count < max_process_count) {
2066 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2067 /* handle HPD short pulse irq */
2068 if (aconnector->mst_mgr.mst_state)
2070 &aconnector->mst_mgr,
2074 if (new_irq_handled) {
2075 /* ACK at DPCD to notify downstream */
2076 const int ack_dpcd_bytes_to_write =
2077 dpcd_bytes_to_read - 1;
2079 for (retry = 0; retry < 3; retry++) {
2082 wret = drm_dp_dpcd_write(
2083 &aconnector->dm_dp_aux.aux,
2086 ack_dpcd_bytes_to_write);
2087 if (wret == ack_dpcd_bytes_to_write)
2091 /* check if there is new irq to be handled */
2092 dret = drm_dp_dpcd_read(
2093 &aconnector->dm_dp_aux.aux,
2096 dpcd_bytes_to_read);
2098 new_irq_handled = false;
2104 if (process_count == max_process_count)
2105 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2108 static void handle_hpd_rx_irq(void *param)
2110 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2111 struct drm_connector *connector = &aconnector->base;
2112 struct drm_device *dev = connector->dev;
2113 struct dc_link *dc_link = aconnector->dc_link;
2114 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2115 enum dc_connection_type new_connection_type = dc_connection_none;
2116 #ifdef CONFIG_DRM_AMD_DC_HDCP
2117 union hpd_irq_data hpd_irq_data;
2118 struct amdgpu_device *adev = dev->dev_private;
2120 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2124 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
2125 * conflict; after the i2c helper is implemented, this mutex should be retired.
2128 if (dc_link->type != dc_connection_mst_branch)
2129 mutex_lock(&aconnector->hpd_lock);
2132 #ifdef CONFIG_DRM_AMD_DC_HDCP
2133 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2135 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2137 !is_mst_root_connector) {
2138 /* Downstream Port status changed. */
2139 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2140 DRM_ERROR("KMS: Failed to detect connector\n");
2142 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2143 emulated_link_detect(dc_link);
2145 if (aconnector->fake_enable)
2146 aconnector->fake_enable = false;
2148 amdgpu_dm_update_connector_after_detect(aconnector);
2151 drm_modeset_lock_all(dev);
2152 dm_restore_drm_connector_state(dev, connector);
2153 drm_modeset_unlock_all(dev);
2155 drm_kms_helper_hotplug_event(dev);
2156 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2158 if (aconnector->fake_enable)
2159 aconnector->fake_enable = false;
2161 amdgpu_dm_update_connector_after_detect(aconnector);
2164 drm_modeset_lock_all(dev);
2165 dm_restore_drm_connector_state(dev, connector);
2166 drm_modeset_unlock_all(dev);
2168 drm_kms_helper_hotplug_event(dev);
2171 #ifdef CONFIG_DRM_AMD_DC_HDCP
2172 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2173 if (adev->dm.hdcp_workqueue)
2174 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2177 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2178 (dc_link->type == dc_connection_mst_branch))
2179 dm_handle_hpd_rx_irq(aconnector);
2181 if (dc_link->type != dc_connection_mst_branch) {
2182 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2183 mutex_unlock(&aconnector->hpd_lock);
2187 static void register_hpd_handlers(struct amdgpu_device *adev)
2189 struct drm_device *dev = adev->ddev;
2190 struct drm_connector *connector;
2191 struct amdgpu_dm_connector *aconnector;
2192 const struct dc_link *dc_link;
2193 struct dc_interrupt_params int_params = {0};
2195 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2196 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2198 list_for_each_entry(connector,
2199 &dev->mode_config.connector_list, head) {
2201 aconnector = to_amdgpu_dm_connector(connector);
2202 dc_link = aconnector->dc_link;
2204 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2205 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2206 int_params.irq_source = dc_link->irq_source_hpd;
2208 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2210 (void *) aconnector);
2213 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2215 /* Also register for DP short pulse (hpd_rx). */
2216 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2217 int_params.irq_source = dc_link->irq_source_hpd_rx;
2219 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2221 (void *) aconnector);
2226 /* Register IRQ sources and initialize IRQ callbacks */
2227 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2229 struct dc *dc = adev->dm.dc;
2230 struct common_irq_params *c_irq_params;
2231 struct dc_interrupt_params int_params = {0};
2234 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2236 if (adev->asic_type >= CHIP_VEGA10)
2237 client_id = SOC15_IH_CLIENTID_DCE;
2239 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2240 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2243 * Actions of amdgpu_irq_add_id():
2244 * 1. Register a set() function with base driver.
2245 * Base driver will call set() function to enable/disable an
2246 * interrupt in DC hardware.
2247 * 2. Register amdgpu_dm_irq_handler().
2248 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2249 * coming from DC hardware.
2250 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2251 * for acknowledging and handling. */
2253 /* Use VBLANK interrupt */
2254 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2255 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2257 DRM_ERROR("Failed to add crtc irq id!\n");
2261 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2262 int_params.irq_source =
2263 dc_interrupt_to_irq_source(dc, i, 0);
2265 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2267 c_irq_params->adev = adev;
2268 c_irq_params->irq_src = int_params.irq_source;
2270 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2271 dm_crtc_high_irq, c_irq_params);
2274 /* Use VUPDATE interrupt */
2275 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2276 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2278 DRM_ERROR("Failed to add vupdate irq id!\n");
2282 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2283 int_params.irq_source =
2284 dc_interrupt_to_irq_source(dc, i, 0);
2286 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2288 c_irq_params->adev = adev;
2289 c_irq_params->irq_src = int_params.irq_source;
2291 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2292 dm_vupdate_high_irq, c_irq_params);
2295 /* Use GRPH_PFLIP interrupt */
2296 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2297 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2298 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2300 DRM_ERROR("Failed to add page flip irq id!\n");
2304 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2305 int_params.irq_source =
2306 dc_interrupt_to_irq_source(dc, i, 0);
2308 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2310 c_irq_params->adev = adev;
2311 c_irq_params->irq_src = int_params.irq_source;
2313 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2314 dm_pflip_high_irq, c_irq_params);
2319 r = amdgpu_irq_add_id(adev, client_id,
2320 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2322 DRM_ERROR("Failed to add hpd irq id!\n");
2326 register_hpd_handlers(adev);
2331 #if defined(CONFIG_DRM_AMD_DC_DCN)
2332 /* Register IRQ sources and initialize IRQ callbacks */
2333 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2335 struct dc *dc = adev->dm.dc;
2336 struct common_irq_params *c_irq_params;
2337 struct dc_interrupt_params int_params = {0};
2341 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2342 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2345 * Actions of amdgpu_irq_add_id():
2346 * 1. Register a set() function with base driver.
2347 * Base driver will call set() function to enable/disable an
2348 * interrupt in DC hardware.
2349 * 2. Register amdgpu_dm_irq_handler().
2350 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2351 * coming from DC hardware.
2352 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2353 * for acknowledging and handling.
2356 /* Use VSTARTUP interrupt */
2357 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2358 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2360 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2363 DRM_ERROR("Failed to add crtc irq id!\n");
2367 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2368 int_params.irq_source =
2369 dc_interrupt_to_irq_source(dc, i, 0);
2371 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2373 c_irq_params->adev = adev;
2374 c_irq_params->irq_src = int_params.irq_source;
2376 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2377 dm_dcn_crtc_high_irq, c_irq_params);
2380 /* Use GRPH_PFLIP interrupt */
2381 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2382 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2384 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2386 DRM_ERROR("Failed to add page flip irq id!\n");
2390 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2391 int_params.irq_source =
2392 dc_interrupt_to_irq_source(dc, i, 0);
2394 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2396 c_irq_params->adev = adev;
2397 c_irq_params->irq_src = int_params.irq_source;
2399 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2400 dm_pflip_high_irq, c_irq_params);
2405 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2408 DRM_ERROR("Failed to add hpd irq id!\n");
2412 register_hpd_handlers(adev);
2419 * Acquires the lock for the atomic state object and returns
2420 * the new atomic state.
2422 * This should only be called during atomic check.
2424 static int dm_atomic_get_state(struct drm_atomic_state *state,
2425 struct dm_atomic_state **dm_state)
2427 struct drm_device *dev = state->dev;
2428 struct amdgpu_device *adev = dev->dev_private;
2429 struct amdgpu_display_manager *dm = &adev->dm;
2430 struct drm_private_state *priv_state;
2435 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2436 if (IS_ERR(priv_state))
2437 return PTR_ERR(priv_state);
2439 *dm_state = to_dm_atomic_state(priv_state);
2444 struct dm_atomic_state *
2445 dm_atomic_get_new_state(struct drm_atomic_state *state)
2447 struct drm_device *dev = state->dev;
2448 struct amdgpu_device *adev = dev->dev_private;
2449 struct amdgpu_display_manager *dm = &adev->dm;
2450 struct drm_private_obj *obj;
2451 struct drm_private_state *new_obj_state;
2454 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2455 if (obj->funcs == dm->atomic_obj.funcs)
2456 return to_dm_atomic_state(new_obj_state);
2462 struct dm_atomic_state *
2463 dm_atomic_get_old_state(struct drm_atomic_state *state)
2465 struct drm_device *dev = state->dev;
2466 struct amdgpu_device *adev = dev->dev_private;
2467 struct amdgpu_display_manager *dm = &adev->dm;
2468 struct drm_private_obj *obj;
2469 struct drm_private_state *old_obj_state;
2472 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2473 if (obj->funcs == dm->atomic_obj.funcs)
2474 return to_dm_atomic_state(old_obj_state);
2480 static struct drm_private_state *
2481 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2483 struct dm_atomic_state *old_state, *new_state;
2485 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2489 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2491 old_state = to_dm_atomic_state(obj->state);
2493 if (old_state && old_state->context)
2494 new_state->context = dc_copy_state(old_state->context);
2496 if (!new_state->context) {
2501 return &new_state->base;
2504 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2505 struct drm_private_state *state)
2507 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2509 if (dm_state && dm_state->context)
2510 dc_release_state(dm_state->context);
2515 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2516 .atomic_duplicate_state = dm_atomic_duplicate_state,
2517 .atomic_destroy_state = dm_atomic_destroy_state,
2520 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2522 struct dm_atomic_state *state;
2525 adev->mode_info.mode_config_initialized = true;
2527 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2528 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2530 adev->ddev->mode_config.max_width = 16384;
2531 adev->ddev->mode_config.max_height = 16384;
2533 adev->ddev->mode_config.preferred_depth = 24;
2534 adev->ddev->mode_config.prefer_shadow = 1;
2535 /* indicates support for immediate flip */
2536 adev->ddev->mode_config.async_page_flip = true;
2538 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2540 state = kzalloc(sizeof(*state), GFP_KERNEL);
2544 state->context = dc_create_state(adev->dm.dc);
2545 if (!state->context) {
2550 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2552 drm_atomic_private_obj_init(adev->ddev,
2553 &adev->dm.atomic_obj,
2555 &dm_atomic_state_funcs);
2557 r = amdgpu_display_modeset_create_props(adev);
2561 r = amdgpu_dm_audio_init(adev);
2568 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2569 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2571 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2572 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2574 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2576 #if defined(CONFIG_ACPI)
2577 struct amdgpu_dm_backlight_caps caps;
2579 if (dm->backlight_caps.caps_valid)
2582 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2583 if (caps.caps_valid) {
2584 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2585 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2586 dm->backlight_caps.caps_valid = true;
2588 dm->backlight_caps.min_input_signal =
2589 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2590 dm->backlight_caps.max_input_signal =
2591 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2594 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2595 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2599 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2601 struct amdgpu_display_manager *dm = bl_get_data(bd);
2602 struct amdgpu_dm_backlight_caps caps;
2603 uint32_t brightness = bd->props.brightness;
2605 amdgpu_dm_update_backlight_caps(dm);
2606 caps = dm->backlight_caps;
2608 * The brightness input is in the range 0-255.
2609 * It needs to be rescaled to lie between the
2610 * requested min and max input signal.
2612 * It also needs to be scaled up by 0x101 to
2613 * match the DC interface, which has a range of
2619 * (caps.max_input_signal - caps.min_input_signal)
2620 / AMDGPU_MAX_BL_LEVEL
2621 + caps.min_input_signal * 0x101;
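/*
* For example, with the default caps (min 12, max 255) a requested
* brightness of 255 rescales to 0xFFFF, and a requested brightness of
* 0 rescales to 12 * 0x101 = 0x0C0C.
*/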
2623 if (dc_link_set_backlight_level(dm->backlight_link,
2630 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2632 struct amdgpu_display_manager *dm = bl_get_data(bd);
2633 int ret = dc_link_get_backlight_level(dm->backlight_link);
2635 if (ret == DC_ERROR_UNEXPECTED)
2636 return bd->props.brightness;
2640 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2641 .options = BL_CORE_SUSPENDRESUME,
2642 .get_brightness = amdgpu_dm_backlight_get_brightness,
2643 .update_status = amdgpu_dm_backlight_update_status,
2647 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2650 struct backlight_properties props = { 0 };
2652 amdgpu_dm_update_backlight_caps(dm);
2654 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2655 props.brightness = AMDGPU_MAX_BL_LEVEL;
2656 props.type = BACKLIGHT_RAW;
2658 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2659 dm->adev->ddev->primary->index);
2661 dm->backlight_dev = backlight_device_register(bl_name,
2662 dm->adev->ddev->dev,
2664 &amdgpu_dm_backlight_ops,
2667 if (IS_ERR(dm->backlight_dev))
2668 DRM_ERROR("DM: Backlight registration failed!\n");
2670 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2675 static int initialize_plane(struct amdgpu_display_manager *dm,
2676 struct amdgpu_mode_info *mode_info, int plane_id,
2677 enum drm_plane_type plane_type,
2678 const struct dc_plane_cap *plane_cap)
2680 struct drm_plane *plane;
2681 unsigned long possible_crtcs;
2684 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2686 DRM_ERROR("KMS: Failed to allocate plane\n");
2689 plane->type = plane_type;
2692 * HACK: IGT tests expect that the primary plane for a CRTC
2693 * can only have one possible CRTC. Only expose support for
2694 * any CRTC on planes that will not be used as the primary plane
2695 * for a CRTC, such as overlay or underlay planes.
2697 possible_crtcs = 1 << plane_id;
2698 if (plane_id >= dm->dc->caps.max_streams)
2699 possible_crtcs = 0xff;
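/* e.g. primary plane 0 advertises possible_crtcs 0x1 (CRTC 0 only), while
* planes at or beyond max_streams (overlays) advertise 0xff (any CRTC). */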
2701 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2704 DRM_ERROR("KMS: Failed to initialize plane\n");
2710 mode_info->planes[plane_id] = plane;
2716 static void register_backlight_device(struct amdgpu_display_manager *dm,
2717 struct dc_link *link)
2719 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2720 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2722 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2723 link->type != dc_connection_none) {
2725 * Even if registration fails, we should continue with
2726 * DM initialization because not having backlight control
2727 * is better than a black screen.
2729 amdgpu_dm_register_backlight_device(dm);
2731 if (dm->backlight_dev)
2732 dm->backlight_link = link;
2739 * In this architecture, the association
2740 * connector -> encoder -> crtc
2741 * is not really required. The crtc and connector will hold the
2742 * display_index as an abstraction to use with the DAL component
2744 * Returns 0 on success
2746 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2748 struct amdgpu_display_manager *dm = &adev->dm;
2750 struct amdgpu_dm_connector *aconnector = NULL;
2751 struct amdgpu_encoder *aencoder = NULL;
2752 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2754 int32_t primary_planes;
2755 enum dc_connection_type new_connection_type = dc_connection_none;
2756 const struct dc_plane_cap *plane;
2758 link_cnt = dm->dc->caps.max_links;
2759 if (amdgpu_dm_mode_config_init(dm->adev)) {
2760 DRM_ERROR("DM: Failed to initialize mode config\n");
2764 /* There is one primary plane per CRTC */
2765 primary_planes = dm->dc->caps.max_streams;
2766 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2769 * Initialize primary planes, implicit planes for legacy IOCTLs.
2770 * Order is reversed to match iteration order in atomic check.
2772 for (i = (primary_planes - 1); i >= 0; i--) {
2773 plane = &dm->dc->caps.planes[i];
2775 if (initialize_plane(dm, mode_info, i,
2776 DRM_PLANE_TYPE_PRIMARY, plane)) {
2777 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2783 * Initialize overlay planes, index starting after primary planes.
2784 * These planes have a higher DRM index than the primary planes since
2785 * they should be considered as having a higher z-order.
2786 * Order is reversed to match iteration order in atomic check.
2788 * Only support DCN for now, and only expose one so we don't encourage
2789 * userspace to use up all the pipes.
2791 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2792 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2794 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2797 if (!plane->blends_with_above || !plane->blends_with_below)
2800 if (!plane->pixel_format_support.argb8888)
2803 if (initialize_plane(dm, NULL, primary_planes + i,
2804 DRM_PLANE_TYPE_OVERLAY, plane)) {
2805 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2809 /* Only create one overlay plane. */
2813 for (i = 0; i < dm->dc->caps.max_streams; i++)
2814 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2815 DRM_ERROR("KMS: Failed to initialize crtc\n");
2819 dm->display_indexes_num = dm->dc->caps.max_streams;
2821 /* Loop over all connectors on the board */
2822 for (i = 0; i < link_cnt; i++) {
2823 struct dc_link *link = NULL;
2825 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2827 "KMS: Cannot support more than %d display indexes\n",
2828 AMDGPU_DM_MAX_DISPLAY_INDEX);
2832 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2836 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2840 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2841 DRM_ERROR("KMS: Failed to initialize encoder\n");
2845 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2846 DRM_ERROR("KMS: Failed to initialize connector\n");
2850 link = dc_get_link_at_index(dm->dc, i);
2852 if (!dc_link_detect_sink(link, &new_connection_type))
2853 DRM_ERROR("KMS: Failed to detect connector\n");
2855 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2856 emulated_link_detect(link);
2857 amdgpu_dm_update_connector_after_detect(aconnector);
2859 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2860 amdgpu_dm_update_connector_after_detect(aconnector);
2861 register_backlight_device(dm, link);
2862 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2863 amdgpu_dm_set_psr_caps(link);
2869 /* Software is initialized. Now we can register interrupt handlers. */
2870 switch (adev->asic_type) {
2880 case CHIP_POLARIS11:
2881 case CHIP_POLARIS10:
2882 case CHIP_POLARIS12:
2887 if (dce110_register_irq_handlers(dm->adev)) {
2888 DRM_ERROR("DM: Failed to initialize IRQ\n");
2892 #if defined(CONFIG_DRM_AMD_DC_DCN)
2898 if (dcn10_register_irq_handlers(dm->adev)) {
2899 DRM_ERROR("DM: Failed to initialize IRQ\n");
2905 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2909 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2910 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2920 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
2922 drm_mode_config_cleanup(dm->ddev);
2923 drm_atomic_private_obj_fini(&dm->atomic_obj);
2927 /******************************************************************************
2928 * amdgpu_display_funcs functions
2929 *****************************************************************************/
2932 * dm_bandwidth_update - program display watermarks
2934 * @adev: amdgpu_device pointer
2936 * Calculate and program the display watermarks and line buffer allocation.
2938 static void dm_bandwidth_update(struct amdgpu_device *adev)
2940 /* TODO: implement later */
2943 static const struct amdgpu_display_funcs dm_display_funcs = {
2944 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
2945 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
2946 .backlight_set_level = NULL, /* never called for DC */
2947 .backlight_get_level = NULL, /* never called for DC */
2948 .hpd_sense = NULL,/* called unconditionally */
2949 .hpd_set_polarity = NULL, /* called unconditionally */
2950 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
2951 .page_flip_get_scanoutpos =
2952 dm_crtc_get_scanoutpos,/* called unconditionally */
2953 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
2954 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
2957 #if defined(CONFIG_DEBUG_KERNEL_DC)
2959 static ssize_t s3_debug_store(struct device *device,
2960 struct device_attribute *attr,
2966 struct drm_device *drm_dev = dev_get_drvdata(device);
2967 struct amdgpu_device *adev = drm_dev->dev_private;
2969 ret = kstrtoint(buf, 0, &s3_state);
2974 drm_kms_helper_hotplug_event(adev->ddev);
2979 return ret == 0 ? count : 0;
2982 DEVICE_ATTR_WO(s3_debug);
2986 static int dm_early_init(void *handle)
2988 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2990 switch (adev->asic_type) {
2993 adev->mode_info.num_crtc = 6;
2994 adev->mode_info.num_hpd = 6;
2995 adev->mode_info.num_dig = 6;
2998 adev->mode_info.num_crtc = 4;
2999 adev->mode_info.num_hpd = 6;
3000 adev->mode_info.num_dig = 7;
3004 adev->mode_info.num_crtc = 2;
3005 adev->mode_info.num_hpd = 6;
3006 adev->mode_info.num_dig = 6;
3010 adev->mode_info.num_crtc = 6;
3011 adev->mode_info.num_hpd = 6;
3012 adev->mode_info.num_dig = 7;
3015 adev->mode_info.num_crtc = 3;
3016 adev->mode_info.num_hpd = 6;
3017 adev->mode_info.num_dig = 9;
3020 adev->mode_info.num_crtc = 2;
3021 adev->mode_info.num_hpd = 6;
3022 adev->mode_info.num_dig = 9;
3024 case CHIP_POLARIS11:
3025 case CHIP_POLARIS12:
3026 adev->mode_info.num_crtc = 5;
3027 adev->mode_info.num_hpd = 5;
3028 adev->mode_info.num_dig = 5;
3030 case CHIP_POLARIS10:
3032 adev->mode_info.num_crtc = 6;
3033 adev->mode_info.num_hpd = 6;
3034 adev->mode_info.num_dig = 6;
3039 adev->mode_info.num_crtc = 6;
3040 adev->mode_info.num_hpd = 6;
3041 adev->mode_info.num_dig = 6;
3043 #if defined(CONFIG_DRM_AMD_DC_DCN)
3045 adev->mode_info.num_crtc = 4;
3046 adev->mode_info.num_hpd = 4;
3047 adev->mode_info.num_dig = 4;
3052 adev->mode_info.num_crtc = 6;
3053 adev->mode_info.num_hpd = 6;
3054 adev->mode_info.num_dig = 6;
3057 adev->mode_info.num_crtc = 5;
3058 adev->mode_info.num_hpd = 5;
3059 adev->mode_info.num_dig = 5;
3062 adev->mode_info.num_crtc = 4;
3063 adev->mode_info.num_hpd = 4;
3064 adev->mode_info.num_dig = 4;
3067 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3071 amdgpu_dm_set_irq_funcs(adev);
3073 if (adev->mode_info.funcs == NULL)
3074 adev->mode_info.funcs = &dm_display_funcs;
3077 * Note: Do NOT change adev->audio_endpt_rreg and
3078 * adev->audio_endpt_wreg because they are initialised in
3079 * amdgpu_device_init()
3081 #if defined(CONFIG_DEBUG_KERNEL_DC)
3084 &dev_attr_s3_debug);
3090 static bool modeset_required(struct drm_crtc_state *crtc_state,
3091 struct dc_stream_state *new_stream,
3092 struct dc_stream_state *old_stream)
3094 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3097 if (!crtc_state->enable)
3100 return crtc_state->active;
3103 static bool modereset_required(struct drm_crtc_state *crtc_state)
3105 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3108 return !crtc_state->enable || !crtc_state->active;
3111 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3113 drm_encoder_cleanup(encoder);
3117 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3118 .destroy = amdgpu_dm_encoder_destroy,
3122 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3123 struct dc_scaling_info *scaling_info)
3125 int scale_w, scale_h;
3127 memset(scaling_info, 0, sizeof(*scaling_info));
3129 /* Source is fixed 16.16 but we ignore mantissa for now... */
3130 scaling_info->src_rect.x = state->src_x >> 16;
3131 scaling_info->src_rect.y = state->src_y >> 16;
3133 scaling_info->src_rect.width = state->src_w >> 16;
3134 if (scaling_info->src_rect.width == 0)
3137 scaling_info->src_rect.height = state->src_h >> 16;
3138 if (scaling_info->src_rect.height == 0)
3141 scaling_info->dst_rect.x = state->crtc_x;
3142 scaling_info->dst_rect.y = state->crtc_y;
3144 if (state->crtc_w == 0)
3147 scaling_info->dst_rect.width = state->crtc_w;
3149 if (state->crtc_h == 0)
3152 scaling_info->dst_rect.height = state->crtc_h;
3154 /* DRM doesn't specify clipping on destination output. */
3155 scaling_info->clip_rect = scaling_info->dst_rect;
3157 /* TODO: Validate scaling per-format with DC plane caps */
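/*
* scale_w/scale_h are dst:src ratios in units of 1/1000, limited here to
* 0.25x..16x. For example, scanning a 7680-wide source out to a
* 1920-wide destination gives scale_w = 1920 * 1000 / 7680 = 250,
* right at the downscale limit.
*/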
3158 scale_w = scaling_info->dst_rect.width * 1000 /
3159 scaling_info->src_rect.width;
3161 if (scale_w < 250 || scale_w > 16000)
3164 scale_h = scaling_info->dst_rect.height * 1000 /
3165 scaling_info->src_rect.height;
3167 if (scale_h < 250 || scale_h > 16000)
3171 * The "scaling_quality" can be ignored for now; quality = 0 lets DC
3172 * assume reasonable defaults based on the format.
3178 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3179 uint64_t *tiling_flags)
3181 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3182 int r = amdgpu_bo_reserve(rbo, false);
3185 /* Don't show error message when returning -ERESTARTSYS */
3186 if (r != -ERESTARTSYS)
3187 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3192 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3194 amdgpu_bo_unreserve(rbo);
3199 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3201 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
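/*
* DCC_OFFSET_256B is stored in 256-byte units, so e.g. an encoded offset
* of 4 places the DCC metadata 1024 bytes past the surface address;
* a zero offset means the surface has no DCC metadata.
*/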
3203 return offset ? (address + offset * 256) : 0;
3207 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3208 const struct amdgpu_framebuffer *afb,
3209 const enum surface_pixel_format format,
3210 const enum dc_rotation_angle rotation,
3211 const struct plane_size *plane_size,
3212 const union dc_tiling_info *tiling_info,
3213 const uint64_t info,
3214 struct dc_plane_dcc_param *dcc,
3215 struct dc_plane_address *address)
3217 struct dc *dc = adev->dm.dc;
3218 struct dc_dcc_surface_param input;
3219 struct dc_surface_dcc_cap output;
3220 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3221 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3222 uint64_t dcc_address;
3224 memset(&input, 0, sizeof(input));
3225 memset(&output, 0, sizeof(output));
3230 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3233 if (!dc->cap_funcs.get_dcc_compression_cap)
3236 input.format = format;
3237 input.surface_size.width = plane_size->surface_size.width;
3238 input.surface_size.height = plane_size->surface_size.height;
3239 input.swizzle_mode = tiling_info->gfx9.swizzle;
3241 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3242 input.scan = SCAN_DIRECTION_HORIZONTAL;
3243 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3244 input.scan = SCAN_DIRECTION_VERTICAL;
3246 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3249 if (!output.capable)
3252 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3257 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3258 dcc->independent_64b_blks = i64b;
3260 dcc_address = get_dcc_address(afb->address, info);
3261 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3262 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3268 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3269 const struct amdgpu_framebuffer *afb,
3270 const enum surface_pixel_format format,
3271 const enum dc_rotation_angle rotation,
3272 const uint64_t tiling_flags,
3273 union dc_tiling_info *tiling_info,
3274 struct plane_size *plane_size,
3275 struct dc_plane_dcc_param *dcc,
3276 struct dc_plane_address *address)
3278 const struct drm_framebuffer *fb = &afb->base;
3281 memset(tiling_info, 0, sizeof(*tiling_info));
3282 memset(plane_size, 0, sizeof(*plane_size));
3283 memset(dcc, 0, sizeof(*dcc));
3284 memset(address, 0, sizeof(*address));
3286 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3287 plane_size->surface_size.x = 0;
3288 plane_size->surface_size.y = 0;
3289 plane_size->surface_size.width = fb->width;
3290 plane_size->surface_size.height = fb->height;
3291 plane_size->surface_pitch =
3292 fb->pitches[0] / fb->format->cpp[0];
3294 address->type = PLN_ADDR_TYPE_GRAPHICS;
3295 address->grph.addr.low_part = lower_32_bits(afb->address);
3296 address->grph.addr.high_part = upper_32_bits(afb->address);
3297 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3298 uint64_t chroma_addr = afb->address + fb->offsets[1];
3300 plane_size->surface_size.x = 0;
3301 plane_size->surface_size.y = 0;
3302 plane_size->surface_size.width = fb->width;
3303 plane_size->surface_size.height = fb->height;
3304 plane_size->surface_pitch =
3305 fb->pitches[0] / fb->format->cpp[0];
3307 plane_size->chroma_size.x = 0;
3308 plane_size->chroma_size.y = 0;
3309 /* TODO: set these based on surface format */
3310 plane_size->chroma_size.width = fb->width / 2;
3311 plane_size->chroma_size.height = fb->height / 2;
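/*
* NV12/NV21 use 4:2:0 subsampling, so e.g. a 1920x1080 surface carries
* a 960x540 chroma plane.
*/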
3313 plane_size->chroma_pitch =
3314 fb->pitches[1] / fb->format->cpp[1];
3316 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3317 address->video_progressive.luma_addr.low_part =
3318 lower_32_bits(afb->address);
3319 address->video_progressive.luma_addr.high_part =
3320 upper_32_bits(afb->address);
3321 address->video_progressive.chroma_addr.low_part =
3322 lower_32_bits(chroma_addr);
3323 address->video_progressive.chroma_addr.high_part =
3324 upper_32_bits(chroma_addr);
3327 /* Fill GFX8 params */
3328 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3329 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3331 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3332 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3333 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3334 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3335 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3337 /* XXX fix me for VI */
3338 tiling_info->gfx8.num_banks = num_banks;
3339 tiling_info->gfx8.array_mode =
3340 DC_ARRAY_2D_TILED_THIN1;
3341 tiling_info->gfx8.tile_split = tile_split;
3342 tiling_info->gfx8.bank_width = bankw;
3343 tiling_info->gfx8.bank_height = bankh;
3344 tiling_info->gfx8.tile_aspect = mtaspect;
3345 tiling_info->gfx8.tile_mode =
3346 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3347 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3348 == DC_ARRAY_1D_TILED_THIN1) {
3349 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3352 tiling_info->gfx8.pipe_config =
3353 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3355 if (adev->asic_type == CHIP_VEGA10 ||
3356 adev->asic_type == CHIP_VEGA12 ||
3357 adev->asic_type == CHIP_VEGA20 ||
3358 adev->asic_type == CHIP_NAVI10 ||
3359 adev->asic_type == CHIP_NAVI14 ||
3360 adev->asic_type == CHIP_NAVI12 ||
3361 adev->asic_type == CHIP_RENOIR ||
3362 adev->asic_type == CHIP_RAVEN) {
3363 /* Fill GFX9 params */
3364 tiling_info->gfx9.num_pipes =
3365 adev->gfx.config.gb_addr_config_fields.num_pipes;
3366 tiling_info->gfx9.num_banks =
3367 adev->gfx.config.gb_addr_config_fields.num_banks;
3368 tiling_info->gfx9.pipe_interleave =
3369 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3370 tiling_info->gfx9.num_shader_engines =
3371 adev->gfx.config.gb_addr_config_fields.num_se;
3372 tiling_info->gfx9.max_compressed_frags =
3373 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3374 tiling_info->gfx9.num_rb_per_se =
3375 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3376 tiling_info->gfx9.swizzle =
3377 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3378 tiling_info->gfx9.shaderEnable = 1;
3380 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3381 plane_size, tiling_info,
3382 tiling_flags, dcc, address);
3391 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3392 bool *per_pixel_alpha, bool *global_alpha,
3393 int *global_alpha_value)
3395 *per_pixel_alpha = false;
3396 *global_alpha = false;
3397 *global_alpha_value = 0xff;
3399 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3402 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3403 static const uint32_t alpha_formats[] = {
3404 DRM_FORMAT_ARGB8888,
3405 DRM_FORMAT_RGBA8888,
3406 DRM_FORMAT_ABGR8888,
3408 uint32_t format = plane_state->fb->format->format;
3411 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3412 if (format == alpha_formats[i]) {
3413 *per_pixel_alpha = true;
3419 if (plane_state->alpha < 0xffff) {
3420 *global_alpha = true;
3421 *global_alpha_value = plane_state->alpha >> 8;
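/*
* DRM plane alpha is 16-bit; the shift converts it to the 8-bit value
* DC expects, e.g. 0x8000 (~50%) becomes 0x80.
*/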
3426 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3427 const enum surface_pixel_format format,
3428 enum dc_color_space *color_space)
3432 *color_space = COLOR_SPACE_SRGB;
3434 /* DRM color properties only affect non-RGB formats. */
3435 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3438 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3440 switch (plane_state->color_encoding) {
3441 case DRM_COLOR_YCBCR_BT601:
3443 *color_space = COLOR_SPACE_YCBCR601;
3445 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3448 case DRM_COLOR_YCBCR_BT709:
3450 *color_space = COLOR_SPACE_YCBCR709;
3452 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3455 case DRM_COLOR_YCBCR_BT2020:
3457 *color_space = COLOR_SPACE_2020_YCBCR;
3470 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3471 const struct drm_plane_state *plane_state,
3472 const uint64_t tiling_flags,
3473 struct dc_plane_info *plane_info,
3474 struct dc_plane_address *address)
3476 const struct drm_framebuffer *fb = plane_state->fb;
3477 const struct amdgpu_framebuffer *afb =
3478 to_amdgpu_framebuffer(plane_state->fb);
3479 struct drm_format_name_buf format_name;
3482 memset(plane_info, 0, sizeof(*plane_info));
3484 switch (fb->format->format) {
3486 plane_info->format =
3487 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3489 case DRM_FORMAT_RGB565:
3490 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3492 case DRM_FORMAT_XRGB8888:
3493 case DRM_FORMAT_ARGB8888:
3494 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3496 case DRM_FORMAT_XRGB2101010:
3497 case DRM_FORMAT_ARGB2101010:
3498 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3500 case DRM_FORMAT_XBGR2101010:
3501 case DRM_FORMAT_ABGR2101010:
3502 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3504 case DRM_FORMAT_XBGR8888:
3505 case DRM_FORMAT_ABGR8888:
3506 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3508 case DRM_FORMAT_NV21:
3509 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3511 case DRM_FORMAT_NV12:
3512 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3516 "Unsupported screen format %s\n",
3517 drm_get_format_name(fb->format->format, &format_name));
3521 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3522 case DRM_MODE_ROTATE_0:
3523 plane_info->rotation = ROTATION_ANGLE_0;
3525 case DRM_MODE_ROTATE_90:
3526 plane_info->rotation = ROTATION_ANGLE_90;
3528 case DRM_MODE_ROTATE_180:
3529 plane_info->rotation = ROTATION_ANGLE_180;
3531 case DRM_MODE_ROTATE_270:
3532 plane_info->rotation = ROTATION_ANGLE_270;
3535 plane_info->rotation = ROTATION_ANGLE_0;
3539 plane_info->visible = true;
3540 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3542 plane_info->layer_index = 0;
3544 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3545 &plane_info->color_space);
3549 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3550 plane_info->rotation, tiling_flags,
3551 &plane_info->tiling_info,
3552 &plane_info->plane_size,
3553 &plane_info->dcc, address);
3557 fill_blending_from_plane_state(
3558 plane_state, &plane_info->per_pixel_alpha,
3559 &plane_info->global_alpha, &plane_info->global_alpha_value);
3564 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3565 struct dc_plane_state *dc_plane_state,
3566 struct drm_plane_state *plane_state,
3567 struct drm_crtc_state *crtc_state)
3569 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3570 const struct amdgpu_framebuffer *amdgpu_fb =
3571 to_amdgpu_framebuffer(plane_state->fb);
3572 struct dc_scaling_info scaling_info;
3573 struct dc_plane_info plane_info;
3574 uint64_t tiling_flags;
3577 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3581 dc_plane_state->src_rect = scaling_info.src_rect;
3582 dc_plane_state->dst_rect = scaling_info.dst_rect;
3583 dc_plane_state->clip_rect = scaling_info.clip_rect;
3584 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3586 ret = get_fb_info(amdgpu_fb, &tiling_flags);
3590 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3592 &dc_plane_state->address);
3596 dc_plane_state->format = plane_info.format;
3597 dc_plane_state->color_space = plane_info.color_space;
3598 dc_plane_state->format = plane_info.format;
3599 dc_plane_state->plane_size = plane_info.plane_size;
3600 dc_plane_state->rotation = plane_info.rotation;
3601 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3602 dc_plane_state->stereo_format = plane_info.stereo_format;
3603 dc_plane_state->tiling_info = plane_info.tiling_info;
3604 dc_plane_state->visible = plane_info.visible;
3605 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3606 dc_plane_state->global_alpha = plane_info.global_alpha;
3607 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3608 dc_plane_state->dcc = plane_info.dcc;
3609 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3612 * Always set input transfer function, since plane state is refreshed
3615 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3622 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3623 const struct dm_connector_state *dm_state,
3624 struct dc_stream_state *stream)
3626 enum amdgpu_rmx_type rmx_type;
3628 struct rect src = { 0 }; /* viewport in composition space*/
3629 struct rect dst = { 0 }; /* stream addressable area */
3631 /* no mode. nothing to be done */
3635 /* Full screen scaling by default */
3636 src.width = mode->hdisplay;
3637 src.height = mode->vdisplay;
3638 dst.width = stream->timing.h_addressable;
3639 dst.height = stream->timing.v_addressable;
3642 rmx_type = dm_state->scaling;
3643 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
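/*
* Shrink whichever destination dimension would need the most upscaling
* so the source aspect ratio is preserved, e.g. a 640x480 source on a
* 1920x1080 timing is pillarboxed into a 1440x1080 rectangle.
*/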
3644 if (src.width * dst.height <
3645 src.height * dst.width) {
3646 /* height needs less upscaling/more downscaling */
3647 dst.width = src.width *
3648 dst.height / src.height;
3650 /* width needs less upscaling/more downscaling */
3651 dst.height = src.height *
3652 dst.width / src.width;
3654 } else if (rmx_type == RMX_CENTER) {
3658 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3659 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3661 if (dm_state->underscan_enable) {
3662 dst.x += dm_state->underscan_hborder / 2;
3663 dst.y += dm_state->underscan_vborder / 2;
3664 dst.width -= dm_state->underscan_hborder;
3665 dst.height -= dm_state->underscan_vborder;
3672 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3673 dst.x, dst.y, dst.width, dst.height);
3677 static enum dc_color_depth
3678 convert_color_depth_from_display_info(const struct drm_connector *connector,
3679 const struct drm_connector_state *state,
3687 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3688 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3690 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3692 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3695 bpc = (uint8_t)connector->display_info.bpc;
3696 /* Assume 8 bpc by default if no bpc is specified. */
3697 bpc = bpc ? bpc : 8;
3701 state = connector->state;
3705 * Cap display bpc based on the user requested value.
3707 * The value for state->max_bpc may not be correctly updated
3708 * depending on when the connector gets added to the state
3709 * or if this was called outside of atomic check, so it
3710 * can't be used directly.
3712 bpc = min(bpc, state->max_requested_bpc);
3714 /* Round down to the nearest even number. */
3715 bpc = bpc - (bpc & 1);
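/*
* e.g. a panel reporting 12 bpc with max_requested_bpc = 11 is capped to
* 11 and rounded down to 10, which maps to COLOR_DEPTH_101010 below.
*/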
3721 * Temporary workaround: DRM doesn't parse color depth for
3722 * EDID revisions before 1.4.
3723 * TODO: Fix EDID parsing
3725 return COLOR_DEPTH_888;
3727 return COLOR_DEPTH_666;
3729 return COLOR_DEPTH_888;
3731 return COLOR_DEPTH_101010;
3733 return COLOR_DEPTH_121212;
3735 return COLOR_DEPTH_141414;
3737 return COLOR_DEPTH_161616;
3739 return COLOR_DEPTH_UNDEFINED;
3743 static enum dc_aspect_ratio
3744 get_aspect_ratio(const struct drm_display_mode *mode_in)
3746 /* 1-1 mapping, since both enums follow the HDMI spec. */
3747 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3750 static enum dc_color_space
3751 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3753 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3755 switch (dc_crtc_timing->pixel_encoding) {
3756 case PIXEL_ENCODING_YCBCR422:
3757 case PIXEL_ENCODING_YCBCR444:
3758 case PIXEL_ENCODING_YCBCR420:
3761 * 27030 kHz is the separation point between HDTV and SDTV;
3762 * per the HDMI spec we use YCbCr709 and YCbCr601
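* (for example, 480p at ~27 MHz falls below the threshold and uses the
* YCbCr601 variants, while 1080p at 148.5 MHz uses the YCbCr709 variants)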
3765 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3766 if (dc_crtc_timing->flags.Y_ONLY)
3768 COLOR_SPACE_YCBCR709_LIMITED;
3770 color_space = COLOR_SPACE_YCBCR709;
3772 if (dc_crtc_timing->flags.Y_ONLY)
3774 COLOR_SPACE_YCBCR601_LIMITED;
3776 color_space = COLOR_SPACE_YCBCR601;
3781 case PIXEL_ENCODING_RGB:
3782 color_space = COLOR_SPACE_SRGB;
3793 static bool adjust_colour_depth_from_display_info(
3794 struct dc_crtc_timing *timing_out,
3795 const struct drm_display_info *info)
3797 enum dc_color_depth depth = timing_out->display_color_depth;
3800 normalized_clk = timing_out->pix_clk_100hz / 10;
3801 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3802 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3803 normalized_clk /= 2;
3804 /* Adjust the pixel clock per the HDMI spec based on colour depth */
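/*
* For example, a 594 MHz 4:4:4 timing at 10 bpc would need
* 594000 * 30 / 24 = 742500 kHz, above a sink max_tmds_clock of
* 600000 kHz (the HDMI 2.0 ceiling), so the loop falls back to 8 bpc.
*/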
3806 case COLOR_DEPTH_888:
3808 case COLOR_DEPTH_101010:
3809 normalized_clk = (normalized_clk * 30) / 24;
3811 case COLOR_DEPTH_121212:
3812 normalized_clk = (normalized_clk * 36) / 24;
3814 case COLOR_DEPTH_161616:
3815 normalized_clk = (normalized_clk * 48) / 24;
3818 /* The above depths are the only ones valid for HDMI. */
3821 if (normalized_clk <= info->max_tmds_clock) {
3822 timing_out->display_color_depth = depth;
3825 } while (--depth > COLOR_DEPTH_666);
3829 static void fill_stream_properties_from_drm_display_mode(
3830 struct dc_stream_state *stream,
3831 const struct drm_display_mode *mode_in,
3832 const struct drm_connector *connector,
3833 const struct drm_connector_state *connector_state,
3834 const struct dc_stream_state *old_stream)
3836 struct dc_crtc_timing *timing_out = &stream->timing;
3837 const struct drm_display_info *info = &connector->display_info;
3838 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3839 struct hdmi_vendor_infoframe hv_frame;
3840 struct hdmi_avi_infoframe avi_frame;
3842 memset(&hv_frame, 0, sizeof(hv_frame));
3843 memset(&avi_frame, 0, sizeof(avi_frame));
3845 timing_out->h_border_left = 0;
3846 timing_out->h_border_right = 0;
3847 timing_out->v_border_top = 0;
3848 timing_out->v_border_bottom = 0;
3849 /* TODO: un-hardcode */
3850 if (drm_mode_is_420_only(info, mode_in)
3851 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3852 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3853 else if (drm_mode_is_420_also(info, mode_in)
3854 && aconnector->force_yuv420_output)
3855 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3856 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3857 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3858 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3860 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3862 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3863 timing_out->display_color_depth = convert_color_depth_from_display_info(
3864 connector, connector_state,
3865 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3866 timing_out->scan_type = SCANNING_TYPE_NODATA;
3867 timing_out->hdmi_vic = 0;
3870 timing_out->vic = old_stream->timing.vic;
3871 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3872 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3874 timing_out->vic = drm_match_cea_mode(mode_in);
3875 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3876 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3877 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3878 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3881 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3882 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
3883 timing_out->vic = avi_frame.video_code;
3884 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
3885 timing_out->hdmi_vic = hv_frame.vic;
3888 timing_out->h_addressable = mode_in->crtc_hdisplay;
3889 timing_out->h_total = mode_in->crtc_htotal;
3890 timing_out->h_sync_width =
3891 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
3892 timing_out->h_front_porch =
3893 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
3894 timing_out->v_total = mode_in->crtc_vtotal;
3895 timing_out->v_addressable = mode_in->crtc_vdisplay;
3896 timing_out->v_front_porch =
3897 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
3898 timing_out->v_sync_width =
3899 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
3900 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
3901 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
3903 stream->output_color_space = get_output_color_space(timing_out);
3905 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
3906 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
3907 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3908 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
3909 drm_mode_is_420_also(info, mode_in) &&
3910 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
3911 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3912 adjust_colour_depth_from_display_info(timing_out, info);
3917 static void fill_audio_info(struct audio_info *audio_info,
3918 const struct drm_connector *drm_connector,
3919 const struct dc_sink *dc_sink)
3922 int cea_revision = 0;
3923 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
3925 audio_info->manufacture_id = edid_caps->manufacturer_id;
3926 audio_info->product_id = edid_caps->product_id;
3928 cea_revision = drm_connector->display_info.cea_rev;
3930 strscpy(audio_info->display_name,
3931 edid_caps->display_name,
3932 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
3934 if (cea_revision >= 3) {
3935 audio_info->mode_count = edid_caps->audio_mode_count;
3937 for (i = 0; i < audio_info->mode_count; ++i) {
3938 audio_info->modes[i].format_code =
3939 (enum audio_format_code)
3940 (edid_caps->audio_modes[i].format_code);
3941 audio_info->modes[i].channel_count =
3942 edid_caps->audio_modes[i].channel_count;
3943 audio_info->modes[i].sample_rates.all =
3944 edid_caps->audio_modes[i].sample_rate;
3945 audio_info->modes[i].sample_size =
3946 edid_caps->audio_modes[i].sample_size;
3950 audio_info->flags.all = edid_caps->speaker_flags;
3952 /* TODO: We only check for the progressive mode, check for interlace mode too */
3953 if (drm_connector->latency_present[0]) {
3954 audio_info->video_latency = drm_connector->video_latency[0];
3955 audio_info->audio_latency = drm_connector->audio_latency[0];
3958 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
3963 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
3964 struct drm_display_mode *dst_mode)
3966 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
3967 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
3968 dst_mode->crtc_clock = src_mode->crtc_clock;
3969 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
3970 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
3971 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
3972 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
3973 dst_mode->crtc_htotal = src_mode->crtc_htotal;
3974 dst_mode->crtc_hskew = src_mode->crtc_hskew;
3975 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
3976 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
3977 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
3978 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
3979 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
3983 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
3984 const struct drm_display_mode *native_mode,
3987 if (scale_enabled) {
3988 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3989 } else if (native_mode->clock == drm_mode->clock &&
3990 native_mode->htotal == drm_mode->htotal &&
3991 native_mode->vtotal == drm_mode->vtotal) {
3992 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
3994 /* Neither scaling nor an amdgpu-inserted mode, no need to patch */
3998 static struct dc_sink *
3999 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4001 struct dc_sink_init_data sink_init_data = { 0 };
4002 struct dc_sink *sink = NULL;
4003 sink_init_data.link = aconnector->dc_link;
4004 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4006 sink = dc_sink_create(&sink_init_data);
4008 DRM_ERROR("Failed to create sink!\n");
4011 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4016 static void set_multisync_trigger_params(
4017 struct dc_stream_state *stream)
4019 if (stream->triggered_crtc_reset.enabled) {
4020 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4021 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4025 static void set_master_stream(struct dc_stream_state *stream_set[],
4028 int j, highest_rfr = 0, master_stream = 0;
4030 for (j = 0; j < stream_count; j++) {
4031 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4032 int refresh_rate = 0;
4034 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4035 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
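/*
* e.g. a 1080p timing with h_total 2200, v_total 1125 and pix_clk_100hz
* 1485000 (148.5 MHz) works out to 1485000 * 100 / 2475000 = 60 Hz.
*/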
4036 if (refresh_rate > highest_rfr) {
4037 highest_rfr = refresh_rate;
4042 for (j = 0; j < stream_count; j++) {
4044 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4048 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4052 if (context->stream_count < 2)
4054 for (i = 0; i < context->stream_count ; i++) {
4055 if (!context->streams[i])
4058 * TODO: add a function to read AMD VSDB bits and set
4059 * crtc_sync_master.multi_sync_enabled flag
4060 * For now it's set to false
4062 set_multisync_trigger_params(context->streams[i]);
4064 set_master_stream(context->streams, context->stream_count);
4067 static struct dc_stream_state *
4068 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4069 const struct drm_display_mode *drm_mode,
4070 const struct dm_connector_state *dm_state,
4071 const struct dc_stream_state *old_stream)
4073 struct drm_display_mode *preferred_mode = NULL;
4074 struct drm_connector *drm_connector;
4075 const struct drm_connector_state *con_state =
4076 dm_state ? &dm_state->base : NULL;
4077 struct dc_stream_state *stream = NULL;
4078 struct drm_display_mode mode = *drm_mode;
4079 bool native_mode_found = false;
4080 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4082 int preferred_refresh = 0;
4083 #if defined(CONFIG_DRM_AMD_DC_DCN)
4084 struct dsc_dec_dpcd_caps dsc_caps;
4086 uint32_t link_bandwidth_kbps;
4088 struct dc_sink *sink = NULL;
4089 if (aconnector == NULL) {
4090 DRM_ERROR("aconnector is NULL!\n");
4094 drm_connector = &aconnector->base;
4096 if (!aconnector->dc_sink) {
4097 sink = create_fake_sink(aconnector);
4101 sink = aconnector->dc_sink;
4102 dc_sink_retain(sink);
4105 stream = dc_create_stream_for_sink(sink);
4107 if (stream == NULL) {
4108 DRM_ERROR("Failed to create stream for sink!\n");
4112 stream->dm_stream_context = aconnector;
4114 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4115 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4117 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4118 /* Search for preferred mode */
4119 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4120 native_mode_found = true;
4124 if (!native_mode_found)
4125 preferred_mode = list_first_entry_or_null(
4126 &aconnector->base.modes,
4127 struct drm_display_mode,
4130 mode_refresh = drm_mode_vrefresh(&mode);
4132 if (preferred_mode == NULL) {
4134 * This may not be an error: the use case is when we have no
4135 * usermode calls to reset and set mode upon hotplug. In this
4136 * case, we call set mode ourselves to restore the previous mode,
4137 * and the mode list may not yet be filled in.
4139 DRM_DEBUG_DRIVER("No preferred mode found\n");
4141 decide_crtc_timing_for_drm_display_mode(
4142 &mode, preferred_mode,
4143 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4144 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4148 drm_mode_set_crtcinfo(&mode, 0);
4151 * If scaling is enabled and the refresh rate didn't change,
4152 * we copy the vic and polarities of the old timings
4154 if (!scale || mode_refresh != preferred_refresh)
4155 fill_stream_properties_from_drm_display_mode(stream,
4156 &mode, &aconnector->base, con_state, NULL);
4158 fill_stream_properties_from_drm_display_mode(stream,
4159 &mode, &aconnector->base, con_state, old_stream);
4161 stream->timing.flags.DSC = 0;
4163 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4164 #if defined(CONFIG_DRM_AMD_DC_DCN)
4165 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4166 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4167 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4170 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4171 dc_link_get_link_cap(aconnector->dc_link));
4173 #if defined(CONFIG_DRM_AMD_DC_DCN)
4174 if (dsc_caps.is_dsc_supported)
4175 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4177 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4178 link_bandwidth_kbps,
4180 &stream->timing.dsc_cfg))
4181 stream->timing.flags.DSC = 1;
4185 update_stream_scaling_settings(&mode, dm_state, stream);
4188 &stream->audio_info,
4192 update_stream_signal(stream, sink);
4194 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4195 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4196 if (stream->link->psr_feature_enabled) {
4197 struct dc *core_dc = stream->link->ctx->dc;
4199 if (dc_is_dmcu_initialized(core_dc)) {
4200 struct dmcu *dmcu = core_dc->res_pool->dmcu;
4202 stream->psr_version = dmcu->dmcu_version.psr_version;
4203 mod_build_vsc_infopacket(stream,
4204 &stream->vsc_infopacket,
4205 &stream->use_vsc_sdp_for_colorimetry);
4209 dc_sink_release(sink);
4214 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4216 drm_crtc_cleanup(crtc);
4220 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4221 struct drm_crtc_state *state)
4223 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4225 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4227 dc_stream_release(cur->stream);
4230 __drm_atomic_helper_crtc_destroy_state(state);
4236 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4238 struct dm_crtc_state *state;
4241 dm_crtc_destroy_state(crtc, crtc->state);
4243 state = kzalloc(sizeof(*state), GFP_KERNEL);
4244 if (WARN_ON(!state))
4247 crtc->state = &state->base;
4248 crtc->state->crtc = crtc;
4252 static struct drm_crtc_state *
4253 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4255 struct dm_crtc_state *state, *cur;
4257 cur = to_dm_crtc_state(crtc->state);
4259 if (WARN_ON(!crtc->state))
4262 state = kzalloc(sizeof(*state), GFP_KERNEL);
4266 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4269 state->stream = cur->stream;
4270 dc_stream_retain(state->stream);
4273 state->active_planes = cur->active_planes;
4274 state->interrupts_enabled = cur->interrupts_enabled;
4275 state->vrr_params = cur->vrr_params;
4276 state->vrr_infopacket = cur->vrr_infopacket;
4277 state->abm_level = cur->abm_level;
4278 state->vrr_supported = cur->vrr_supported;
4279 state->freesync_config = cur->freesync_config;
4280 state->crc_src = cur->crc_src;
4281 state->cm_has_degamma = cur->cm_has_degamma;
4282 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4284	/* TODO Duplicate dc_stream objects once the stream object is flattened */
4286 return &state->base;
4289 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4291 enum dc_irq_source irq_source;
4292 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4293 struct amdgpu_device *adev = crtc->dev->dev_private;
4296 /* Do not set vupdate for DCN hardware */
4297 if (adev->family > AMDGPU_FAMILY_AI)
4300 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4302 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4304 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4305 acrtc->crtc_id, enable ? "en" : "dis", rc);
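	/*
	 * Note that the check above effectively makes this a no-op on DCN
	 * ASICs (family > AMDGPU_FAMILY_AI); only pre-DCN parts toggle the
	 * per-OTG VUPDATE interrupt here, using the same IRQ_TYPE + otg_inst
	 * mapping as the VBLANK path below.
	 */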
4309 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4311 enum dc_irq_source irq_source;
4312 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4313 struct amdgpu_device *adev = crtc->dev->dev_private;
4314 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4318 /* vblank irq on -> Only need vupdate irq in vrr mode */
4319 if (amdgpu_dm_vrr_active(acrtc_state))
4320 rc = dm_set_vupdate_irq(crtc, true);
4322 /* vblank irq off -> vupdate irq off */
4323 rc = dm_set_vupdate_irq(crtc, false);
4329 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4330 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4333 static int dm_enable_vblank(struct drm_crtc *crtc)
4335 return dm_set_vblank(crtc, true);
4338 static void dm_disable_vblank(struct drm_crtc *crtc)
4340 dm_set_vblank(crtc, false);
4343	/* Only the options currently available for the driver are implemented */
4344 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4345 .reset = dm_crtc_reset_state,
4346 .destroy = amdgpu_dm_crtc_destroy,
4347 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4348 .set_config = drm_atomic_helper_set_config,
4349 .page_flip = drm_atomic_helper_page_flip,
4350 .atomic_duplicate_state = dm_crtc_duplicate_state,
4351 .atomic_destroy_state = dm_crtc_destroy_state,
4352 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4353 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4354 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4355 .enable_vblank = dm_enable_vblank,
4356 .disable_vblank = dm_disable_vblank,
4359 static enum drm_connector_status
4360 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4363 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4367	 * 1. This interface is NOT called in the context of an HPD irq.
4368	 * 2. This interface *is* called in the context of a user-mode ioctl,
4369	 *    which makes it a bad place for *any* MST-related activity.
4372 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4373 !aconnector->fake_enable)
4374 connected = (aconnector->dc_sink != NULL);
4376 connected = (aconnector->base.force == DRM_FORCE_ON);
4378 return (connected ? connector_status_connected :
4379 connector_status_disconnected);
4382 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4383 struct drm_connector_state *connector_state,
4384 struct drm_property *property,
4387 struct drm_device *dev = connector->dev;
4388 struct amdgpu_device *adev = dev->dev_private;
4389 struct dm_connector_state *dm_old_state =
4390 to_dm_connector_state(connector->state);
4391 struct dm_connector_state *dm_new_state =
4392 to_dm_connector_state(connector_state);
4396 if (property == dev->mode_config.scaling_mode_property) {
4397 enum amdgpu_rmx_type rmx_type;
4400 case DRM_MODE_SCALE_CENTER:
4401 rmx_type = RMX_CENTER;
4403 case DRM_MODE_SCALE_ASPECT:
4404 rmx_type = RMX_ASPECT;
4406 case DRM_MODE_SCALE_FULLSCREEN:
4407 rmx_type = RMX_FULL;
4409 case DRM_MODE_SCALE_NONE:
4415 if (dm_old_state->scaling == rmx_type)
4418 dm_new_state->scaling = rmx_type;
4420 } else if (property == adev->mode_info.underscan_hborder_property) {
4421 dm_new_state->underscan_hborder = val;
4423 } else if (property == adev->mode_info.underscan_vborder_property) {
4424 dm_new_state->underscan_vborder = val;
4426 } else if (property == adev->mode_info.underscan_property) {
4427 dm_new_state->underscan_enable = val;
4429 } else if (property == adev->mode_info.abm_level_property) {
4430 dm_new_state->abm_level = val;
4437 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4438 const struct drm_connector_state *state,
4439 struct drm_property *property,
4442 struct drm_device *dev = connector->dev;
4443 struct amdgpu_device *adev = dev->dev_private;
4444 struct dm_connector_state *dm_state =
4445 to_dm_connector_state(state);
4448 if (property == dev->mode_config.scaling_mode_property) {
4449 switch (dm_state->scaling) {
4451 *val = DRM_MODE_SCALE_CENTER;
4454 *val = DRM_MODE_SCALE_ASPECT;
4457 *val = DRM_MODE_SCALE_FULLSCREEN;
4461 *val = DRM_MODE_SCALE_NONE;
4465 } else if (property == adev->mode_info.underscan_hborder_property) {
4466 *val = dm_state->underscan_hborder;
4468 } else if (property == adev->mode_info.underscan_vborder_property) {
4469 *val = dm_state->underscan_vborder;
4471 } else if (property == adev->mode_info.underscan_property) {
4472 *val = dm_state->underscan_enable;
4474 } else if (property == adev->mode_info.abm_level_property) {
4475 *val = dm_state->abm_level;
4482 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4484 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4486 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4489 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4491 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4492 const struct dc_link *link = aconnector->dc_link;
4493 struct amdgpu_device *adev = connector->dev->dev_private;
4494 struct amdgpu_display_manager *dm = &adev->dm;
4496 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4497 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4499 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4500 link->type != dc_connection_none &&
4501 dm->backlight_dev) {
4502 backlight_device_unregister(dm->backlight_dev);
4503 dm->backlight_dev = NULL;
4507 if (aconnector->dc_em_sink)
4508 dc_sink_release(aconnector->dc_em_sink);
4509 aconnector->dc_em_sink = NULL;
4510 if (aconnector->dc_sink)
4511 dc_sink_release(aconnector->dc_sink);
4512 aconnector->dc_sink = NULL;
4514 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4515 drm_connector_unregister(connector);
4516 drm_connector_cleanup(connector);
4517 if (aconnector->i2c) {
4518 i2c_del_adapter(&aconnector->i2c->base);
4519 kfree(aconnector->i2c);
4525 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4527 struct dm_connector_state *state =
4528 to_dm_connector_state(connector->state);
4530 if (connector->state)
4531 __drm_atomic_helper_connector_destroy_state(connector->state);
4535 state = kzalloc(sizeof(*state), GFP_KERNEL);
4538 state->scaling = RMX_OFF;
4539 state->underscan_enable = false;
4540 state->underscan_hborder = 0;
4541 state->underscan_vborder = 0;
4542 state->base.max_requested_bpc = 8;
4543 state->vcpi_slots = 0;
4545 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4546 state->abm_level = amdgpu_dm_abm_level;
4548 __drm_atomic_helper_connector_reset(connector, &state->base);
4552 struct drm_connector_state *
4553 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4555 struct dm_connector_state *state =
4556 to_dm_connector_state(connector->state);
4558 struct dm_connector_state *new_state =
4559 kmemdup(state, sizeof(*state), GFP_KERNEL);
4564 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4566 new_state->freesync_capable = state->freesync_capable;
4567 new_state->abm_level = state->abm_level;
4568 new_state->scaling = state->scaling;
4569 new_state->underscan_enable = state->underscan_enable;
4570 new_state->underscan_hborder = state->underscan_hborder;
4571 new_state->underscan_vborder = state->underscan_vborder;
4572 new_state->vcpi_slots = state->vcpi_slots;
4573 new_state->pbn = state->pbn;
4574 return &new_state->base;
4577 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4578 .reset = amdgpu_dm_connector_funcs_reset,
4579 .detect = amdgpu_dm_connector_detect,
4580 .fill_modes = drm_helper_probe_single_connector_modes,
4581 .destroy = amdgpu_dm_connector_destroy,
4582 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4583 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4584 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4585 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4586 .early_unregister = amdgpu_dm_connector_unregister
4589 static int get_modes(struct drm_connector *connector)
4591 return amdgpu_dm_connector_get_modes(connector);
4594 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4596 struct dc_sink_init_data init_params = {
4597 .link = aconnector->dc_link,
4598 .sink_signal = SIGNAL_TYPE_VIRTUAL
4602 if (!aconnector->base.edid_blob_ptr) {
4603	DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4604 aconnector->base.name);
4606 aconnector->base.force = DRM_FORCE_OFF;
4607 aconnector->base.override_edid = false;
4611 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4613 aconnector->edid = edid;
4615 aconnector->dc_em_sink = dc_link_add_remote_sink(
4616 aconnector->dc_link,
4618 (edid->extensions + 1) * EDID_LENGTH,
4621 if (aconnector->base.force == DRM_FORCE_ON) {
4622 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4623 aconnector->dc_link->local_sink :
4624 aconnector->dc_em_sink;
4625 dc_sink_retain(aconnector->dc_sink);
4629 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4631 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4634	 * In case of a headless boot with force-on for a DP managed connector,
4635	 * these settings have to be != 0 to get an initial modeset
4637 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4638 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4639 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4643 aconnector->base.override_edid = true;
4644 create_eml_sink(aconnector);
4647 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4648 struct drm_display_mode *mode)
4650 int result = MODE_ERROR;
4651 struct dc_sink *dc_sink;
4652 struct amdgpu_device *adev = connector->dev->dev_private;
4653 /* TODO: Unhardcode stream count */
4654 struct dc_stream_state *stream;
4655 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4656 enum dc_status dc_result = DC_OK;
4658 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4659 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4663	 * Only run this the first time mode_valid is called to initialize
4666 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4667 !aconnector->dc_em_sink)
4668 handle_edid_mgmt(aconnector);
4670 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4672 if (dc_sink == NULL) {
4673 DRM_ERROR("dc_sink is NULL!\n");
4677 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4678 if (stream == NULL) {
4679 DRM_ERROR("Failed to create stream for sink!\n");
4683 dc_result = dc_validate_stream(adev->dm.dc, stream);
4685 if (dc_result == DC_OK)
4688 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4694 dc_stream_release(stream);
4697	/* TODO: error handling */
4701 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4702 struct dc_info_packet *out)
4704 struct hdmi_drm_infoframe frame;
4705 unsigned char buf[30]; /* 26 + 4 */
4709 memset(out, 0, sizeof(*out));
4711 if (!state->hdr_output_metadata)
4714 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4718 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4722 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4726 /* Prepare the infopacket for DC. */
4727 switch (state->connector->connector_type) {
4728 case DRM_MODE_CONNECTOR_HDMIA:
4729 out->hb0 = 0x87; /* type */
4730 out->hb1 = 0x01; /* version */
4731 out->hb2 = 0x1A; /* length */
4732 out->sb[0] = buf[3]; /* checksum */
4736 case DRM_MODE_CONNECTOR_DisplayPort:
4737 case DRM_MODE_CONNECTOR_eDP:
4738 out->hb0 = 0x00; /* sdp id, zero */
4739 out->hb1 = 0x87; /* type */
4740 out->hb2 = 0x1D; /* payload len - 1 */
4741 out->hb3 = (0x13 << 2); /* sdp version */
4742 out->sb[0] = 0x01; /* version */
4743 out->sb[1] = 0x1A; /* length */
4751 memcpy(&out->sb[i], &buf[4], 26);
4754 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4755 sizeof(out->sb), false);
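	/*
	 * Resulting packet layout, for reference:
	 *   HDMI: hb0 = 0x87 (DRM infoframe type), hb1 = 0x01 (version),
	 *         hb2 = 0x1A (26-byte payload), sb[0] = checksum, followed by
	 *         the 26 bytes of static metadata copied from buf[4].
	 *   DP:   hb0 = 0x00 (SDP id), hb1 = 0x87 (type), hb2 = 0x1D
	 *         (payload length - 1), hb3 = SDP version, sb[0] = version,
	 *         sb[1] = length, followed by the 26 bytes of static metadata.
	 */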
4761 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4762 const struct drm_connector_state *new_state)
4764 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4765 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4767 if (old_blob != new_blob) {
4768 if (old_blob && new_blob &&
4769 old_blob->length == new_blob->length)
4770 return memcmp(old_blob->data, new_blob->data,
4780 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4781 struct drm_atomic_state *state)
4783 struct drm_connector_state *new_con_state =
4784 drm_atomic_get_new_connector_state(state, conn);
4785 struct drm_connector_state *old_con_state =
4786 drm_atomic_get_old_connector_state(state, conn);
4787 struct drm_crtc *crtc = new_con_state->crtc;
4788 struct drm_crtc_state *new_crtc_state;
4794 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4795 struct dc_info_packet hdr_infopacket;
4797 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4801 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4802 if (IS_ERR(new_crtc_state))
4803 return PTR_ERR(new_crtc_state);
4806 * DC considers the stream backends changed if the
4807 * static metadata changes. Forcing the modeset also
4808 * gives a simple way for userspace to switch from
4809 * 8bpc to 10bpc when setting the metadata to enter
4812 * Changing the static metadata after it's been
4813 * set is permissible, however. So only force a
4814 * modeset if we're entering or exiting HDR.
4816 new_crtc_state->mode_changed =
4817 !old_con_state->hdr_output_metadata ||
4818 !new_con_state->hdr_output_metadata;
4824 static const struct drm_connector_helper_funcs
4825 amdgpu_dm_connector_helper_funcs = {
4827	 * If a second, larger display is hotplugged in FB console mode, the higher
4828	 * resolution modes will be filtered out by drm_mode_validate_size(), and those
4829	 * modes are missing after the user starts lightdm. So we need to rebuild the
4830	 * mode list in the get_modes callback, not just return the mode count
4832 .get_modes = get_modes,
4833 .mode_valid = amdgpu_dm_connector_mode_valid,
4834 .atomic_check = amdgpu_dm_connector_atomic_check,
4837 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4841 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4843 struct drm_device *dev = new_crtc_state->crtc->dev;
4844 struct drm_plane *plane;
4846 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4847 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4854 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4856 struct drm_atomic_state *state = new_crtc_state->state;
4857 struct drm_plane *plane;
4860 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4861 struct drm_plane_state *new_plane_state;
4863 /* Cursor planes are "fake". */
4864 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4867 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4869 if (!new_plane_state) {
4871	 * The plane is enabled on the CRTC and hasn't changed
4872 * state. This means that it previously passed
4873 * validation and is therefore enabled.
4879 /* We need a framebuffer to be considered enabled. */
4880 num_active += (new_plane_state->fb != NULL);
4887 * Sets whether interrupts should be enabled on a specific CRTC.
4888 * We require that the stream be enabled and that there exist active
4889 * DC planes on the stream.
4892 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
4893 struct drm_crtc_state *new_crtc_state)
4895 struct dm_crtc_state *dm_new_crtc_state =
4896 to_dm_crtc_state(new_crtc_state);
4898 dm_new_crtc_state->active_planes = 0;
4899 dm_new_crtc_state->interrupts_enabled = false;
4901 if (!dm_new_crtc_state->stream)
4904 dm_new_crtc_state->active_planes =
4905 count_crtc_active_planes(new_crtc_state);
4907 dm_new_crtc_state->interrupts_enabled =
4908 dm_new_crtc_state->active_planes > 0;
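/*
 * In effect, the code above keeps interrupts enabled only while the CRTC has
 * a stream and at least one non-cursor plane with a framebuffer attached;
 * cursor-only or fully disabled CRTCs keep their pflip/vblank interrupts off.
 */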
4911 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
4912 struct drm_crtc_state *state)
4914 struct amdgpu_device *adev = crtc->dev->dev_private;
4915 struct dc *dc = adev->dm.dc;
4916 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
4920 * Update interrupt state for the CRTC. This needs to happen whenever
4921 * the CRTC has changed or whenever any of its planes have changed.
4922 * Atomic check satisfies both of these requirements since the CRTC
4923 * is added to the state by DRM during drm_atomic_helper_check_planes.
4925 dm_update_crtc_interrupt_state(crtc, state);
4927 if (unlikely(!dm_crtc_state->stream &&
4928 modeset_required(state, NULL, dm_crtc_state->stream))) {
4933 /* In some use cases, like reset, no stream is attached */
4934 if (!dm_crtc_state->stream)
4938 * We want at least one hardware plane enabled to use
4939 * the stream with a cursor enabled.
4941 if (state->enable && state->active &&
4942 does_crtc_have_active_cursor(state) &&
4943 dm_crtc_state->active_planes == 0)
4946 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
4952 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
4953 const struct drm_display_mode *mode,
4954 struct drm_display_mode *adjusted_mode)
4959 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
4960 .disable = dm_crtc_helper_disable,
4961 .atomic_check = dm_crtc_helper_atomic_check,
4962 .mode_fixup = dm_crtc_helper_mode_fixup
4965 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
4970 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
4972 switch (display_color_depth) {
4973 case COLOR_DEPTH_666:
4975 case COLOR_DEPTH_888:
4977 case COLOR_DEPTH_101010:
4979 case COLOR_DEPTH_121212:
4981 case COLOR_DEPTH_141414:
4983 case COLOR_DEPTH_161616:
4991 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
4992 struct drm_crtc_state *crtc_state,
4993 struct drm_connector_state *conn_state)
4995 struct drm_atomic_state *state = crtc_state->state;
4996 struct drm_connector *connector = conn_state->connector;
4997 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4998 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
4999 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5000 struct drm_dp_mst_topology_mgr *mst_mgr;
5001 struct drm_dp_mst_port *mst_port;
5002 enum dc_color_depth color_depth;
5004 bool is_y420 = false;
5006 if (!aconnector->port || !aconnector->dc_sink)
5009 mst_port = aconnector->port;
5010 mst_mgr = &aconnector->mst_port->mst_mgr;
5012 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5015 if (!state->duplicated) {
5016 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5017 aconnector->force_yuv420_output;
5018 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5020 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5021 clock = adjusted_mode->clock;
5022 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
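		/*
		 * Rough sketch of what drm_dp_calc_pbn_mode() computes, ignoring
		 * rounding: the stream bandwidth in bytes/s with a ~0.6% margin,
		 * expressed in PBN units of 54/64 MB/s, i.e. roughly
		 *
		 *   pbn ~= (clock_khz * bpp / 8) * 1.006 / ((54 / 64) * 1000)
		 *
		 * The VCPI slot count below is then derived from this PBN value
		 * and the per-slot PBN divider of the link.
		 */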
5024 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5027 dm_new_connector_state->pbn,
5029 if (dm_new_connector_state->vcpi_slots < 0) {
5030 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5031 return dm_new_connector_state->vcpi_slots;
5036 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5037 .disable = dm_encoder_helper_disable,
5038 .atomic_check = dm_encoder_helper_atomic_check
5041 #if defined(CONFIG_DRM_AMD_DC_DCN)
5042 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5043 struct dc_state *dc_state)
5045 struct dc_stream_state *stream = NULL;
5046 struct drm_connector *connector;
5047 struct drm_connector_state *new_con_state, *old_con_state;
5048 struct amdgpu_dm_connector *aconnector;
5049 struct dm_connector_state *dm_conn_state;
5050 int i, j, clock, bpp;
5051 int vcpi, pbn_div, pbn = 0;
5053 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5055 aconnector = to_amdgpu_dm_connector(connector);
5057 if (!aconnector->port)
5060 if (!new_con_state || !new_con_state->crtc)
5063 dm_conn_state = to_dm_connector_state(new_con_state);
5065 for (j = 0; j < dc_state->stream_count; j++) {
5066 stream = dc_state->streams[j];
5070 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5079 if (stream->timing.flags.DSC != 1) {
5080 drm_dp_mst_atomic_enable_dsc(state,
5088 pbn_div = dm_mst_get_pbn_divider(stream->link);
5089 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5090 clock = stream->timing.pix_clk_100hz / 10;
5091 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5092 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5099 dm_conn_state->pbn = pbn;
5100 dm_conn_state->vcpi_slots = vcpi;
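		/*
		 * With DSC enabled, the payload above is recomputed from the
		 * compressed rate: dsc_cfg.bits_per_pixel is expressed in
		 * 1/16ths of a bit per pixel, which is why drm_dp_calc_pbn_mode()
		 * is called with its DSC flag set, and pix_clk_100hz / 10
		 * converts the pixel clock to kHz.
		 */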
5106 static void dm_drm_plane_reset(struct drm_plane *plane)
5108 struct dm_plane_state *amdgpu_state = NULL;
5111 plane->funcs->atomic_destroy_state(plane, plane->state);
5113 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5114 WARN_ON(amdgpu_state == NULL);
5117 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5120 static struct drm_plane_state *
5121 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5123 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5125 old_dm_plane_state = to_dm_plane_state(plane->state);
5126 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5127 if (!dm_plane_state)
5130 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5132 if (old_dm_plane_state->dc_state) {
5133 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5134 dc_plane_state_retain(dm_plane_state->dc_state);
5137 return &dm_plane_state->base;
5140 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5141 struct drm_plane_state *state)
5143 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5145 if (dm_plane_state->dc_state)
5146 dc_plane_state_release(dm_plane_state->dc_state);
5148 drm_atomic_helper_plane_destroy_state(plane, state);
5151 static const struct drm_plane_funcs dm_plane_funcs = {
5152 .update_plane = drm_atomic_helper_update_plane,
5153 .disable_plane = drm_atomic_helper_disable_plane,
5154 .destroy = drm_primary_helper_destroy,
5155 .reset = dm_drm_plane_reset,
5156 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5157 .atomic_destroy_state = dm_drm_plane_destroy_state,
5160 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5161 struct drm_plane_state *new_state)
5163 struct amdgpu_framebuffer *afb;
5164 struct drm_gem_object *obj;
5165 struct amdgpu_device *adev;
5166 struct amdgpu_bo *rbo;
5167 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5168 struct list_head list;
5169 struct ttm_validate_buffer tv;
5170 struct ww_acquire_ctx ticket;
5171 uint64_t tiling_flags;
5175 dm_plane_state_old = to_dm_plane_state(plane->state);
5176 dm_plane_state_new = to_dm_plane_state(new_state);
5178 if (!new_state->fb) {
5179 DRM_DEBUG_DRIVER("No FB bound\n");
5183 afb = to_amdgpu_framebuffer(new_state->fb);
5184 obj = new_state->fb->obj[0];
5185 rbo = gem_to_amdgpu_bo(obj);
5186 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5187 INIT_LIST_HEAD(&list);
5191 list_add(&tv.head, &list);
5193 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5195 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5199 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5200 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5202 domain = AMDGPU_GEM_DOMAIN_VRAM;
5204 r = amdgpu_bo_pin(rbo, domain);
5205 if (unlikely(r != 0)) {
5206 if (r != -ERESTARTSYS)
5207 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5208 ttm_eu_backoff_reservation(&ticket, &list);
5212 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5213 if (unlikely(r != 0)) {
5214 amdgpu_bo_unpin(rbo);
5215 ttm_eu_backoff_reservation(&ticket, &list);
5216 DRM_ERROR("%p bind failed\n", rbo);
5220 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5222 ttm_eu_backoff_reservation(&ticket, &list);
5224 afb->address = amdgpu_bo_gpu_offset(rbo);
5228 if (dm_plane_state_new->dc_state &&
5229 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5230 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5232 fill_plane_buffer_attributes(
5233 adev, afb, plane_state->format, plane_state->rotation,
5234 tiling_flags, &plane_state->tiling_info,
5235 &plane_state->plane_size, &plane_state->dcc,
5236 &plane_state->address);
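	/*
	 * At this point the BO is pinned (VRAM for cursor planes, any
	 * supported display domain otherwise) and GART mapped, and its GPU
	 * address, tiling and DCC information have been captured into the
	 * dc_plane_state; cleanup_fb undoes the pin once the framebuffer is
	 * no longer needed.
	 */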
5242 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5243 struct drm_plane_state *old_state)
5245 struct amdgpu_bo *rbo;
5251 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5252 r = amdgpu_bo_reserve(rbo, false);
5254 DRM_ERROR("failed to reserve rbo before unpin\n");
5258 amdgpu_bo_unpin(rbo);
5259 amdgpu_bo_unreserve(rbo);
5260 amdgpu_bo_unref(&rbo);
5263 static int dm_plane_atomic_check(struct drm_plane *plane,
5264 struct drm_plane_state *state)
5266 struct amdgpu_device *adev = plane->dev->dev_private;
5267 struct dc *dc = adev->dm.dc;
5268 struct dm_plane_state *dm_plane_state;
5269 struct dc_scaling_info scaling_info;
5272 dm_plane_state = to_dm_plane_state(state);
5274 if (!dm_plane_state->dc_state)
5277 ret = fill_dc_scaling_info(state, &scaling_info);
5281 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5287 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5288 struct drm_plane_state *new_plane_state)
5290 /* Only support async updates on cursor planes. */
5291 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5297 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5298 struct drm_plane_state *new_state)
5300 struct drm_plane_state *old_state =
5301 drm_atomic_get_old_plane_state(new_state->state, plane);
5303 swap(plane->state->fb, new_state->fb);
5305 plane->state->src_x = new_state->src_x;
5306 plane->state->src_y = new_state->src_y;
5307 plane->state->src_w = new_state->src_w;
5308 plane->state->src_h = new_state->src_h;
5309 plane->state->crtc_x = new_state->crtc_x;
5310 plane->state->crtc_y = new_state->crtc_y;
5311 plane->state->crtc_w = new_state->crtc_w;
5312 plane->state->crtc_h = new_state->crtc_h;
5314 handle_cursor_update(plane, old_state);
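	/*
	 * Async cursor updates bypass the full atomic commit: the new
	 * framebuffer and coordinates are copied straight onto the current
	 * plane state above, and handle_cursor_update() then programs the
	 * hardware cursor directly.
	 */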
5317 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5318 .prepare_fb = dm_plane_helper_prepare_fb,
5319 .cleanup_fb = dm_plane_helper_cleanup_fb,
5320 .atomic_check = dm_plane_atomic_check,
5321 .atomic_async_check = dm_plane_atomic_async_check,
5322 .atomic_async_update = dm_plane_atomic_async_update
5326	 * TODO: these are currently initialized to RGB formats only.
5327	 * For future use cases we should either initialize them dynamically based on
5328	 * plane capabilities, or initialize this array to all formats, so the internal
5329	 * drm check will succeed, and let DC implement the proper check
5331 static const uint32_t rgb_formats[] = {
5332 DRM_FORMAT_XRGB8888,
5333 DRM_FORMAT_ARGB8888,
5334 DRM_FORMAT_RGBA8888,
5335 DRM_FORMAT_XRGB2101010,
5336 DRM_FORMAT_XBGR2101010,
5337 DRM_FORMAT_ARGB2101010,
5338 DRM_FORMAT_ABGR2101010,
5339 DRM_FORMAT_XBGR8888,
5340 DRM_FORMAT_ABGR8888,
5344 static const uint32_t overlay_formats[] = {
5345 DRM_FORMAT_XRGB8888,
5346 DRM_FORMAT_ARGB8888,
5347 DRM_FORMAT_RGBA8888,
5348 DRM_FORMAT_XBGR8888,
5349 DRM_FORMAT_ABGR8888,
5353 static const u32 cursor_formats[] = {
5357 static int get_plane_formats(const struct drm_plane *plane,
5358 const struct dc_plane_cap *plane_cap,
5359 uint32_t *formats, int max_formats)
5361 int i, num_formats = 0;
5364 * TODO: Query support for each group of formats directly from
5365 * DC plane caps. This will require adding more formats to the
5369 switch (plane->type) {
5370 case DRM_PLANE_TYPE_PRIMARY:
5371 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5372 if (num_formats >= max_formats)
5375 formats[num_formats++] = rgb_formats[i];
5378 if (plane_cap && plane_cap->pixel_format_support.nv12)
5379 formats[num_formats++] = DRM_FORMAT_NV12;
5382 case DRM_PLANE_TYPE_OVERLAY:
5383 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5384 if (num_formats >= max_formats)
5387 formats[num_formats++] = overlay_formats[i];
5391 case DRM_PLANE_TYPE_CURSOR:
5392 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5393 if (num_formats >= max_formats)
5396 formats[num_formats++] = cursor_formats[i];
5404 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5405 struct drm_plane *plane,
5406 unsigned long possible_crtcs,
5407 const struct dc_plane_cap *plane_cap)
5409 uint32_t formats[32];
5413 num_formats = get_plane_formats(plane, plane_cap, formats,
5414 ARRAY_SIZE(formats));
5416 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5417 &dm_plane_funcs, formats, num_formats,
5418 NULL, plane->type, NULL);
5422 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5423 plane_cap && plane_cap->per_pixel_alpha) {
5424 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5425 BIT(DRM_MODE_BLEND_PREMULTI);
5427 drm_plane_create_alpha_property(plane);
5428 drm_plane_create_blend_mode_property(plane, blend_caps);
5431 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5432 plane_cap && plane_cap->pixel_format_support.nv12) {
5433 /* This only affects YUV formats. */
5434 drm_plane_create_color_properties(
5436 BIT(DRM_COLOR_YCBCR_BT601) |
5437 BIT(DRM_COLOR_YCBCR_BT709),
5438 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5439 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5440 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5443 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5445 /* Create (reset) the plane state */
5446 if (plane->funcs->reset)
5447 plane->funcs->reset(plane);
5452 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5453 struct drm_plane *plane,
5454 uint32_t crtc_index)
5456 struct amdgpu_crtc *acrtc = NULL;
5457 struct drm_plane *cursor_plane;
5461 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5465 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5466 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5468 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5472 res = drm_crtc_init_with_planes(
5477 &amdgpu_dm_crtc_funcs, NULL);
5482 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5484 /* Create (reset) the plane state */
5485 if (acrtc->base.funcs->reset)
5486 acrtc->base.funcs->reset(&acrtc->base);
5488 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5489 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5491 acrtc->crtc_id = crtc_index;
5492 acrtc->base.enabled = false;
5493 acrtc->otg_inst = -1;
5495 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5496 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5497 true, MAX_COLOR_LUT_ENTRIES);
5498 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5504 kfree(cursor_plane);
5509 static int to_drm_connector_type(enum signal_type st)
5512 case SIGNAL_TYPE_HDMI_TYPE_A:
5513 return DRM_MODE_CONNECTOR_HDMIA;
5514 case SIGNAL_TYPE_EDP:
5515 return DRM_MODE_CONNECTOR_eDP;
5516 case SIGNAL_TYPE_LVDS:
5517 return DRM_MODE_CONNECTOR_LVDS;
5518 case SIGNAL_TYPE_RGB:
5519 return DRM_MODE_CONNECTOR_VGA;
5520 case SIGNAL_TYPE_DISPLAY_PORT:
5521 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5522 return DRM_MODE_CONNECTOR_DisplayPort;
5523 case SIGNAL_TYPE_DVI_DUAL_LINK:
5524 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5525 return DRM_MODE_CONNECTOR_DVID;
5526 case SIGNAL_TYPE_VIRTUAL:
5527 return DRM_MODE_CONNECTOR_VIRTUAL;
5530 return DRM_MODE_CONNECTOR_Unknown;
5534 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5536 struct drm_encoder *encoder;
5538 /* There is only one encoder per connector */
5539 drm_connector_for_each_possible_encoder(connector, encoder)
5545 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5547 struct drm_encoder *encoder;
5548 struct amdgpu_encoder *amdgpu_encoder;
5550 encoder = amdgpu_dm_connector_to_encoder(connector);
5552 if (encoder == NULL)
5555 amdgpu_encoder = to_amdgpu_encoder(encoder);
5557 amdgpu_encoder->native_mode.clock = 0;
5559 if (!list_empty(&connector->probed_modes)) {
5560 struct drm_display_mode *preferred_mode = NULL;
5562 list_for_each_entry(preferred_mode,
5563 &connector->probed_modes,
5565 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5566 amdgpu_encoder->native_mode = *preferred_mode;
5574 static struct drm_display_mode *
5575 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5577 int hdisplay, int vdisplay)
5579 struct drm_device *dev = encoder->dev;
5580 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5581 struct drm_display_mode *mode = NULL;
5582 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5584 mode = drm_mode_duplicate(dev, native_mode);
5589 mode->hdisplay = hdisplay;
5590 mode->vdisplay = vdisplay;
5591 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5592 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5598 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5599 struct drm_connector *connector)
5601 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5602 struct drm_display_mode *mode = NULL;
5603 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5604 struct amdgpu_dm_connector *amdgpu_dm_connector =
5605 to_amdgpu_dm_connector(connector);
5609 char name[DRM_DISPLAY_MODE_LEN];
5612 } common_modes[] = {
5613 { "640x480", 640, 480},
5614 { "800x600", 800, 600},
5615 { "1024x768", 1024, 768},
5616 { "1280x720", 1280, 720},
5617 { "1280x800", 1280, 800},
5618 {"1280x1024", 1280, 1024},
5619 { "1440x900", 1440, 900},
5620 {"1680x1050", 1680, 1050},
5621 {"1600x1200", 1600, 1200},
5622 {"1920x1080", 1920, 1080},
5623 {"1920x1200", 1920, 1200}
5626 n = ARRAY_SIZE(common_modes);
5628 for (i = 0; i < n; i++) {
5629 struct drm_display_mode *curmode = NULL;
5630 bool mode_existed = false;
5632 if (common_modes[i].w > native_mode->hdisplay ||
5633 common_modes[i].h > native_mode->vdisplay ||
5634 (common_modes[i].w == native_mode->hdisplay &&
5635 common_modes[i].h == native_mode->vdisplay))
5638 list_for_each_entry(curmode, &connector->probed_modes, head) {
5639 if (common_modes[i].w == curmode->hdisplay &&
5640 common_modes[i].h == curmode->vdisplay) {
5641 mode_existed = true;
5649 mode = amdgpu_dm_create_common_mode(encoder,
5650 common_modes[i].name, common_modes[i].w,
5652 drm_mode_probed_add(connector, mode);
5653 amdgpu_dm_connector->num_modes++;
5657 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5660 struct amdgpu_dm_connector *amdgpu_dm_connector =
5661 to_amdgpu_dm_connector(connector);
5664 /* empty probed_modes */
5665 INIT_LIST_HEAD(&connector->probed_modes);
5666 amdgpu_dm_connector->num_modes =
5667 drm_add_edid_modes(connector, edid);
5669	/* Sort the probed modes before calling
5670	 * amdgpu_dm_get_native_mode(), since an EDID can have
5671	 * more than one preferred mode. Modes later in the
5672	 * probed mode list could be preferred modes of higher
5673	 * resolution. For example, a 3840x2160 preferred timing
5674	 * in the base EDID and a 4096x2160 preferred resolution
5675	 * in a DID extension block later.
5677 drm_mode_sort(&connector->probed_modes);
5678 amdgpu_dm_get_native_mode(connector);
5680 amdgpu_dm_connector->num_modes = 0;
5684 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5686 struct amdgpu_dm_connector *amdgpu_dm_connector =
5687 to_amdgpu_dm_connector(connector);
5688 struct drm_encoder *encoder;
5689 struct edid *edid = amdgpu_dm_connector->edid;
5691 encoder = amdgpu_dm_connector_to_encoder(connector);
5693 if (!edid || !drm_edid_is_valid(edid)) {
5694 amdgpu_dm_connector->num_modes =
5695 drm_add_modes_noedid(connector, 640, 480);
5697 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5698 amdgpu_dm_connector_add_common_modes(encoder, connector);
5700 amdgpu_dm_fbc_init(connector);
5702 return amdgpu_dm_connector->num_modes;
5705 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5706 struct amdgpu_dm_connector *aconnector,
5708 struct dc_link *link,
5711 struct amdgpu_device *adev = dm->ddev->dev_private;
5714 * Some of the properties below require access to state, like bpc.
5715 * Allocate some default initial connector state with our reset helper.
5717 if (aconnector->base.funcs->reset)
5718 aconnector->base.funcs->reset(&aconnector->base);
5720 aconnector->connector_id = link_index;
5721 aconnector->dc_link = link;
5722 aconnector->base.interlace_allowed = false;
5723 aconnector->base.doublescan_allowed = false;
5724 aconnector->base.stereo_allowed = false;
5725 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5726 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5727 aconnector->audio_inst = -1;
5728 mutex_init(&aconnector->hpd_lock);
5731	 * Configure HPD hot-plug support. The connector->polled default value is 0,
5732	 * which means HPD hot plug is not supported
5734 switch (connector_type) {
5735 case DRM_MODE_CONNECTOR_HDMIA:
5736 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5737 aconnector->base.ycbcr_420_allowed =
5738 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5740 case DRM_MODE_CONNECTOR_DisplayPort:
5741 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5742 aconnector->base.ycbcr_420_allowed =
5743 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5745 case DRM_MODE_CONNECTOR_DVID:
5746 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5752 drm_object_attach_property(&aconnector->base.base,
5753 dm->ddev->mode_config.scaling_mode_property,
5754 DRM_MODE_SCALE_NONE);
5756 drm_object_attach_property(&aconnector->base.base,
5757 adev->mode_info.underscan_property,
5759 drm_object_attach_property(&aconnector->base.base,
5760 adev->mode_info.underscan_hborder_property,
5762 drm_object_attach_property(&aconnector->base.base,
5763 adev->mode_info.underscan_vborder_property,
5766 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5768 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5769 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5770 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5772 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5773 dc_is_dmcu_initialized(adev->dm.dc)) {
5774 drm_object_attach_property(&aconnector->base.base,
5775 adev->mode_info.abm_level_property, 0);
5778 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5779 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5780 connector_type == DRM_MODE_CONNECTOR_eDP) {
5781 drm_object_attach_property(
5782 &aconnector->base.base,
5783 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5785 drm_connector_attach_vrr_capable_property(
5787 #ifdef CONFIG_DRM_AMD_DC_HDCP
5788 if (adev->dm.hdcp_workqueue)
5789 drm_connector_attach_content_protection_property(&aconnector->base, true);
5794 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5795 struct i2c_msg *msgs, int num)
5797 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5798 struct ddc_service *ddc_service = i2c->ddc_service;
5799 struct i2c_command cmd;
5803 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5808 cmd.number_of_payloads = num;
5809 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5812 for (i = 0; i < num; i++) {
5813 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5814 cmd.payloads[i].address = msgs[i].addr;
5815 cmd.payloads[i].length = msgs[i].len;
5816 cmd.payloads[i].data = msgs[i].buf;
5820 ddc_service->ctx->dc,
5821 ddc_service->ddc_pin->hw_info.ddc_channel,
5825 kfree(cmd.payloads);
5829 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5831 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5834 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5835 .master_xfer = amdgpu_dm_i2c_xfer,
5836 .functionality = amdgpu_dm_i2c_func,
5839 static struct amdgpu_i2c_adapter *
5840 create_i2c(struct ddc_service *ddc_service,
5844 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5845 struct amdgpu_i2c_adapter *i2c;
5847 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
5850 i2c->base.owner = THIS_MODULE;
5851 i2c->base.class = I2C_CLASS_DDC;
5852 i2c->base.dev.parent = &adev->pdev->dev;
5853 i2c->base.algo = &amdgpu_dm_i2c_algo;
5854 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
5855 i2c_set_adapdata(&i2c->base, i2c);
5856 i2c->ddc_service = ddc_service;
5857 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
5864 * Note: this function assumes that dc_link_detect() was called for the
5865 * dc_link which will be represented by this aconnector.
5867 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5868 struct amdgpu_dm_connector *aconnector,
5869 uint32_t link_index,
5870 struct amdgpu_encoder *aencoder)
5874 struct dc *dc = dm->dc;
5875 struct dc_link *link = dc_get_link_at_index(dc, link_index);
5876 struct amdgpu_i2c_adapter *i2c;
5878 link->priv = aconnector;
5880 DRM_DEBUG_DRIVER("%s()\n", __func__);
5882 i2c = create_i2c(link->ddc, link->link_index, &res);
5884 DRM_ERROR("Failed to create i2c adapter data\n");
5888 aconnector->i2c = i2c;
5889 res = i2c_add_adapter(&i2c->base);
5892 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
5896 connector_type = to_drm_connector_type(link->connector_signal);
5898 res = drm_connector_init_with_ddc(
5901 &amdgpu_dm_connector_funcs,
5906 DRM_ERROR("connector_init failed\n");
5907 aconnector->connector_id = -1;
5911 drm_connector_helper_add(
5913 &amdgpu_dm_connector_helper_funcs);
5915 amdgpu_dm_connector_init_helper(
5922 drm_connector_attach_encoder(
5923 &aconnector->base, &aencoder->base);
5925 drm_connector_register(&aconnector->base);
5926 #if defined(CONFIG_DEBUG_FS)
5927 connector_debugfs_init(aconnector);
5928 aconnector->debugfs_dpcd_address = 0;
5929 aconnector->debugfs_dpcd_size = 0;
5932 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
5933 || connector_type == DRM_MODE_CONNECTOR_eDP)
5934 amdgpu_dm_initialize_dp_connector(dm, aconnector);
5939 aconnector->i2c = NULL;
5944 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
5946 switch (adev->mode_info.num_crtc) {
5963 static int amdgpu_dm_encoder_init(struct drm_device *dev,
5964 struct amdgpu_encoder *aencoder,
5965 uint32_t link_index)
5967 struct amdgpu_device *adev = dev->dev_private;
5969 int res = drm_encoder_init(dev,
5971 &amdgpu_dm_encoder_funcs,
5972 DRM_MODE_ENCODER_TMDS,
5975 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
5978 aencoder->encoder_id = link_index;
5980 aencoder->encoder_id = -1;
5982 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
5987 static void manage_dm_interrupts(struct amdgpu_device *adev,
5988 struct amdgpu_crtc *acrtc,
5992	 * this is not a correct translation, but it will work as long as the VBLANK
5993	 * constant is the same as PFLIP
5996 amdgpu_display_crtc_idx_to_irq_type(
6001 drm_crtc_vblank_on(&acrtc->base);
6004 &adev->pageflip_irq,
6010 &adev->pageflip_irq,
6012 drm_crtc_vblank_off(&acrtc->base);
6017 is_scaling_state_different(const struct dm_connector_state *dm_state,
6018 const struct dm_connector_state *old_dm_state)
6020 if (dm_state->scaling != old_dm_state->scaling)
6022 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6023 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6025 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6026 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6028 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6029 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6034 #ifdef CONFIG_DRM_AMD_DC_HDCP
6035 static bool is_content_protection_different(struct drm_connector_state *state,
6036 const struct drm_connector_state *old_state,
6037 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6039 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6041 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6042 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6043 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6047	/* CP is being re-enabled, ignore this */
6048 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6049 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6050 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6054 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6055 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6056 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6057 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6059	/* Check if something is connected/enabled; otherwise we would start hdcp with nothing connected/enabled
6060	 * (hot-plug, headless s3, dpms)
6062 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6063 aconnector->dc_sink != NULL)
6066 if (old_state->content_protection == state->content_protection)
6069 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6076 static void remove_stream(struct amdgpu_device *adev,
6077 struct amdgpu_crtc *acrtc,
6078 struct dc_stream_state *stream)
6080 /* this is the update mode case */
6082 acrtc->otg_inst = -1;
6083 acrtc->enabled = false;
6086 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6087 struct dc_cursor_position *position)
6089 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6091 int xorigin = 0, yorigin = 0;
6093 position->enable = false;
6097 if (!crtc || !plane->state->fb)
6100 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6101 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6102 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6104 plane->state->crtc_w,
6105 plane->state->crtc_h);
6109 x = plane->state->crtc_x;
6110 y = plane->state->crtc_y;
6112 if (x <= -amdgpu_crtc->max_cursor_width ||
6113 y <= -amdgpu_crtc->max_cursor_height)
6116 if (crtc->primary->state) {
6117		/* avivo cursors are offset into the total surface */
6118 x += crtc->primary->state->src_x >> 16;
6119 y += crtc->primary->state->src_y >> 16;
6123 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6127 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6130 position->enable = true;
6133 position->x_hotspot = xorigin;
6134 position->y_hotspot = yorigin;
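	/*
	 * Example of the clamping above: a cursor at crtc_x = -10 gets
	 * xorigin = 10, so the cursor is programmed at the surface edge with
	 * its hotspot shifted by the off-screen amount, leaving only the
	 * visible part of the cursor on screen.
	 */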
6139 static void handle_cursor_update(struct drm_plane *plane,
6140 struct drm_plane_state *old_plane_state)
6142 struct amdgpu_device *adev = plane->dev->dev_private;
6143 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6144 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6145 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6146 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6147 uint64_t address = afb ? afb->address : 0;
6148 struct dc_cursor_position position;
6149 struct dc_cursor_attributes attributes;
6152 if (!plane->state->fb && !old_plane_state->fb)
6155 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6157 amdgpu_crtc->crtc_id,
6158 plane->state->crtc_w,
6159 plane->state->crtc_h);
6161 ret = get_cursor_position(plane, crtc, &position);
6165 if (!position.enable) {
6166 /* turn off cursor */
6167 if (crtc_state && crtc_state->stream) {
6168 mutex_lock(&adev->dm.dc_lock);
6169 dc_stream_set_cursor_position(crtc_state->stream,
6171 mutex_unlock(&adev->dm.dc_lock);
6176 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6177 amdgpu_crtc->cursor_height = plane->state->crtc_h;
6179 memset(&attributes, 0, sizeof(attributes));
6180 attributes.address.high_part = upper_32_bits(address);
6181 attributes.address.low_part = lower_32_bits(address);
6182 attributes.width = plane->state->crtc_w;
6183 attributes.height = plane->state->crtc_h;
6184 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6185 attributes.rotation_angle = 0;
6186 attributes.attribute_flags.value = 0;
6188 attributes.pitch = attributes.width;
6190 if (crtc_state->stream) {
6191 mutex_lock(&adev->dm.dc_lock);
6192 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6194 DRM_ERROR("DC failed to set cursor attributes\n");
6196 if (!dc_stream_set_cursor_position(crtc_state->stream,
6198 DRM_ERROR("DC failed to set cursor position\n");
6199 mutex_unlock(&adev->dm.dc_lock);
6203 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6206 assert_spin_locked(&acrtc->base.dev->event_lock);
6207 WARN_ON(acrtc->event);
6209 acrtc->event = acrtc->base.state->event;
6211 /* Set the flip status */
6212 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6214 /* Mark this event as consumed */
6215 acrtc->base.state->event = NULL;
6217 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6221 static void update_freesync_state_on_stream(
6222 struct amdgpu_display_manager *dm,
6223 struct dm_crtc_state *new_crtc_state,
6224 struct dc_stream_state *new_stream,
6225 struct dc_plane_state *surface,
6226 u32 flip_timestamp_in_us)
6228 struct mod_vrr_params vrr_params;
6229 struct dc_info_packet vrr_infopacket = {0};
6230 struct amdgpu_device *adev = dm->adev;
6231 unsigned long flags;
6237 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6238 * For now it's sufficient to just guard against these conditions.
6241 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6244 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6245 vrr_params = new_crtc_state->vrr_params;
6248 mod_freesync_handle_preflip(
6249 dm->freesync_module,
6252 flip_timestamp_in_us,
6255 if (adev->family < AMDGPU_FAMILY_AI &&
6256 amdgpu_dm_vrr_active(new_crtc_state)) {
6257 mod_freesync_handle_v_update(dm->freesync_module,
6258 new_stream, &vrr_params);
6260 /* Need to call this before the frame ends. */
6261 dc_stream_adjust_vmin_vmax(dm->dc,
6262 new_crtc_state->stream,
6263 &vrr_params.adjust);
6267 mod_freesync_build_vrr_infopacket(
6268 dm->freesync_module,
6272 TRANSFER_FUNC_UNKNOWN,
6275 new_crtc_state->freesync_timing_changed |=
6276 (memcmp(&new_crtc_state->vrr_params.adjust,
6278 sizeof(vrr_params.adjust)) != 0);
6280 new_crtc_state->freesync_vrr_info_changed |=
6281 (memcmp(&new_crtc_state->vrr_infopacket,
6283 sizeof(vrr_infopacket)) != 0);
6285 new_crtc_state->vrr_params = vrr_params;
6286 new_crtc_state->vrr_infopacket = vrr_infopacket;
6288 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6289 new_stream->vrr_infopacket = vrr_infopacket;
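	/*
	 * The adjust structure (v_total_min/v_total_max) is what DC programs
	 * so the OTG can stretch the vertical blank per frame, while the VRR
	 * infopacket advertises the refresh range to the sink; both are
	 * cached in the CRTC state so the memcmp()s above can detect changes
	 * on later flips.
	 */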
6291 if (new_crtc_state->freesync_vrr_info_changed)
6292 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6293 new_crtc_state->base.crtc->base.id,
6294 (int)new_crtc_state->base.vrr_enabled,
6295 (int)vrr_params.state);
6297 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6300 static void pre_update_freesync_state_on_stream(
6301 struct amdgpu_display_manager *dm,
6302 struct dm_crtc_state *new_crtc_state)
6304 struct dc_stream_state *new_stream = new_crtc_state->stream;
6305 struct mod_vrr_params vrr_params;
6306 struct mod_freesync_config config = new_crtc_state->freesync_config;
6307 struct amdgpu_device *adev = dm->adev;
6308 unsigned long flags;
6314 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6315 * For now it's sufficient to just guard against these conditions.
6317 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6320 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6321 vrr_params = new_crtc_state->vrr_params;
6323 if (new_crtc_state->vrr_supported &&
6324 config.min_refresh_in_uhz &&
6325 config.max_refresh_in_uhz) {
6326 config.state = new_crtc_state->base.vrr_enabled ?
6327 VRR_STATE_ACTIVE_VARIABLE :
6330 config.state = VRR_STATE_UNSUPPORTED;
6333 mod_freesync_build_vrr_params(dm->freesync_module,
6335 &config, &vrr_params);
6337 new_crtc_state->freesync_timing_changed |=
6338 (memcmp(&new_crtc_state->vrr_params.adjust,
6340 sizeof(vrr_params.adjust)) != 0);
6342 new_crtc_state->vrr_params = vrr_params;
6343 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6346 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6347 struct dm_crtc_state *new_state)
6349 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6350 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6352 if (!old_vrr_active && new_vrr_active) {
6353 /* Transition VRR inactive -> active:
6354		 * While VRR is active, we must not disable the vblank irq, as a
6355		 * reenable after a disable would compute bogus vblank/pflip
6356		 * timestamps if it happened inside the display front porch.
6358 * We also need vupdate irq for the actual core vblank handling
6361 dm_set_vupdate_irq(new_state->base.crtc, true);
6362 drm_crtc_vblank_get(new_state->base.crtc);
6363 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6364 __func__, new_state->base.crtc->base.id);
6365 } else if (old_vrr_active && !new_vrr_active) {
6366 /* Transition VRR active -> inactive:
6367 * Allow vblank irq disable again for fixed refresh rate.
6369 dm_set_vupdate_irq(new_state->base.crtc, false);
6370 drm_crtc_vblank_put(new_state->base.crtc);
6371 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6372 __func__, new_state->base.crtc->base.id);
6376 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6378 struct drm_plane *plane;
6379 struct drm_plane_state *old_plane_state, *new_plane_state;
6383 * TODO: Make this per-stream so we don't issue redundant updates for
6384 * commits with multiple streams.
6386 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6388 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6389 handle_cursor_update(plane, old_plane_state);
6392 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6393 struct dc_state *dc_state,
6394 struct drm_device *dev,
6395 struct amdgpu_display_manager *dm,
6396 struct drm_crtc *pcrtc,
6397 bool wait_for_vblank)
6400 uint64_t timestamp_ns;
6401 struct drm_plane *plane;
6402 struct drm_plane_state *old_plane_state, *new_plane_state;
6403 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6404 struct drm_crtc_state *new_pcrtc_state =
6405 drm_atomic_get_new_crtc_state(state, pcrtc);
6406 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6407 struct dm_crtc_state *dm_old_crtc_state =
6408 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6409 int planes_count = 0, vpos, hpos;
6411 unsigned long flags;
6412 struct amdgpu_bo *abo;
6413 uint64_t tiling_flags;
6414 uint32_t target_vblank, last_flip_vblank;
6415 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6416 bool pflip_present = false;
6417 bool swizzle = true;
6419 struct dc_surface_update surface_updates[MAX_SURFACES];
6420 struct dc_plane_info plane_infos[MAX_SURFACES];
6421 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6422 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6423 struct dc_stream_update stream_update;
6426 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6429 dm_error("Failed to allocate update bundle\n");
6434 * Disable the cursor first if we're disabling all the planes.
6435 * It'll remain on the screen after the planes are re-enabled
6438 if (acrtc_state->active_planes == 0)
6439 amdgpu_dm_commit_cursors(state);
6441 /* update planes when needed */
6442 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6443 struct drm_crtc *crtc = new_plane_state->crtc;
6444 struct drm_crtc_state *new_crtc_state;
6445 struct drm_framebuffer *fb = new_plane_state->fb;
6446 bool plane_needs_flip;
6447 struct dc_plane_state *dc_plane;
6448 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6450 /* Cursor plane is handled after stream updates */
6451 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6454 if (!fb || !crtc || pcrtc != crtc)
6457 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6458 if (!new_crtc_state->active)
6461 dc_plane = dm_new_plane_state->dc_state;
6463 if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
6466 bundle->surface_updates[planes_count].surface = dc_plane;
6467 if (new_pcrtc_state->color_mgmt_changed) {
6468 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6469 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6472 fill_dc_scaling_info(new_plane_state,
6473 &bundle->scaling_infos[planes_count]);
6475 bundle->surface_updates[planes_count].scaling_info =
6476 &bundle->scaling_infos[planes_count];
6478 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6480 pflip_present = pflip_present || plane_needs_flip;
6482 if (!plane_needs_flip) {
6487 abo = gem_to_amdgpu_bo(fb->obj[0]);
6490 * Wait for all fences on this FB. Do a limited wait to avoid
6491 * deadlock during GPU reset, when this fence will not signal
6492 * but we still hold the reservation lock for the BO.
6494 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6496 msecs_to_jiffies(5000));
6497 if (unlikely(r <= 0))
6498 DRM_ERROR("Waiting for fences timed out!");
6501 * TODO: This might fail and hence is better not used; wait
6502 * explicitly on fences instead,
6503 * and in general this should only be done for
6504 * blocking commits, as per the framework helpers.
6506 r = amdgpu_bo_reserve(abo, true);
6507 if (unlikely(r != 0))
6508 DRM_ERROR("failed to reserve buffer before flip\n");
6510 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6512 amdgpu_bo_unreserve(abo);
6514 fill_dc_plane_info_and_addr(
6515 dm->adev, new_plane_state, tiling_flags,
6516 &bundle->plane_infos[planes_count],
6517 &bundle->flip_addrs[planes_count].address);
6519 bundle->surface_updates[planes_count].plane_info =
6520 &bundle->plane_infos[planes_count];
6523 * Only allow immediate flips for fast updates that don't
6524 * change FB pitch, DCC state, rotation or mirroring.
6526 bundle->flip_addrs[planes_count].flip_immediate =
6527 crtc->state->async_flip &&
6528 acrtc_state->update_type == UPDATE_TYPE_FAST;
6530 timestamp_ns = ktime_get_ns();
6531 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6532 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6533 bundle->surface_updates[planes_count].surface = dc_plane;
6535 if (!bundle->surface_updates[planes_count].surface) {
6536 DRM_ERROR("No surface for CRTC: id=%d\n",
6537 acrtc_attach->crtc_id);
6541 if (plane == pcrtc->primary)
6542 update_freesync_state_on_stream(
6545 acrtc_state->stream,
6547 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6549 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6551 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6552 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6558 if (pflip_present) {
6560 /* Use old throttling in non-vrr fixed refresh rate mode
6561 * to keep flip scheduling based on target vblank counts
6562 * working in a backwards compatible way, e.g., for
6563 * clients using the GLX_OML_sync_control extension or
6564 * DRI3/Present extension with defined target_msc.
6566 last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
6569 /* For variable refresh rate mode only:
6570 * Get vblank of last completed flip to avoid > 1 vrr
6571 * flips per video frame by use of throttling, but allow
6572 * flip programming anywhere in the possibly large
6573 * variable vrr vblank interval for fine-grained flip
6574 * timing control and more opportunity to avoid stutter
6575 * on late submission of flips.
6577 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6578 last_flip_vblank = acrtc_attach->last_flip_vblank;
6579 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
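/*
 * Note: wait_for_vblank acts as a 0/1 offset here, so the flip targets
 * either the vblank of the last completed flip or the one right after it.
 */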
6582 target_vblank = last_flip_vblank + wait_for_vblank;
6585 * Wait until we're out of the vertical blank period before the one
6586 * targeted by the flip
6588 while ((acrtc_attach->enabled &&
6589 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6590 0, &vpos, &hpos, NULL,
6591 NULL, &pcrtc->hwmode)
6592 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6593 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6594 (int)(target_vblank -
6595 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
6596 usleep_range(1000, 1100);
6599 if (acrtc_attach->base.state->event) {
6600 drm_crtc_vblank_get(pcrtc);
6602 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6604 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6605 prepare_flip_isr(acrtc_attach);
6607 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6610 if (acrtc_state->stream) {
6611 if (acrtc_state->freesync_vrr_info_changed)
6612 bundle->stream_update.vrr_infopacket =
6613 &acrtc_state->stream->vrr_infopacket;
6617 /* Update the planes if changed or disable if we don't have any. */
6618 if ((planes_count || acrtc_state->active_planes == 0) &&
6619 acrtc_state->stream) {
6620 bundle->stream_update.stream = acrtc_state->stream;
6621 if (new_pcrtc_state->mode_changed) {
6622 bundle->stream_update.src = acrtc_state->stream->src;
6623 bundle->stream_update.dst = acrtc_state->stream->dst;
6626 if (new_pcrtc_state->color_mgmt_changed) {
6628 * TODO: This isn't fully correct since we've actually
6629 * already modified the stream in place.
6631 bundle->stream_update.gamut_remap =
6632 &acrtc_state->stream->gamut_remap_matrix;
6633 bundle->stream_update.output_csc_transform =
6634 &acrtc_state->stream->csc_color_matrix;
6635 bundle->stream_update.out_transfer_func =
6636 acrtc_state->stream->out_transfer_func;
6639 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6640 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6641 bundle->stream_update.abm_level = &acrtc_state->abm_level;
6644 * If FreeSync state on the stream has changed then we need to
6645 * re-adjust the min/max bounds now that DC doesn't handle this
6646 * as part of commit.
6648 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6649 amdgpu_dm_vrr_active(acrtc_state)) {
6650 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6651 dc_stream_adjust_vmin_vmax(
6652 dm->dc, acrtc_state->stream,
6653 &acrtc_state->vrr_params.adjust);
6654 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6656 mutex_lock(&dm->dc_lock);
6657 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6658 acrtc_state->stream->link->psr_allow_active)
6659 amdgpu_dm_psr_disable(acrtc_state->stream);
6661 dc_commit_updates_for_stream(dm->dc,
6662 bundle->surface_updates,
6664 acrtc_state->stream,
6665 &bundle->stream_update,
6668 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6669 acrtc_state->stream->psr_version &&
6670 !acrtc_state->stream->link->psr_feature_enabled)
6671 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6672 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6673 acrtc_state->stream->link->psr_feature_enabled &&
6674 !acrtc_state->stream->link->psr_allow_active &&
6676 amdgpu_dm_psr_enable(acrtc_state->stream);
6679 mutex_unlock(&dm->dc_lock);
6683 * Update cursor state *after* programming all the planes.
6684 * This avoids redundant programming in the case where we're going
6685 * to be disabling a single plane - those pipes are being disabled.
6687 if (acrtc_state->active_planes)
6688 amdgpu_dm_commit_cursors(state);
6694 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6695 struct drm_atomic_state *state)
6697 struct amdgpu_device *adev = dev->dev_private;
6698 struct amdgpu_dm_connector *aconnector;
6699 struct drm_connector *connector;
6700 struct drm_connector_state *old_con_state, *new_con_state;
6701 struct drm_crtc_state *new_crtc_state;
6702 struct dm_crtc_state *new_dm_crtc_state;
6703 const struct dc_stream_status *status;
6706 /* Notify device removals. */
6707 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6708 if (old_con_state->crtc != new_con_state->crtc) {
6709 /* CRTC changes require notification. */
6713 if (!new_con_state->crtc)
6716 new_crtc_state = drm_atomic_get_new_crtc_state(
6717 state, new_con_state->crtc);
6719 if (!new_crtc_state)
6722 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6726 aconnector = to_amdgpu_dm_connector(connector);
6728 mutex_lock(&adev->dm.audio_lock);
6729 inst = aconnector->audio_inst;
6730 aconnector->audio_inst = -1;
6731 mutex_unlock(&adev->dm.audio_lock);
6733 amdgpu_dm_audio_eld_notify(adev, inst);
6736 /* Notify audio device additions. */
6737 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6738 if (!new_con_state->crtc)
6741 new_crtc_state = drm_atomic_get_new_crtc_state(
6742 state, new_con_state->crtc);
6744 if (!new_crtc_state)
6747 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6750 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6751 if (!new_dm_crtc_state->stream)
6754 status = dc_stream_get_status(new_dm_crtc_state->stream);
6758 aconnector = to_amdgpu_dm_connector(connector);
6760 mutex_lock(&adev->dm.audio_lock);
6761 inst = status->audio_inst;
6762 aconnector->audio_inst = inst;
6763 mutex_unlock(&adev->dm.audio_lock);
6765 amdgpu_dm_audio_eld_notify(adev, inst);
6770 * Enable interrupts on CRTCs that are newly active, undergone
6771 * a modeset, or have active planes again.
6773 * Done in two passes, based on the for_modeset flag:
6774 * Pass 1: For CRTCs going through modeset
6775 * Pass 2: For CRTCs going from 0 to n active planes
6777 * Interrupts can only be enabled after the planes are programmed,
6778 * so this requires a two-pass approach since we don't want to
6779 * just defer the interrupts until after commit planes every time.
6781 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6782 struct drm_atomic_state *state,
6785 struct amdgpu_device *adev = dev->dev_private;
6786 struct drm_crtc *crtc;
6787 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6789 #ifdef CONFIG_DEBUG_FS
6790 enum amdgpu_dm_pipe_crc_source source;
6793 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6794 new_crtc_state, i) {
6795 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6796 struct dm_crtc_state *dm_new_crtc_state =
6797 to_dm_crtc_state(new_crtc_state);
6798 struct dm_crtc_state *dm_old_crtc_state =
6799 to_dm_crtc_state(old_crtc_state);
6800 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
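/*
 * Pass 1 (for_modeset) picks CRTCs undergoing a modeset; pass 2 picks
 * CRTCs not in a modeset whose interrupts were previously disabled,
 * i.e. CRTCs going from 0 to n active planes.
 */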
6803 run_pass = (for_modeset && modeset) ||
6804 (!for_modeset && !modeset &&
6805 !dm_old_crtc_state->interrupts_enabled);
6810 if (!dm_new_crtc_state->interrupts_enabled)
6813 manage_dm_interrupts(adev, acrtc, true);
6815 #ifdef CONFIG_DEBUG_FS
6816 /* The stream has changed so CRC capture needs to be re-enabled. */
6817 source = dm_new_crtc_state->crc_src;
6818 if (amdgpu_dm_is_valid_crc_source(source)) {
6819 amdgpu_dm_crtc_configure_crc_source(
6820 crtc, dm_new_crtc_state,
6821 dm_new_crtc_state->crc_src);
6828 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6829 * @crtc_state: the DRM CRTC state
6830 * @stream_state: the DC stream state.
6832 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
6833 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6835 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6836 struct dc_stream_state *stream_state)
6838 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6841 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6842 struct drm_atomic_state *state,
6845 struct drm_crtc *crtc;
6846 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6847 struct amdgpu_device *adev = dev->dev_private;
6851 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6852 * a modeset, being disabled, or have no active planes.
6854 * It's done in atomic commit rather than commit tail for now since
6855 * some of these interrupt handlers access the current CRTC state and
6856 * potentially the stream pointer itself.
6858 * Since the atomic state is swapped within atomic commit and not within
6859 * commit tail, this would lead to the new state (that hasn't been committed yet)
6860 * being accessed from within the handlers.
6862 * TODO: Fix this so we can do this in commit tail and not have to block
6865 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6866 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6867 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6868 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6870 if (dm_old_crtc_state->interrupts_enabled &&
6871 (!dm_new_crtc_state->interrupts_enabled ||
6872 drm_atomic_crtc_needs_modeset(new_crtc_state)))
6873 manage_dm_interrupts(adev, acrtc, false);
6876 * Add check here for SoCs that support hardware cursor plane, to
6877 * unset legacy_cursor_update
6880 return drm_atomic_helper_commit(dev, state, nonblock);
6882 /*TODO Handle EINTR, reenable IRQ*/
6886 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
6887 * @state: The atomic state to commit
6889 * This will tell DC to commit the constructed DC state from atomic_check,
6890 * programming the hardware. Any failure here implies a hardware failure, since
6891 * atomic check should have filtered anything non-kosher.
6893 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
6895 struct drm_device *dev = state->dev;
6896 struct amdgpu_device *adev = dev->dev_private;
6897 struct amdgpu_display_manager *dm = &adev->dm;
6898 struct dm_atomic_state *dm_state;
6899 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
6901 struct drm_crtc *crtc;
6902 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6903 unsigned long flags;
6904 bool wait_for_vblank = true;
6905 struct drm_connector *connector;
6906 struct drm_connector_state *old_con_state, *new_con_state;
6907 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
6908 int crtc_disable_count = 0;
6910 drm_atomic_helper_update_legacy_modeset_state(dev, state);
6912 dm_state = dm_atomic_get_new_state(state);
6913 if (dm_state && dm_state->context) {
6914 dc_state = dm_state->context;
6916 /* No state changes, retain current state. */
6917 dc_state_temp = dc_create_state(dm->dc);
6918 ASSERT(dc_state_temp);
6919 dc_state = dc_state_temp;
6920 dc_resource_state_copy_construct_current(dm->dc, dc_state);
6923 /* update changed items */
6924 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6925 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6927 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6928 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6931 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
6932 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
6933 "connectors_changed:%d\n",
6935 new_crtc_state->enable,
6936 new_crtc_state->active,
6937 new_crtc_state->planes_changed,
6938 new_crtc_state->mode_changed,
6939 new_crtc_state->active_changed,
6940 new_crtc_state->connectors_changed);
6942 /* Copy all transient state flags into dc state */
6943 if (dm_new_crtc_state->stream) {
6944 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
6945 dm_new_crtc_state->stream);
6948 /* handles headless hotplug case, updating new_state and
6949 * aconnector as needed
6952 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
6954 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
6956 if (!dm_new_crtc_state->stream) {
6958 * This could happen because of issues with
6959 * userspace notification delivery.
6960 * In this case userspace tries to set a mode on a
6961 * display which is in fact disconnected.
6962 * dc_sink is NULL in this case on aconnector.
6963 * We expect a mode reset to come soon.
6965 * This can also happen when an unplug is done
6966 * during the resume sequence.
6968 * In this case, we want to pretend we still
6969 * have a sink to keep the pipe running so that
6970 * hw state is consistent with the sw state
6972 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
6973 __func__, acrtc->base.base.id);
6977 if (dm_old_crtc_state->stream)
6978 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6980 pm_runtime_get_noresume(dev->dev);
6982 acrtc->enabled = true;
6983 acrtc->hw_mode = new_crtc_state->mode;
6984 crtc->hwmode = new_crtc_state->mode;
6985 } else if (modereset_required(new_crtc_state)) {
6986 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
6987 /* i.e. reset mode */
6988 if (dm_old_crtc_state->stream) {
6989 if (dm_old_crtc_state->stream->link->psr_allow_active)
6990 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
6992 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
6995 } /* for_each_crtc_in_state() */
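/*
 * Program the constructed DC state to hardware. atomic_check has already
 * validated this state, so a commit failure here is unexpected.
 */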
6998 dm_enable_per_frame_crtc_master_sync(dc_state);
6999 mutex_lock(&dm->dc_lock);
7000 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7001 mutex_unlock(&dm->dc_lock);
7004 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7005 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7007 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7009 if (dm_new_crtc_state->stream != NULL) {
7010 const struct dc_stream_status *status =
7011 dc_stream_get_status(dm_new_crtc_state->stream);
7014 status = dc_stream_get_status_from_state(dc_state,
7015 dm_new_crtc_state->stream);
7018 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7020 acrtc->otg_inst = status->primary_otg_inst;
7023 #ifdef CONFIG_DRM_AMD_DC_HDCP
7024 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7025 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7026 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7027 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7029 new_crtc_state = NULL;
7032 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7034 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7036 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7037 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7038 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7039 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7043 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7044 hdcp_update_display(
7045 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7046 new_con_state->hdcp_content_type,
7047 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7052 /* Handle connector state changes */
7053 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7054 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7055 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7056 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7057 struct dc_surface_update dummy_updates[MAX_SURFACES];
7058 struct dc_stream_update stream_update;
7059 struct dc_info_packet hdr_packet;
7060 struct dc_stream_status *status = NULL;
7061 bool abm_changed, hdr_changed, scaling_changed;
7063 memset(&dummy_updates, 0, sizeof(dummy_updates));
7064 memset(&stream_update, 0, sizeof(stream_update));
7067 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7068 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7071 /* Skip any modesets/resets */
7072 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7075 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7076 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7078 scaling_changed = is_scaling_state_different(dm_new_con_state,
7081 abm_changed = dm_new_crtc_state->abm_level !=
7082 dm_old_crtc_state->abm_level;
7085 is_hdr_metadata_different(old_con_state, new_con_state);
7087 if (!scaling_changed && !abm_changed && !hdr_changed)
7090 stream_update.stream = dm_new_crtc_state->stream;
7091 if (scaling_changed) {
7092 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7093 dm_new_con_state, dm_new_crtc_state->stream);
7095 stream_update.src = dm_new_crtc_state->stream->src;
7096 stream_update.dst = dm_new_crtc_state->stream->dst;
7100 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7102 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7106 fill_hdr_info_packet(new_con_state, &hdr_packet);
7107 stream_update.hdr_static_metadata = &hdr_packet;
7110 status = dc_stream_get_status(dm_new_crtc_state->stream);
7112 WARN_ON(!status->plane_count);
7115 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7116 * Here we create an empty update on each plane.
7117 * To fix this, DC should permit updating only stream properties.
7119 for (j = 0; j < status->plane_count; j++)
7120 dummy_updates[j].surface = status->plane_states[0];
7123 mutex_lock(&dm->dc_lock);
7124 dc_commit_updates_for_stream(dm->dc,
7126 status->plane_count,
7127 dm_new_crtc_state->stream,
7130 mutex_unlock(&dm->dc_lock);
7133 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7134 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7135 new_crtc_state, i) {
7136 if (old_crtc_state->active && !new_crtc_state->active)
7137 crtc_disable_count++;
7139 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7140 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7142 /* Update freesync active state. */
7143 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7145 /* Handle vrr on->off / off->on transitions */
7146 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7150 /* Enable interrupts for CRTCs going through a modeset. */
7151 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7153 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7154 if (new_crtc_state->async_flip)
7155 wait_for_vblank = false;
7157 /* update planes when needed per crtc*/
7158 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7159 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7161 if (dm_new_crtc_state->stream)
7162 amdgpu_dm_commit_planes(state, dc_state, dev,
7163 dm, crtc, wait_for_vblank);
7166 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7167 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7169 /* Update audio instances for each connector. */
7170 amdgpu_dm_commit_audio(dev, state);
7173 * Send a vblank event for every event not handled in the flip path, and
7174 * mark the event as consumed for drm_atomic_helper_commit_hw_done().
7176 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7177 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7179 if (new_crtc_state->event)
7180 drm_send_event_locked(dev, &new_crtc_state->event->base);
7182 new_crtc_state->event = NULL;
7184 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7186 /* Signal HW programming completion */
7187 drm_atomic_helper_commit_hw_done(state);
7189 if (wait_for_vblank)
7190 drm_atomic_helper_wait_for_flip_done(dev, state);
7192 drm_atomic_helper_cleanup_planes(dev, state);
7195 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7196 * so we can put the GPU into runtime suspend if we're not driving any
7199 for (i = 0; i < crtc_disable_count; i++)
7200 pm_runtime_put_autosuspend(dev->dev);
7201 pm_runtime_mark_last_busy(dev->dev);
7204 dc_release_state(dc_state_temp);
7208 static int dm_force_atomic_commit(struct drm_connector *connector)
7211 struct drm_device *ddev = connector->dev;
7212 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7213 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7214 struct drm_plane *plane = disconnected_acrtc->base.primary;
7215 struct drm_connector_state *conn_state;
7216 struct drm_crtc_state *crtc_state;
7217 struct drm_plane_state *plane_state;
7222 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7224 /* Construct an atomic state to restore previous display setting */
7227 * Attach connectors to drm_atomic_state
7229 conn_state = drm_atomic_get_connector_state(state, connector);
7231 ret = PTR_ERR_OR_ZERO(conn_state);
7235 /* Attach crtc to drm_atomic_state*/
7236 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7238 ret = PTR_ERR_OR_ZERO(crtc_state);
7242 /* force a restore */
7243 crtc_state->mode_changed = true;
7245 /* Attach plane to drm_atomic_state */
7246 plane_state = drm_atomic_get_plane_state(state, plane);
7248 ret = PTR_ERR_OR_ZERO(plane_state);
7253 /* Call commit internally with the state we just constructed */
7254 ret = drm_atomic_commit(state);
7259 DRM_ERROR("Restoring old state failed with %i\n", ret);
7260 drm_atomic_state_put(state);
7266 * This function handles all cases when set mode does not come upon hotplug.
7267 * This includes when a display is unplugged then plugged back into the
7268 * same port and when running without usermode desktop manager support
7270 void dm_restore_drm_connector_state(struct drm_device *dev,
7271 struct drm_connector *connector)
7273 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7274 struct amdgpu_crtc *disconnected_acrtc;
7275 struct dm_crtc_state *acrtc_state;
7277 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7280 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7281 if (!disconnected_acrtc)
7284 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7285 if (!acrtc_state->stream)
7289 * If the previous sink is not released and different from the current,
7290 * we deduce we are in a state where we cannot rely on a usermode call
7291 * to turn on the display, so we do it here
7293 if (acrtc_state->stream->sink != aconnector->dc_sink)
7294 dm_force_atomic_commit(&aconnector->base);
7298 * Grabs all modesetting locks to serialize against any blocking commits, and
7299 * waits for completion of all non-blocking commits.
7301 static int do_aquire_global_lock(struct drm_device *dev,
7302 struct drm_atomic_state *state)
7304 struct drm_crtc *crtc;
7305 struct drm_crtc_commit *commit;
7309 * Adding all modeset locks to acquire_ctx will
7310 * ensure that when the framework releases it, the
7311 * extra locks we are taking here will get released too.
7313 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7317 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7318 spin_lock(&crtc->commit_lock);
7319 commit = list_first_entry_or_null(&crtc->commit_list,
7320 struct drm_crtc_commit, commit_entry);
7322 drm_crtc_commit_get(commit);
7323 spin_unlock(&crtc->commit_lock);
7329 * Make sure all pending HW programming completed and
7332 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7335 ret = wait_for_completion_interruptible_timeout(
7336 &commit->flip_done, 10*HZ);
7339 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7340 "timed out\n", crtc->base.id, crtc->name);
7342 drm_crtc_commit_put(commit);
7345 return ret < 0 ? ret : 0;
7348 static void get_freesync_config_for_crtc(
7349 struct dm_crtc_state *new_crtc_state,
7350 struct dm_connector_state *new_con_state)
7352 struct mod_freesync_config config = {0};
7353 struct amdgpu_dm_connector *aconnector =
7354 to_amdgpu_dm_connector(new_con_state->base.connector);
7355 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7356 int vrefresh = drm_mode_vrefresh(mode);
7358 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7359 vrefresh >= aconnector->min_vfreq &&
7360 vrefresh <= aconnector->max_vfreq;
7362 if (new_crtc_state->vrr_supported) {
7363 new_crtc_state->stream->ignore_msa_timing_param = true;
7364 config.state = new_crtc_state->base.vrr_enabled ?
7365 VRR_STATE_ACTIVE_VARIABLE :
7367 config.min_refresh_in_uhz =
7368 aconnector->min_vfreq * 1000000;
7369 config.max_refresh_in_uhz =
7370 aconnector->max_vfreq * 1000000;
7371 config.vsif_supported = true;
7375 new_crtc_state->freesync_config = config;
7378 static void reset_freesync_config_for_crtc(
7379 struct dm_crtc_state *new_crtc_state)
7381 new_crtc_state->vrr_supported = false;
7383 memset(&new_crtc_state->vrr_params, 0,
7384 sizeof(new_crtc_state->vrr_params));
7385 memset(&new_crtc_state->vrr_infopacket, 0,
7386 sizeof(new_crtc_state->vrr_infopacket));
7389 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7390 struct drm_atomic_state *state,
7391 struct drm_crtc *crtc,
7392 struct drm_crtc_state *old_crtc_state,
7393 struct drm_crtc_state *new_crtc_state,
7395 bool *lock_and_validation_needed)
7397 struct dm_atomic_state *dm_state = NULL;
7398 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7399 struct dc_stream_state *new_stream;
7403 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7404 * update changed items
7406 struct amdgpu_crtc *acrtc = NULL;
7407 struct amdgpu_dm_connector *aconnector = NULL;
7408 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7409 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7413 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7414 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7415 acrtc = to_amdgpu_crtc(crtc);
7416 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7418 /* TODO This hack should go away */
7419 if (aconnector && enable) {
7420 /* Make sure fake sink is created in plug-in scenario */
7421 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7423 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7426 if (IS_ERR(drm_new_conn_state)) {
7427 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7431 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7432 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7434 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7437 new_stream = create_stream_for_sink(aconnector,
7438 &new_crtc_state->mode,
7440 dm_old_crtc_state->stream);
7443 * We can have no stream on ACTION_SET if a display
7444 * was disconnected during S3; in this case it is not an
7445 * error, the OS will be updated after detection, and
7446 * will do the right thing on the next atomic commit.
7450 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7451 __func__, acrtc->base.base.id);
7456 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7458 ret = fill_hdr_info_packet(drm_new_conn_state,
7459 &new_stream->hdr_static_metadata);
7464 * If we already removed the old stream from the context
7465 * (and set the new stream to NULL) then we can't reuse
7466 * the old stream even if the stream and scaling are unchanged.
7467 * We'll hit the BUG_ON and get a black screen.
7469 * TODO: Refactor this function to allow this check to work
7470 * in all conditions.
7472 if (dm_new_crtc_state->stream &&
7473 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7474 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7475 new_crtc_state->mode_changed = false;
7476 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7477 new_crtc_state->mode_changed);
7481 /* mode_changed flag may get updated above, need to check again */
7482 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7486 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7487 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7488 "connectors_changed:%d\n",
7490 new_crtc_state->enable,
7491 new_crtc_state->active,
7492 new_crtc_state->planes_changed,
7493 new_crtc_state->mode_changed,
7494 new_crtc_state->active_changed,
7495 new_crtc_state->connectors_changed);
7497 /* Remove stream for any changed/disabled CRTC */
7500 if (!dm_old_crtc_state->stream)
7503 ret = dm_atomic_get_state(state, &dm_state);
7507 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7510 /* i.e. reset mode */
7511 if (dc_remove_stream_from_ctx(
7514 dm_old_crtc_state->stream) != DC_OK) {
7519 dc_stream_release(dm_old_crtc_state->stream);
7520 dm_new_crtc_state->stream = NULL;
7522 reset_freesync_config_for_crtc(dm_new_crtc_state);
7524 *lock_and_validation_needed = true;
7526 } else {/* Add stream for any updated/enabled CRTC */
7528 * Quick fix to prevent a NULL pointer dereference on new_stream when
7529 * MST connectors added in chained mode are not found in the existing crtc_state.
7530 * TODO: need to dig out the root cause of that.
7532 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7535 if (modereset_required(new_crtc_state))
7538 if (modeset_required(new_crtc_state, new_stream,
7539 dm_old_crtc_state->stream)) {
7541 WARN_ON(dm_new_crtc_state->stream);
7543 ret = dm_atomic_get_state(state, &dm_state);
7547 dm_new_crtc_state->stream = new_stream;
7549 dc_stream_retain(new_stream);
7551 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7554 if (dc_add_stream_to_ctx(
7557 dm_new_crtc_state->stream) != DC_OK) {
7562 *lock_and_validation_needed = true;
7567 /* Release extra reference */
7569 dc_stream_release(new_stream);
7572 * We want to do dc stream updates that do not require a
7573 * full modeset below.
7575 if (!(enable && aconnector && new_crtc_state->enable &&
7576 new_crtc_state->active))
7579 * Given above conditions, the dc state cannot be NULL because:
7580 * 1. We're in the process of enabling CRTCs (just been added
7581 * to the dc context, or already is on the context)
7582 * 2. Has a valid connector attached, and
7583 * 3. Is currently active and enabled.
7584 * => The dc stream state currently exists.
7586 BUG_ON(dm_new_crtc_state->stream == NULL);
7588 /* Scaling or underscan settings */
7589 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7590 update_stream_scaling_settings(
7591 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7594 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7597 * Color management settings. We also update color properties
7598 * when a modeset is needed, to ensure it gets reprogrammed.
7600 if (dm_new_crtc_state->base.color_mgmt_changed ||
7601 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7602 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7607 /* Update Freesync settings. */
7608 get_freesync_config_for_crtc(dm_new_crtc_state,
7615 dc_stream_release(new_stream);
7619 static bool should_reset_plane(struct drm_atomic_state *state,
7620 struct drm_plane *plane,
7621 struct drm_plane_state *old_plane_state,
7622 struct drm_plane_state *new_plane_state)
7624 struct drm_plane *other;
7625 struct drm_plane_state *old_other_state, *new_other_state;
7626 struct drm_crtc_state *new_crtc_state;
7630 * TODO: Remove this hack once the checks below are sufficient
7631 * to determine when we need to reset all the planes on
7634 if (state->allow_modeset)
7637 /* Exit early if we know that we're adding or removing the plane. */
7638 if (old_plane_state->crtc != new_plane_state->crtc)
7641 /* old crtc == new_crtc == NULL, plane not in context. */
7642 if (!new_plane_state->crtc)
7646 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7648 if (!new_crtc_state)
7651 /* CRTC Degamma changes currently require us to recreate planes. */
7652 if (new_crtc_state->color_mgmt_changed)
7655 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7659 * If there are any new primary or overlay planes being added or
7660 * removed then the z-order can potentially change. To ensure
7661 * correct z-order and pipe acquisition the current DC architecture
7662 * requires us to remove and recreate all existing planes.
7664 * TODO: Come up with a more elegant solution for this.
7666 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7667 if (other->type == DRM_PLANE_TYPE_CURSOR)
7670 if (old_other_state->crtc != new_plane_state->crtc &&
7671 new_other_state->crtc != new_plane_state->crtc)
7674 if (old_other_state->crtc != new_other_state->crtc)
7677 /* TODO: Remove this once we can handle fast format changes. */
7678 if (old_other_state->fb && new_other_state->fb &&
7679 old_other_state->fb->format != new_other_state->fb->format)
7686 static int dm_update_plane_state(struct dc *dc,
7687 struct drm_atomic_state *state,
7688 struct drm_plane *plane,
7689 struct drm_plane_state *old_plane_state,
7690 struct drm_plane_state *new_plane_state,
7692 bool *lock_and_validation_needed)
7695 struct dm_atomic_state *dm_state = NULL;
7696 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7697 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7698 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7699 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7704 new_plane_crtc = new_plane_state->crtc;
7705 old_plane_crtc = old_plane_state->crtc;
7706 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7707 dm_old_plane_state = to_dm_plane_state(old_plane_state);
7709 /*TODO Implement atomic check for cursor plane */
7710 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7713 needs_reset = should_reset_plane(state, plane, old_plane_state,
7716 /* Remove any changed/removed planes */
7721 if (!old_plane_crtc)
7724 old_crtc_state = drm_atomic_get_old_crtc_state(
7725 state, old_plane_crtc);
7726 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7728 if (!dm_old_crtc_state->stream)
7731 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7732 plane->base.id, old_plane_crtc->base.id);
7734 ret = dm_atomic_get_state(state, &dm_state);
7738 if (!dc_remove_plane_from_context(
7740 dm_old_crtc_state->stream,
7741 dm_old_plane_state->dc_state,
7742 dm_state->context)) {
7749 dc_plane_state_release(dm_old_plane_state->dc_state);
7750 dm_new_plane_state->dc_state = NULL;
7752 *lock_and_validation_needed = true;
7754 } else { /* Add new planes */
7755 struct dc_plane_state *dc_new_plane_state;
7757 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7760 if (!new_plane_crtc)
7763 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7764 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7766 if (!dm_new_crtc_state->stream)
7772 WARN_ON(dm_new_plane_state->dc_state);
7774 dc_new_plane_state = dc_create_plane_state(dc);
7775 if (!dc_new_plane_state)
7778 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7779 plane->base.id, new_plane_crtc->base.id);
7781 ret = fill_dc_plane_attributes(
7782 new_plane_crtc->dev->dev_private,
7787 dc_plane_state_release(dc_new_plane_state);
7791 ret = dm_atomic_get_state(state, &dm_state);
7793 dc_plane_state_release(dc_new_plane_state);
7798 * Any atomic check errors that occur after this will
7799 * not need a release. The plane state will be attached
7800 * to the stream, and therefore part of the atomic
7801 * state. It'll be released when the atomic state is
7804 if (!dc_add_plane_to_context(
7806 dm_new_crtc_state->stream,
7808 dm_state->context)) {
7810 dc_plane_state_release(dc_new_plane_state);
7814 dm_new_plane_state->dc_state = dc_new_plane_state;
7816 /* Tell DC to do a full surface update every time there
7817 * is a plane change. Inefficient, but works for now.
7819 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7821 *lock_and_validation_needed = true;
7829 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7830 struct drm_atomic_state *state,
7831 enum surface_update_type *out_type)
7833 struct dc *dc = dm->dc;
7834 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7835 int i, j, num_plane, ret = 0;
7836 struct drm_plane_state *old_plane_state, *new_plane_state;
7837 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7838 struct drm_crtc *new_plane_crtc;
7839 struct drm_plane *plane;
7841 struct drm_crtc *crtc;
7842 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7843 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7844 struct dc_stream_status *status = NULL;
7845 enum surface_update_type update_type = UPDATE_TYPE_FAST;
7846 struct surface_info_bundle {
7847 struct dc_surface_update surface_updates[MAX_SURFACES];
7848 struct dc_plane_info plane_infos[MAX_SURFACES];
7849 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7850 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7851 struct dc_stream_update stream_update;
7854 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7857 DRM_ERROR("Failed to allocate update bundle\n");
7858 /* Set type to FULL to avoid crashing in DC*/
7859 update_type = UPDATE_TYPE_FULL;
7863 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7865 memset(bundle, 0, sizeof(struct surface_info_bundle));
7867 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7868 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
7871 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
7872 update_type = UPDATE_TYPE_FULL;
7876 if (!new_dm_crtc_state->stream)
7879 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
7880 const struct amdgpu_framebuffer *amdgpu_fb =
7881 to_amdgpu_framebuffer(new_plane_state->fb);
7882 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
7883 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
7884 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
7885 uint64_t tiling_flags;
7887 new_plane_crtc = new_plane_state->crtc;
7888 new_dm_plane_state = to_dm_plane_state(new_plane_state);
7889 old_dm_plane_state = to_dm_plane_state(old_plane_state);
7891 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7894 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
7895 update_type = UPDATE_TYPE_FULL;
7899 if (crtc != new_plane_crtc)
7902 bundle->surface_updates[num_plane].surface =
7903 new_dm_plane_state->dc_state;
7905 if (new_crtc_state->mode_changed) {
7906 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
7907 bundle->stream_update.src = new_dm_crtc_state->stream->src;
7910 if (new_crtc_state->color_mgmt_changed) {
7911 bundle->surface_updates[num_plane].gamma =
7912 new_dm_plane_state->dc_state->gamma_correction;
7913 bundle->surface_updates[num_plane].in_transfer_func =
7914 new_dm_plane_state->dc_state->in_transfer_func;
7915 bundle->stream_update.gamut_remap =
7916 &new_dm_crtc_state->stream->gamut_remap_matrix;
7917 bundle->stream_update.output_csc_transform =
7918 &new_dm_crtc_state->stream->csc_color_matrix;
7919 bundle->stream_update.out_transfer_func =
7920 new_dm_crtc_state->stream->out_transfer_func;
7923 ret = fill_dc_scaling_info(new_plane_state,
7928 bundle->surface_updates[num_plane].scaling_info = scaling_info;
7931 ret = get_fb_info(amdgpu_fb, &tiling_flags);
7935 ret = fill_dc_plane_info_and_addr(
7936 dm->adev, new_plane_state, tiling_flags,
7938 &flip_addr->address);
7942 bundle->surface_updates[num_plane].plane_info = plane_info;
7943 bundle->surface_updates[num_plane].flip_addr = flip_addr;
7952 ret = dm_atomic_get_state(state, &dm_state);
7956 old_dm_state = dm_atomic_get_old_state(state);
7957 if (!old_dm_state) {
7962 status = dc_stream_get_status_from_state(old_dm_state->context,
7963 new_dm_crtc_state->stream);
7964 bundle->stream_update.stream = new_dm_crtc_state->stream;
7966 * TODO: DC modifies the surface during this call so we need
7967 * to lock here - find a way to do this without locking.
7969 mutex_lock(&dm->dc_lock);
7970 update_type = dc_check_update_surfaces_for_stream(
7971 dc, bundle->surface_updates, num_plane,
7972 &bundle->stream_update, status);
7973 mutex_unlock(&dm->dc_lock);
7975 if (update_type > UPDATE_TYPE_MED) {
7976 update_type = UPDATE_TYPE_FULL;
7984 *out_type = update_type;
7988 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
7990 struct drm_connector *connector;
7991 struct drm_connector_state *conn_state;
7992 struct amdgpu_dm_connector *aconnector = NULL;
7994 for_each_new_connector_in_state(state, connector, conn_state, i) {
7995 if (conn_state->crtc != crtc)
7998 aconnector = to_amdgpu_dm_connector(connector);
7999 if (!aconnector->port || !aconnector->mst_port)
8008 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8012 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8013 * @dev: The DRM device
8014 * @state: The atomic state to commit
8016 * Validate that the given atomic state is programmable by DC into hardware.
8017 * This involves constructing a &struct dc_state reflecting the new hardware
8018 * state we wish to commit, then querying DC to see if it is programmable. It's
8019 * important not to modify the existing DC state. Otherwise, atomic_check
8020 * may unexpectedly commit hardware changes.
8022 * When validating the DC state, it's important that the right locks are
8023 * acquired. For full updates case which removes/adds/updates streams on one
8024 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8025 * that any such full update commit will wait for completion of any outstanding
8026 * flip using DRM's synchronization events. See
8027 * dm_determine_update_type_for_commit()
8029 * Note that DM adds the affected connectors for all CRTCs in state, when that
8030 * might not seem necessary. This is because DC stream creation requires the
8031 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8032 * be possible but non-trivial - a possible TODO item.
8034 * Return: -Error code if validation failed.
8036 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8037 struct drm_atomic_state *state)
8039 struct amdgpu_device *adev = dev->dev_private;
8040 struct dm_atomic_state *dm_state = NULL;
8041 struct dc *dc = adev->dm.dc;
8042 struct drm_connector *connector;
8043 struct drm_connector_state *old_con_state, *new_con_state;
8044 struct drm_crtc *crtc;
8045 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8046 struct drm_plane *plane;
8047 struct drm_plane_state *old_plane_state, *new_plane_state;
8048 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8049 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8054 * This bool will be set to true for any modeset/reset
8055 * or plane update which implies a non-fast surface update.
8057 bool lock_and_validation_needed = false;
8059 ret = drm_atomic_helper_check_modeset(dev, state);
8063 if (adev->asic_type >= CHIP_NAVI10) {
8064 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8065 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8066 ret = add_affected_mst_dsc_crtcs(state, crtc);
8073 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8074 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8075 !new_crtc_state->color_mgmt_changed &&
8076 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8079 if (!new_crtc_state->enable)
8082 ret = drm_atomic_add_affected_connectors(state, crtc);
8086 ret = drm_atomic_add_affected_planes(state, crtc);
8092 * Add all primary and overlay planes on the CRTC to the state
8093 * whenever a plane is enabled to maintain correct z-ordering
8094 * and to enable fast surface updates.
8096 drm_for_each_crtc(crtc, dev) {
8097 bool modified = false;
8099 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8100 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8103 if (new_plane_state->crtc == crtc ||
8104 old_plane_state->crtc == crtc) {
8113 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8114 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8118 drm_atomic_get_plane_state(state, plane);
8120 if (IS_ERR(new_plane_state)) {
8121 ret = PTR_ERR(new_plane_state);
8127 /* Remove existing planes if they are modified */
8128 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8129 ret = dm_update_plane_state(dc, state, plane,
8133 &lock_and_validation_needed);
8138 /* Disable all crtcs which require disable */
8139 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8140 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8144 &lock_and_validation_needed);
8149 /* Enable all crtcs which require enable */
8150 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8151 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8155 &lock_and_validation_needed);
8160 /* Add new/modified planes */
8161 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8162 ret = dm_update_plane_state(dc, state, plane,
8166 &lock_and_validation_needed);
8171 /* Run this here since we want to validate the streams we created */
8172 ret = drm_atomic_helper_check_planes(dev, state);
8176 if (state->legacy_cursor_update) {
8178 * This is a fast cursor update coming from the plane update
8179 * helper, check if it can be done asynchronously for better
8182 state->async_update =
8183 !drm_atomic_helper_async_check(dev, state);
8186 * Skip the remaining global validation if this is an async
8187 * update. Cursor updates can be done without affecting
8188 * state or bandwidth calcs and this avoids the performance
8189 * penalty of locking the private state object and
8190 * allocating a new dc_state.
8192 if (state->async_update)
8196 /* Check scaling and underscan changes */
8197 /* TODO Removed scaling changes validation due to inability to commit a
8198 * new stream into the context w/o causing a full reset. Need to
8199 * decide how to handle.
8201 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8202 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8203 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8204 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8206 /* Skip any modesets/resets */
8207 if (!acrtc || drm_atomic_crtc_needs_modeset(
8208 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8211 /* Skip anything that is not a scaling or underscan change */
8212 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8215 overall_update_type = UPDATE_TYPE_FULL;
8216 lock_and_validation_needed = true;
8219 ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8223 if (overall_update_type < update_type)
8224 overall_update_type = update_type;
8227 * lock_and_validation_needed was an old way to determine if we need to set
8228 * the global lock. Leaving it in to check if we broke any corner cases:
8229 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8230 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8232 if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8233 WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8235 if (overall_update_type > UPDATE_TYPE_FAST) {
8236 ret = dm_atomic_get_state(state, &dm_state);
8240 ret = do_aquire_global_lock(dev, state);
8244 #if defined(CONFIG_DRM_AMD_DC_DCN)
8245 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8248 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8254 * Perform validation of MST topology in the state:
8255 * We need to perform MST atomic check before calling
8256 * dc_validate_global_state(), or there is a chance
8257 * to get stuck in an infinite loop and hang eventually.
8259 ret = drm_dp_mst_atomic_check(state);
8263 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8269 * The commit is a fast update. Fast updates shouldn't change
8270 * the DC context or affect global validation, and their commit
8271 * work can be done in parallel with other commits not touching
8272 * the same resource. If we have a new DC context as part of
8273 * the DM atomic state from validation we need to free it and
8274 * retain the existing one instead.
8276 struct dm_atomic_state *new_dm_state, *old_dm_state;
8278 new_dm_state = dm_atomic_get_new_state(state);
8279 old_dm_state = dm_atomic_get_old_state(state);
8281 if (new_dm_state && old_dm_state) {
8282 if (new_dm_state->context)
8283 dc_release_state(new_dm_state->context);
8285 new_dm_state->context = old_dm_state->context;
8287 if (old_dm_state->context)
8288 dc_retain_state(old_dm_state->context);
8292 /* Store the overall update type for use later in atomic check. */
8293 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8294 struct dm_crtc_state *dm_new_crtc_state =
8295 to_dm_crtc_state(new_crtc_state);
8297 dm_new_crtc_state->update_type = (int)overall_update_type;
8300 /* Must be success */
8305 if (ret == -EDEADLK)
8306 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8307 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8308 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8310 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8315 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8316 struct amdgpu_dm_connector *amdgpu_dm_connector)
8319 bool capable = false;
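/*
 * DP_MSA_TIMING_PAR_IGNORED in the DOWN_STREAM_PORT_COUNT DPCD register
 * indicates the sink can ignore the MSA video timing parameters, which
 * is what allows the timing to vary for FreeSync.
 */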
8321 if (amdgpu_dm_connector->dc_link &&
8322 dm_helpers_dp_read_dpcd(
8324 amdgpu_dm_connector->dc_link,
8325 DP_DOWN_STREAM_PORT_COUNT,
8327 sizeof(dpcd_data))) {
8328 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8333 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8337 bool edid_check_required;
8338 struct detailed_timing *timing;
8339 struct detailed_non_pixel *data;
8340 struct detailed_data_monitor_range *range;
8341 struct amdgpu_dm_connector *amdgpu_dm_connector =
8342 to_amdgpu_dm_connector(connector);
8343 struct dm_connector_state *dm_con_state = NULL;
8345 struct drm_device *dev = connector->dev;
8346 struct amdgpu_device *adev = dev->dev_private;
8347 bool freesync_capable = false;
8349 if (!connector->state) {
8350 DRM_ERROR("%s - Connector has no state", __func__);
8355 dm_con_state = to_dm_connector_state(connector->state);
8357 amdgpu_dm_connector->min_vfreq = 0;
8358 amdgpu_dm_connector->max_vfreq = 0;
8359 amdgpu_dm_connector->pixel_clock_mhz = 0;
8364 dm_con_state = to_dm_connector_state(connector->state);
8366 edid_check_required = false;
8367 if (!amdgpu_dm_connector->dc_sink) {
8368 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8371 if (!adev->dm.freesync_module)
8374 * If the EDID is non-zero, restrict FreeSync only to DP and eDP
8377 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8378 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8379 edid_check_required = is_dp_capable_without_timing_msa(
8381 amdgpu_dm_connector);
8384 if (edid_check_required == true && (edid->version > 1 ||
8385 (edid->version == 1 && edid->revision > 1))) {
8386 for (i = 0; i < 4; i++) {
8388 timing = &edid->detailed_timings[i];
8389 data = &timing->data.other_data;
8390 range = &data->data.range;
8392 * Check if monitor has continuous frequency mode
8394 if (data->type != EDID_DETAIL_MONITOR_RANGE)
8397 * Check for flag range limits only. If flag == 1 then
8398 * no additional timing information is provided.
8399 * Default GTF, GTF Secondary curve and CVT are not
8402 if (range->flags != 1)
8405 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8406 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8407 amdgpu_dm_connector->pixel_clock_mhz =
8408 range->pixel_clock_mhz * 10;
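/*
 * Only report FreeSync capability when the monitor range spans more
 * than 10 Hz between min and max refresh (checked below).
 */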
8412 if (amdgpu_dm_connector->max_vfreq -
8413 amdgpu_dm_connector->min_vfreq > 10) {
8415 freesync_capable = true;
8421 dm_con_state->freesync_capable = freesync_capable;
8423 if (connector->vrr_capable_property)
8424 drm_connector_set_vrr_capable_property(connector,
8428 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8430 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8432 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8434 if (link->type == dc_connection_none)
8436 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8437 dpcd_data, sizeof(dpcd_data))) {
8438 link->psr_feature_enabled = dpcd_data[0] ? true:false;
8439 DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8444 * amdgpu_dm_link_setup_psr() - configure psr link
8445 * @stream: stream state
8447 * Return: true if success
8449 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8451 struct dc_link *link = NULL;
8452 struct psr_config psr_config = {0};
8453 struct psr_context psr_context = {0};
8454 struct dc *dc = NULL;
8460 link = stream->link;
8463 psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8465 if (psr_config.psr_version > 0) {
8466 psr_config.psr_exit_link_training_required = 0x1;
8467 psr_config.psr_frame_capture_indication_req = 0;
8468 psr_config.psr_rfb_setup_time = 0x37;
8469 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8470 psr_config.allow_smu_optimizations = 0x0;
8472 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8475 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8481 * amdgpu_dm_psr_enable() - enable psr f/w
8482 * @stream: stream state
8484 * Return: true if success
8486 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8488 struct dc_link *link = stream->link;
8489 unsigned int vsync_rate_hz = 0;
8490 struct dc_static_screen_params params = {0};
8491 /* Calculate number of static frames before generating interrupt to
8494 // Init fail safe of 2 frames static
8495 unsigned int num_frames_static = 2;
8497 DRM_DEBUG_DRIVER("Enabling psr...\n");
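/*
 * Nominal refresh rate in Hz = pixel clock / (h_total * v_total);
 * pix_clk_100hz is in units of 100 Hz, hence the extra factor of 100.
 */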
8499 vsync_rate_hz = div64_u64(div64_u64((
8500 stream->timing.pix_clk_100hz * 100),
8501 stream->timing.v_total),
8502 stream->timing.h_total);
8505 * Calculate number of frames such that at least 30 ms of time has
8508 if (vsync_rate_hz != 0) {
8509 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8510 num_frames_static = (30000 / frame_time_microsec) + 1;
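/*
 * Illustrative example (60 Hz assumed, not taken from the code): the frame
 * time is ~16666 us, so num_frames_static = 30000 / 16666 + 1 = 2 frames,
 * i.e. roughly 33 ms of static frames, satisfying the 30 ms requirement above.
 */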
8513 params.triggers.cursor_update = true;
8514 params.triggers.overlay_update = true;
8515 params.triggers.surface_update = true;
8516 params.num_frames = num_frames_static;
8518 dc_stream_set_static_screen_params(link->ctx->dc,
8522 return dc_link_set_psr_allow_active(link, true, false);
8526 * amdgpu_dm_psr_disable() - disable psr f/w
8527 * @stream: stream state
8529 * Return: true if success
8531 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8534 DRM_DEBUG_DRIVER("Disabling psr...\n");
8536 return dc_link_set_psr_allow_active(stream->link, false, true);