/* drivers/gpu/drm/amd/amdgpu/dce_v11_0.c */
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "drmP.h"
24 #include "amdgpu.h"
25 #include "amdgpu_pm.h"
26 #include "amdgpu_i2c.h"
27 #include "vid.h"
28 #include "atom.h"
29 #include "amdgpu_atombios.h"
30 #include "atombios_crtc.h"
31 #include "atombios_encoders.h"
32 #include "amdgpu_pll.h"
33 #include "amdgpu_connectors.h"
34 #include "dce_v11_0.h"
35
36 #include "dce/dce_11_0_d.h"
37 #include "dce/dce_11_0_sh_mask.h"
38 #include "dce/dce_11_0_enum.h"
39 #include "oss/oss_3_0_d.h"
40 #include "oss/oss_3_0_sh_mask.h"
41 #include "gmc/gmc_8_1_d.h"
42 #include "gmc/gmc_8_1_sh_mask.h"
43
44 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
45 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
46
/* Register-aperture offset for each CRTC; index by crtc id to address that
 * controller's register block (DCE 11 exposes up to 7 CRTCs).
 */
static const u32 crtc_offsets[] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	CRTC6_REGISTER_OFFSET
};
57
/* Register-aperture offset for each hotplug-detect (HPD) pin, indexed by
 * enum amdgpu_hpd_id.
 */
static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};
67
/* Register-aperture offset for each digital (DIG) encoder block. */
static const uint32_t dig_offsets[] = {
	DIG0_REGISTER_OFFSET,
	DIG1_REGISTER_OFFSET,
	DIG2_REGISTER_OFFSET,
	DIG3_REGISTER_OFFSET,
	DIG4_REGISTER_OFFSET,
	DIG5_REGISTER_OFFSET,
	DIG6_REGISTER_OFFSET,
	DIG7_REGISTER_OFFSET,
	DIG8_REGISTER_OFFSET
};
79
/* Per-display interrupt status decoding: for each display controller, the
 * DISP_INTERRUPT_STATUS* register to read and the bit masks within it for
 * the vblank, vline and HPD interrupt sources.  Indexed by crtc/hpd id.
 */
static const struct {
	uint32_t        reg;	/* status register to read */
	uint32_t        vblank;	/* vblank interrupt bit in @reg */
	uint32_t        vline;	/* vline interrupt bit in @reg */
	uint32_t        hpd;	/* hotplug interrupt bit in @reg */

} interrupt_status_offsets[] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
117
/* Carrizo golden register settings: {register, mask, value} triplets
 * consumed by amdgpu_program_register_sequence() — presumably the mask
 * selects bits to replace with value (TODO confirm against the helper).
 */
static const u32 cz_golden_settings_a11[] =
{
	mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
	mmFBC_MISC, 0x1f311fff, 0x14300000,
};
123
/* Carrizo clock/power-gating init sequence: {register, mask, value}
 * triplets for amdgpu_program_register_sequence().
 */
static const u32 cz_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
129
/* Stoney golden register settings: {register, mask, value} triplets. */
static const u32 stoney_golden_settings_a11[] =
{
	mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
	mmFBC_MISC, 0x1f311fff, 0x14302000,
};
135
/* Polaris11 golden register settings: {register, mask, value} triplets. */
static const u32 polaris11_golden_settings_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_DEBUG1, 0xffffffff, 0x00000008,
	mmFBC_MISC, 0x9f313fff, 0x14302008,
	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
144
/* Polaris10 golden register settings: {register, mask, value} triplets. */
static const u32 polaris10_golden_settings_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x9f313fff, 0x14302008,
	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
152
153 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
154 {
155         switch (adev->asic_type) {
156         case CHIP_CARRIZO:
157                 amdgpu_program_register_sequence(adev,
158                                                  cz_mgcg_cgcg_init,
159                                                  (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
160                 amdgpu_program_register_sequence(adev,
161                                                  cz_golden_settings_a11,
162                                                  (const u32)ARRAY_SIZE(cz_golden_settings_a11));
163                 break;
164         case CHIP_STONEY:
165                 amdgpu_program_register_sequence(adev,
166                                                  stoney_golden_settings_a11,
167                                                  (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
168                 break;
169         case CHIP_POLARIS11:
170                 amdgpu_program_register_sequence(adev,
171                                                  polaris11_golden_settings_a11,
172                                                  (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
173                 break;
174         case CHIP_POLARIS10:
175                 amdgpu_program_register_sequence(adev,
176                                                  polaris10_golden_settings_a11,
177                                                  (const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
178                 break;
179         default:
180                 break;
181         }
182 }
183
184 static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
185                                      u32 block_offset, u32 reg)
186 {
187         unsigned long flags;
188         u32 r;
189
190         spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
191         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
192         r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
193         spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
194
195         return r;
196 }
197
198 static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
199                                       u32 block_offset, u32 reg, u32 v)
200 {
201         unsigned long flags;
202
203         spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
204         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
205         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
206         spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
207 }
208
209 static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
210 {
211         if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
212                         CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
213                 return true;
214         else
215                 return false;
216 }
217
218 static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
219 {
220         u32 pos1, pos2;
221
222         pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
223         pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
224
225         if (pos1 != pos2)
226                 return true;
227         else
228                 return false;
229 }
230
/**
 * dce_v11_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 * Busy-waits; returns immediately if @crtc is out of range or the CRTC
 * master enable is clear (nothing is scanning out).
 */
static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	/* i starts at 100 so the counter-moving check fires on the very
	 * first loop iteration, then every 100 polls thereafter.
	 */
	unsigned i = 100;

	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return;

	/* CRTC disabled: there will never be a vblank, don't spin. */
	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v11_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			/* Counter stalled: CRTC isn't scanning, give up. */
			if (!dce_v11_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	/* Now wait for the next vblank to actually start. */
	while (!dce_v11_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v11_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}
268
269 static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
270 {
271         if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
272                 return 0;
273         else
274                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
275 }
276
277 static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
278 {
279         unsigned i;
280
281         /* Enable pflip interrupts */
282         for (i = 0; i < adev->mode_info.num_crtc; i++)
283                 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
284 }
285
286 static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
287 {
288         unsigned i;
289
290         /* Disable pflip interrupts */
291         for (i = 0; i < adev->mode_info.num_crtc; i++)
292                 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
293 }
294
295 /**
296  * dce_v11_0_page_flip - pageflip callback.
297  *
298  * @adev: amdgpu_device pointer
299  * @crtc_id: crtc to cleanup pageflip on
300  * @crtc_base: new address of the crtc (GPU MC address)
301  *
302  * Triggers the actual pageflip by updating the primary
303  * surface base address.
304  */
305 static void dce_v11_0_page_flip(struct amdgpu_device *adev,
306                                 int crtc_id, u64 crtc_base, bool async)
307 {
308         struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
309         u32 tmp;
310
311         /* flip immediate for async, default is vsync */
312         tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
313         tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
314                             GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
315         WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
316         /* update the scanout addresses */
317         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
318                upper_32_bits(crtc_base));
319         /* writing to the low address triggers the update */
320         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
321                lower_32_bits(crtc_base));
322         /* post the write */
323         RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
324 }
325
326 static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
327                                         u32 *vbl, u32 *position)
328 {
329         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
330                 return -EINVAL;
331
332         *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
333         *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
334
335         return 0;
336 }
337
338 /**
339  * dce_v11_0_hpd_sense - hpd sense callback.
340  *
341  * @adev: amdgpu_device pointer
342  * @hpd: hpd (hotplug detect) pin
343  *
344  * Checks if a digital monitor is connected (evergreen+).
345  * Returns true if connected, false if not connected.
346  */
347 static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
348                                enum amdgpu_hpd_id hpd)
349 {
350         bool connected = false;
351
352         if (hpd >= adev->mode_info.num_hpd)
353                 return connected;
354
355         if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
356             DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
357                 connected = true;
358
359         return connected;
360 }
361
362 /**
363  * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
364  *
365  * @adev: amdgpu_device pointer
366  * @hpd: hpd (hotplug detect) pin
367  *
368  * Set the polarity of the hpd pin (evergreen+).
369  */
370 static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
371                                       enum amdgpu_hpd_id hpd)
372 {
373         u32 tmp;
374         bool connected = dce_v11_0_hpd_sense(adev, hpd);
375
376         if (hpd >= adev->mode_info.num_hpd)
377                 return;
378
379         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
380         if (connected)
381                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
382         else
383                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
384         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
385 }
386
387 /**
388  * dce_v11_0_hpd_init - hpd setup callback.
389  *
390  * @adev: amdgpu_device pointer
391  *
392  * Setup the hpd pins used by the card (evergreen+).
393  * Enable the pin, set the polarity, and enable the hpd interrupts.
394  */
395 static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
396 {
397         struct drm_device *dev = adev->ddev;
398         struct drm_connector *connector;
399         u32 tmp;
400
401         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
402                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
403
404                 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
405                         continue;
406
407                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
408                     connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
409                         /* don't try to enable hpd on eDP or LVDS avoid breaking the
410                          * aux dp channel on imac and help (but not completely fix)
411                          * https://bugzilla.redhat.com/show_bug.cgi?id=726143
412                          * also avoid interrupt storms during dpms.
413                          */
414                         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
415                         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
416                         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
417                         continue;
418                 }
419
420                 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
421                 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
422                 WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
423
424                 tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
425                 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
426                                     DC_HPD_CONNECT_INT_DELAY,
427                                     AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
428                 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
429                                     DC_HPD_DISCONNECT_INT_DELAY,
430                                     AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
431                 WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
432
433                 dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
434                 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
435         }
436 }
437
438 /**
439  * dce_v11_0_hpd_fini - hpd tear down callback.
440  *
441  * @adev: amdgpu_device pointer
442  *
443  * Tear down the hpd pins used by the card (evergreen+).
444  * Disable the hpd interrupts.
445  */
446 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
447 {
448         struct drm_device *dev = adev->ddev;
449         struct drm_connector *connector;
450         u32 tmp;
451
452         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
453                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
454
455                 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
456                         continue;
457
458                 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
459                 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
460                 WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
461
462                 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
463         }
464 }
465
/* Return the GPIO register used for HPD pin access on DCE 11. */
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}
470
471 static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
472 {
473         u32 crtc_hung = 0;
474         u32 crtc_status[6];
475         u32 i, j, tmp;
476
477         for (i = 0; i < adev->mode_info.num_crtc; i++) {
478                 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
479                 if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
480                         crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
481                         crtc_hung |= (1 << i);
482                 }
483         }
484
485         for (j = 0; j < 10; j++) {
486                 for (i = 0; i < adev->mode_info.num_crtc; i++) {
487                         if (crtc_hung & (1 << i)) {
488                                 tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
489                                 if (tmp != crtc_status[i])
490                                         crtc_hung &= ~(1 << i);
491                         }
492                 }
493                 if (crtc_hung == 0)
494                         return false;
495                 udelay(100);
496         }
497
498         return true;
499 }
500
/* Quiesce display accesses to the memory controller: save the VGA state
 * into @save, disable VGA render, and blank every enabled CRTC so scanout
 * stops fetching from VRAM.  Undone by dce_v11_0_resume_mc_access().
 */
static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
                                     struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	/* Save VGA state so resume can restore it. */
	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 1
			/* Preferred path: blank the CRTC but leave it enabled. */
			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				/*it is correct only for RGB ; black is 0*/
				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}
544
/* Undo dce_v11_0_stop_mc_access(): point every CRTC's primary surface back
 * at the start of VRAM, unblank the CRTCs that were enabled, and restore
 * the saved VGA state from @save.
 */
static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
				       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		/* Unblank only the CRTCs that stop_mc_access blanked. */
		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
573
574 static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
575                                            bool render)
576 {
577         u32 tmp;
578
579         /* Lockout access through VGA aperture*/
580         tmp = RREG32(mmVGA_HDP_CONTROL);
581         if (render)
582                 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
583         else
584                 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
585         WREG32(mmVGA_HDP_CONTROL, tmp);
586
587         /* disable VGA render */
588         tmp = RREG32(mmVGA_RENDER_CONTROL);
589         if (render)
590                 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
591         else
592                 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
593         WREG32(mmVGA_RENDER_CONTROL, tmp);
594 }
595
596 static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
597 {
598         int num_crtc = 0;
599
600         switch (adev->asic_type) {
601         case CHIP_CARRIZO:
602                 num_crtc = 3;
603                 break;
604         case CHIP_STONEY:
605                 num_crtc = 2;
606                 break;
607         case CHIP_POLARIS10:
608                 num_crtc = 6;
609                 break;
610         case CHIP_POLARIS11:
611                 num_crtc = 5;
612                 break;
613         default:
614                 num_crtc = 0;
615         }
616         return num_crtc;
617 }
618
619 void dce_v11_0_disable_dce(struct amdgpu_device *adev)
620 {
621         /*Disable VGA render and enabled crtc, if has DCE engine*/
622         if (amdgpu_atombios_has_dce_engine_info(adev)) {
623                 u32 tmp;
624                 int crtc_enabled, i;
625
626                 dce_v11_0_set_vga_render_state(adev, false);
627
628                 /*Disable crtc*/
629                 for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
630                         crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
631                                                                          CRTC_CONTROL, CRTC_MASTER_EN);
632                         if (crtc_enabled) {
633                                 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
634                                 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
635                                 tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
636                                 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
637                                 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
638                         }
639                 }
640         }
641 }
642
/* Program the FMT block (bit-depth reduction) for @encoder's CRTC based on
 * the attached monitor's bpc and the connector's dither preference.
 * Either spatial dithering or plain truncation is selected; the depth
 * field (0/1/2) presumably encodes the 6/8/10 bpc target — TODO confirm
 * against the FMT_BIT_DEPTH_CONTROL register spec.
 */
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	/* No monitor bpc known: leave the FMT block zeroed (written below). */
	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
		}
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			/* unlike bpc 6, bpc 8 also enables RGB random dither */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
		}
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
		}
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
718
719
720 /* display watermark setup */
721 /**
722  * dce_v11_0_line_buffer_adjust - Set up the line buffer
723  *
724  * @adev: amdgpu_device pointer
725  * @amdgpu_crtc: the selected display controller
726  * @mode: the current display mode on the selected display
727  * controller
728  *
729  * Setup up the line buffer allocation for
730  * the selected display controller (CIK).
731  * Returns the line buffer size in pixels.
732  */
733 static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
734                                        struct amdgpu_crtc *amdgpu_crtc,
735                                        struct drm_display_mode *mode)
736 {
737         u32 tmp, buffer_alloc, i, mem_cfg;
738         u32 pipe_offset = amdgpu_crtc->crtc_id;
739         /*
740          * Line Buffer Setup
741          * There are 6 line buffers, one for each display controllers.
742          * There are 3 partitions per LB. Select the number of partitions
743          * to enable based on the display width.  For display widths larger
744          * than 4096, you need use to use 2 display controllers and combine
745          * them using the stereo blender.
746          */
747         if (amdgpu_crtc->base.enabled && mode) {
748                 if (mode->crtc_hdisplay < 1920) {
749                         mem_cfg = 1;
750                         buffer_alloc = 2;
751                 } else if (mode->crtc_hdisplay < 2560) {
752                         mem_cfg = 2;
753                         buffer_alloc = 2;
754                 } else if (mode->crtc_hdisplay < 4096) {
755                         mem_cfg = 0;
756                         buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
757                 } else {
758                         DRM_DEBUG_KMS("Mode too big for LB!\n");
759                         mem_cfg = 0;
760                         buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
761                 }
762         } else {
763                 mem_cfg = 1;
764                 buffer_alloc = 0;
765         }
766
767         tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
768         tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
769         WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
770
771         tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
772         tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
773         WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
774
775         for (i = 0; i < adev->usec_timeout; i++) {
776                 tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
777                 if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
778                         break;
779                 udelay(1);
780         }
781
782         if (amdgpu_crtc->base.enabled && mode) {
783                 switch (mem_cfg) {
784                 case 0:
785                 default:
786                         return 4096 * 2;
787                 case 1:
788                         return 1920 * 2;
789                 case 2:
790                         return 2560 * 2;
791                 }
792         }
793
794         /* controller not enabled, so no lb used */
795         return 0;
796 }
797
798 /**
799  * cik_get_number_of_dram_channels - get the number of dram channels
800  *
801  * @adev: amdgpu_device pointer
802  *
803  * Look up the number of video ram channels (CIK).
804  * Used for display watermark bandwidth calculations
805  * Returns the number of dram channels
806  */
807 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
808 {
809         u32 tmp = RREG32(mmMC_SHARED_CHMAP);
810
811         switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
812         case 0:
813         default:
814                 return 1;
815         case 1:
816                 return 2;
817         case 2:
818                 return 4;
819         case 3:
820                 return 8;
821         case 4:
822                 return 3;
823         case 5:
824                 return 6;
825         case 6:
826                 return 10;
827         case 7:
828                 return 12;
829         case 8:
830                 return 16;
831         }
832 }
833
/* per-head input parameters for the display watermark calculations below */
struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
849
850 /**
851  * dce_v11_0_dram_bandwidth - get the dram bandwidth
852  *
853  * @wm: watermark calculation data
854  *
855  * Calculate the raw dram bandwidth (CIK).
856  * Used for display watermark bandwidth calculations
857  * Returns the dram bandwidth in MBytes/s
858  */
859 static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
860 {
861         /* Calculate raw DRAM Bandwidth */
862         fixed20_12 dram_efficiency; /* 0.7 */
863         fixed20_12 yclk, dram_channels, bandwidth;
864         fixed20_12 a;
865
866         a.full = dfixed_const(1000);
867         yclk.full = dfixed_const(wm->yclk);
868         yclk.full = dfixed_div(yclk, a);
869         dram_channels.full = dfixed_const(wm->dram_channels * 4);
870         a.full = dfixed_const(10);
871         dram_efficiency.full = dfixed_const(7);
872         dram_efficiency.full = dfixed_div(dram_efficiency, a);
873         bandwidth.full = dfixed_mul(dram_channels, yclk);
874         bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
875
876         return dfixed_trunc(bandwidth);
877 }
878
879 /**
880  * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
881  *
882  * @wm: watermark calculation data
883  *
884  * Calculate the dram bandwidth used for display (CIK).
885  * Used for display watermark bandwidth calculations
886  * Returns the dram bandwidth for display in MBytes/s
887  */
888 static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
889 {
890         /* Calculate DRAM Bandwidth and the part allocated to display. */
891         fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
892         fixed20_12 yclk, dram_channels, bandwidth;
893         fixed20_12 a;
894
895         a.full = dfixed_const(1000);
896         yclk.full = dfixed_const(wm->yclk);
897         yclk.full = dfixed_div(yclk, a);
898         dram_channels.full = dfixed_const(wm->dram_channels * 4);
899         a.full = dfixed_const(10);
900         disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
901         disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
902         bandwidth.full = dfixed_mul(dram_channels, yclk);
903         bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
904
905         return dfixed_trunc(bandwidth);
906 }
907
908 /**
909  * dce_v11_0_data_return_bandwidth - get the data return bandwidth
910  *
911  * @wm: watermark calculation data
912  *
913  * Calculate the data return bandwidth used for display (CIK).
914  * Used for display watermark bandwidth calculations
915  * Returns the data return bandwidth in MBytes/s
916  */
917 static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
918 {
919         /* Calculate the display Data return Bandwidth */
920         fixed20_12 return_efficiency; /* 0.8 */
921         fixed20_12 sclk, bandwidth;
922         fixed20_12 a;
923
924         a.full = dfixed_const(1000);
925         sclk.full = dfixed_const(wm->sclk);
926         sclk.full = dfixed_div(sclk, a);
927         a.full = dfixed_const(10);
928         return_efficiency.full = dfixed_const(8);
929         return_efficiency.full = dfixed_div(return_efficiency, a);
930         a.full = dfixed_const(32);
931         bandwidth.full = dfixed_mul(a, sclk);
932         bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
933
934         return dfixed_trunc(bandwidth);
935 }
936
937 /**
938  * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
939  *
940  * @wm: watermark calculation data
941  *
942  * Calculate the dmif bandwidth used for display (CIK).
943  * Used for display watermark bandwidth calculations
944  * Returns the dmif bandwidth in MBytes/s
945  */
946 static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
947 {
948         /* Calculate the DMIF Request Bandwidth */
949         fixed20_12 disp_clk_request_efficiency; /* 0.8 */
950         fixed20_12 disp_clk, bandwidth;
951         fixed20_12 a, b;
952
953         a.full = dfixed_const(1000);
954         disp_clk.full = dfixed_const(wm->disp_clk);
955         disp_clk.full = dfixed_div(disp_clk, a);
956         a.full = dfixed_const(32);
957         b.full = dfixed_mul(a, disp_clk);
958
959         a.full = dfixed_const(10);
960         disp_clk_request_efficiency.full = dfixed_const(8);
961         disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
962
963         bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
964
965         return dfixed_trunc(bandwidth);
966 }
967
968 /**
969  * dce_v11_0_available_bandwidth - get the min available bandwidth
970  *
971  * @wm: watermark calculation data
972  *
973  * Calculate the min available bandwidth used for display (CIK).
974  * Used for display watermark bandwidth calculations
975  * Returns the min available bandwidth in MBytes/s
976  */
977 static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
978 {
979         /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
980         u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
981         u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
982         u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
983
984         return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
985 }
986
987 /**
988  * dce_v11_0_average_bandwidth - get the average available bandwidth
989  *
990  * @wm: watermark calculation data
991  *
992  * Calculate the average available bandwidth used for display (CIK).
993  * Used for display watermark bandwidth calculations
994  * Returns the average available bandwidth in MBytes/s
995  */
996 static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
997 {
998         /* Calculate the display mode Average Bandwidth
999          * DisplayMode should contain the source and destination dimensions,
1000          * timing, etc.
1001          */
1002         fixed20_12 bpp;
1003         fixed20_12 line_time;
1004         fixed20_12 src_width;
1005         fixed20_12 bandwidth;
1006         fixed20_12 a;
1007
1008         a.full = dfixed_const(1000);
1009         line_time.full = dfixed_const(wm->active_time + wm->blank_time);
1010         line_time.full = dfixed_div(line_time, a);
1011         bpp.full = dfixed_const(wm->bytes_per_pixel);
1012         src_width.full = dfixed_const(wm->src_width);
1013         bandwidth.full = dfixed_mul(src_width, bpp);
1014         bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
1015         bandwidth.full = dfixed_div(bandwidth, line_time);
1016
1017         return dfixed_trunc(bandwidth);
1018 }
1019
/**
 * dce_v11_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
	/* time to return one 512-byte chunk at the available bandwidth */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	/* time to return a pair of cursor lines at the available bandwidth */
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	/* worst-case wait while the other active heads drain their requests */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	/* no active heads -> nothing to hide, watermark is 0 */
	if (wm->num_heads == 0)
		return 0;

	/* heavy downscaling, many scaler taps, or interlace needs more
	 * source lines per destination line */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* a = this head's share of the available bandwidth */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* b = DMIF-limited rate: dmif_size / ((mc_latency + 512) / disp_clk) */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* b = display-clock-limited fill rate (disp_clk * bytes_per_pixel) */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* time to fill the line buffer for one destination line at lb_fill_bw */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if the line buffer refills within the active period the raw latency
	 * suffices; otherwise pad the watermark by the overrun */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
1091
1092 /**
1093  * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
1094  * average and available dram bandwidth
1095  *
1096  * @wm: watermark calculation data
1097  *
1098  * Check if the display average bandwidth fits in the display
1099  * dram bandwidth (CIK).
1100  * Used for display watermark bandwidth calculations
1101  * Returns true if the display fits, false if not.
1102  */
1103 static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
1104 {
1105         if (dce_v11_0_average_bandwidth(wm) <=
1106             (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
1107                 return true;
1108         else
1109                 return false;
1110 }
1111
1112 /**
1113  * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
1114  * average and available bandwidth
1115  *
1116  * @wm: watermark calculation data
1117  *
1118  * Check if the display average bandwidth fits in the display
1119  * available bandwidth (CIK).
1120  * Used for display watermark bandwidth calculations
1121  * Returns true if the display fits, false if not.
1122  */
1123 static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
1124 {
1125         if (dce_v11_0_average_bandwidth(wm) <=
1126             (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
1127                 return true;
1128         else
1129                 return false;
1130 }
1131
1132 /**
1133  * dce_v11_0_check_latency_hiding - check latency hiding
1134  *
1135  * @wm: watermark calculation data
1136  *
1137  * Check latency hiding (CIK).
1138  * Used for display watermark bandwidth calculations
1139  * Returns true if the display fits, false if not.
1140  */
1141 static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
1142 {
1143         u32 lb_partitions = wm->lb_size / wm->src_width;
1144         u32 line_time = wm->active_time + wm->blank_time;
1145         u32 latency_tolerant_lines;
1146         u32 latency_hiding;
1147         fixed20_12 a;
1148
1149         a.full = dfixed_const(1);
1150         if (wm->vsc.full > a.full)
1151                 latency_tolerant_lines = 1;
1152         else {
1153                 if (lb_partitions <= (wm->vtaps + 1))
1154                         latency_tolerant_lines = 1;
1155                 else
1156                         latency_tolerant_lines = 2;
1157         }
1158
1159         latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1160
1161         if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
1162                 return true;
1163         else
1164                 return false;
1165 }
1166
/**
 * dce_v11_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce10_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		/* pixel period in ns; line_time capped to the 16-bit
		 * URGENCY_HIGH_WATERMARK field */
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v11_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v11_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		/* how many full lines of this mode the line buffer can hold */
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* select wm B */
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
1298
1299 /**
1300  * dce_v11_0_bandwidth_update - program display watermarks
1301  *
1302  * @adev: amdgpu_device pointer
1303  *
1304  * Calculate and program the display watermarks and line
1305  * buffer allocation (CIK).
1306  */
1307 static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
1308 {
1309         struct drm_display_mode *mode = NULL;
1310         u32 num_heads = 0, lb_size;
1311         int i;
1312
1313         amdgpu_update_display_priority(adev);
1314
1315         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1316                 if (adev->mode_info.crtcs[i]->base.enabled)
1317                         num_heads++;
1318         }
1319         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1320                 mode = &adev->mode_info.crtcs[i]->base.mode;
1321                 lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1322                 dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1323                                             lb_size, num_heads);
1324         }
1325 }
1326
1327 static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
1328 {
1329         int i;
1330         u32 offset, tmp;
1331
1332         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1333                 offset = adev->mode_info.audio.pin[i].offset;
1334                 tmp = RREG32_AUDIO_ENDPT(offset,
1335                                          ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1336                 if (((tmp &
1337                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1338                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1339                         adev->mode_info.audio.pin[i].connected = false;
1340                 else
1341                         adev->mode_info.audio.pin[i].connected = true;
1342         }
1343 }
1344
1345 static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
1346 {
1347         int i;
1348
1349         dce_v11_0_audio_get_connected_pins(adev);
1350
1351         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1352                 if (adev->mode_info.audio.pin[i].connected)
1353                         return &adev->mode_info.audio.pin[i];
1354         }
1355         DRM_ERROR("No connected audio pins found!\n");
1356         return NULL;
1357 }
1358
1359 static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1360 {
1361         struct amdgpu_device *adev = encoder->dev->dev_private;
1362         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1363         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1364         u32 tmp;
1365
1366         if (!dig || !dig->afmt || !dig->afmt->pin)
1367                 return;
1368
1369         tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1370         tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1371         WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1372 }
1373
1374 static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
1375                                                 struct drm_display_mode *mode)
1376 {
1377         struct amdgpu_device *adev = encoder->dev->dev_private;
1378         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1379         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1380         struct drm_connector *connector;
1381         struct amdgpu_connector *amdgpu_connector = NULL;
1382         u32 tmp;
1383         int interlace = 0;
1384
1385         if (!dig || !dig->afmt || !dig->afmt->pin)
1386                 return;
1387
1388         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1389                 if (connector->encoder == encoder) {
1390                         amdgpu_connector = to_amdgpu_connector(connector);
1391                         break;
1392                 }
1393         }
1394
1395         if (!amdgpu_connector) {
1396                 DRM_ERROR("Couldn't find encoder's connector\n");
1397                 return;
1398         }
1399
1400         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1401                 interlace = 1;
1402         if (connector->latency_present[interlace]) {
1403                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1404                                     VIDEO_LIPSYNC, connector->video_latency[interlace]);
1405                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1406                                     AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1407         } else {
1408                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1409                                     VIDEO_LIPSYNC, 0);
1410                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1411                                     AUDIO_LIPSYNC, 0);
1412         }
1413         WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1414                            ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1415 }
1416
1417 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1418 {
1419         struct amdgpu_device *adev = encoder->dev->dev_private;
1420         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1421         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1422         struct drm_connector *connector;
1423         struct amdgpu_connector *amdgpu_connector = NULL;
1424         u32 tmp;
1425         u8 *sadb = NULL;
1426         int sad_count;
1427
1428         if (!dig || !dig->afmt || !dig->afmt->pin)
1429                 return;
1430
1431         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1432                 if (connector->encoder == encoder) {
1433                         amdgpu_connector = to_amdgpu_connector(connector);
1434                         break;
1435                 }
1436         }
1437
1438         if (!amdgpu_connector) {
1439                 DRM_ERROR("Couldn't find encoder's connector\n");
1440                 return;
1441         }
1442
1443         sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1444         if (sad_count < 0) {
1445                 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1446                 sad_count = 0;
1447         }
1448
1449         /* program the speaker allocation */
1450         tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1451                                  ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1452         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1453                             DP_CONNECTION, 0);
1454         /* set HDMI mode */
1455         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1456                             HDMI_CONNECTION, 1);
1457         if (sad_count)
1458                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1459                                     SPEAKER_ALLOCATION, sadb[0]);
1460         else
1461                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1462                                     SPEAKER_ALLOCATION, 5); /* stereo */
1463         WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1464                            ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1465
1466         kfree(sadb);
1467 }
1468
/*
 * Program the pin's AZALIA audio descriptor registers from the connector's
 * EDID Short Audio Descriptors (SADs) so the HDA codec advertises the
 * sink's supported audio formats, sample rates and channel counts.
 */
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	/* one descriptor register per CEA-861 audio coding type */
	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	/* find the connector currently driven by this encoder */
	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	/* drm_edid_to_sad() allocates 'sads'; freed at the bottom */
	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				/* keep the matching SAD with the largest channel count */
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				/* PCM: accumulate stereo rates across all SADs;
				 * non-PCM: only the first matching SAD is used
				 */
				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}
1550
1551 static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
1552                                   struct amdgpu_audio_pin *pin,
1553                                   bool enable)
1554 {
1555         if (!pin)
1556                 return;
1557
1558         WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1559                            enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1560 }
1561
/* Register-apertures of the audio endpoint pins, indexed by pin id.
 * Eight entries; each ASIC uses only the first num_pins of them.
 */
static const u32 pin_offsets[] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
	AUD7_REGISTER_OFFSET,
};
1573
1574 static int dce_v11_0_audio_init(struct amdgpu_device *adev)
1575 {
1576         int i;
1577
1578         if (!amdgpu_audio)
1579                 return 0;
1580
1581         adev->mode_info.audio.enabled = true;
1582
1583         switch (adev->asic_type) {
1584         case CHIP_CARRIZO:
1585         case CHIP_STONEY:
1586                 adev->mode_info.audio.num_pins = 7;
1587                 break;
1588         case CHIP_POLARIS10:
1589                 adev->mode_info.audio.num_pins = 8;
1590                 break;
1591         case CHIP_POLARIS11:
1592                 adev->mode_info.audio.num_pins = 6;
1593                 break;
1594         default:
1595                 return -EINVAL;
1596         }
1597
1598         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1599                 adev->mode_info.audio.pin[i].channels = -1;
1600                 adev->mode_info.audio.pin[i].rate = -1;
1601                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1602                 adev->mode_info.audio.pin[i].status_bits = 0;
1603                 adev->mode_info.audio.pin[i].category_code = 0;
1604                 adev->mode_info.audio.pin[i].connected = false;
1605                 adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1606                 adev->mode_info.audio.pin[i].id = i;
1607                 /* disable audio.  it will be set up later */
1608                 /* XXX remove once we switch to ip funcs */
1609                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1610         }
1611
1612         return 0;
1613 }
1614
1615 static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
1616 {
1617         int i;
1618
1619         if (!amdgpu_audio)
1620                 return;
1621
1622         if (!adev->mode_info.audio.enabled)
1623                 return;
1624
1625         for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1626                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1627
1628         adev->mode_info.audio.enabled = false;
1629 }
1630
/*
 * update the N and CTS parameters for a given pixel clock rate
 *
 * Programs the HDMI Audio Clock Regeneration registers for all three base
 * sample-rate families (32/44.1/48 kHz) from the N/CTS pair table that
 * amdgpu_afmt_acr() computes for this pixel clock.
 */
static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	/* 32 kHz family: CTS then N */
	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	/* 44.1 kHz family */
	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	/* 48 kHz family */
	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);

}
1665
/*
 * build a HDMI Video Info Frame
 *
 * Copies a packed AVI infoframe (3-byte header + payload, as produced by
 * hdmi_avi_infoframe_pack()) into the AFMT_AVI_INFO0..3 registers,
 * little-endian, four payload bytes per register.
 */
static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint8_t *frame = buffer + 3;	/* payload starts after the 3-byte header */
	uint8_t *header = buffer;

	/* NOTE(review): 'size' is unused — the writes below assume a full
	 * AVI payload; confirm callers always pass
	 * HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE bytes.
	 */
	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	/* last word carries the version byte (header[1]) in its top byte */
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}
1688
/* Program audio DTO0 so the audio clock tracks the pixel clock of the
 * CRTC feeding this encoder.
 * assumes 'clock' is in kHz (24 * 1000 matches the 24 MHz reference
 * used for the phase) — TODO confirm against callers.
 */
static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;	/* numerator: 24 MHz reference */
	u32 dto_modulo = clock;		/* denominator: target pixel clock */
	u32 tmp;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
			    amdgpu_crtc->crtc_id);
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
1715
1716 /*
1717  * update the info frames with the data from the current display mode
1718  */
1719 static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
1720                                   struct drm_display_mode *mode)
1721 {
1722         struct drm_device *dev = encoder->dev;
1723         struct amdgpu_device *adev = dev->dev_private;
1724         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1725         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1726         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1727         u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1728         struct hdmi_avi_infoframe frame;
1729         ssize_t err;
1730         u32 tmp;
1731         int bpc = 8;
1732
1733         if (!dig || !dig->afmt)
1734                 return;
1735
1736         /* Silent, r600_hdmi_enable will raise WARN for us */
1737         if (!dig->afmt->enabled)
1738                 return;
1739
1740         /* hdmi deep color mode general control packets setup, if bpc > 8 */
1741         if (encoder->crtc) {
1742                 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1743                 bpc = amdgpu_crtc->bpc;
1744         }
1745
1746         /* disable audio prior to setting up hw */
1747         dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
1748         dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
1749
1750         dce_v11_0_audio_set_dto(encoder, mode->clock);
1751
1752         tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1753         tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1754         WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
1755
1756         WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
1757
1758         tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
1759         switch (bpc) {
1760         case 0:
1761         case 6:
1762         case 8:
1763         case 16:
1764         default:
1765                 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
1766                 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
1767                 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1768                           connector->name, bpc);
1769                 break;
1770         case 10:
1771                 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1772                 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
1773                 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1774                           connector->name);
1775                 break;
1776         case 12:
1777                 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1778                 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
1779                 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1780                           connector->name);
1781                 break;
1782         }
1783         WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
1784
1785         tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1786         tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
1787         tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
1788         tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
1789         WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1790
1791         tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1792         /* enable audio info frames (frames won't be set until audio is enabled) */
1793         tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1794         /* required for audio info values to be updated */
1795         tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1796         WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1797
1798         tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1799         /* required for audio info values to be updated */
1800         tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1801         WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1802
1803         tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1804         /* anything other than 0 */
1805         tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
1806         WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1807
1808         WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
1809
1810         tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1811         /* set the default audio delay */
1812         tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1813         /* should be suffient for all audio modes and small enough for all hblanks */
1814         tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1815         WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1816
1817         tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1818         /* allow 60958 channel status fields to be updated */
1819         tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1820         WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1821
1822         tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1823         if (bpc > 8)
1824                 /* clear SW CTS value */
1825                 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
1826         else
1827                 /* select SW CTS value */
1828                 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
1829         /* allow hw to sent ACR packets when required */
1830         tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1831         WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1832
1833         dce_v11_0_afmt_update_ACR(encoder, mode->clock);
1834
1835         tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1836         tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1837         WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1838
1839         tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1840         tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1841         WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1842
1843         tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1844         tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1845         tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1846         tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1847         tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1848         tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1849         tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1850         WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1851
1852         dce_v11_0_audio_write_speaker_allocation(encoder);
1853
1854         WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
1855                (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1856
1857         dce_v11_0_afmt_audio_select_pin(encoder);
1858         dce_v11_0_audio_write_sad_regs(encoder);
1859         dce_v11_0_audio_write_latency_fields(encoder, mode);
1860
1861         err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1862         if (err < 0) {
1863                 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1864                 return;
1865         }
1866
1867         err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1868         if (err < 0) {
1869                 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1870                 return;
1871         }
1872
1873         dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1874
1875         tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1876         /* enable AVI info frames */
1877         tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1878         /* required for audio info values to be updated */
1879         tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1880         WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1881
1882         tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1883         tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1884         WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1885
1886         tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1887         /* send audio packets */
1888         tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1889         WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1890
1891         WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
1892         WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
1893         WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
1894         WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
1895
1896         /* enable audio after to setting up hw */
1897         dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
1898 }
1899
1900 static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1901 {
1902         struct drm_device *dev = encoder->dev;
1903         struct amdgpu_device *adev = dev->dev_private;
1904         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1905         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1906
1907         if (!dig || !dig->afmt)
1908                 return;
1909
1910         /* Silent, r600_hdmi_enable will raise WARN for us */
1911         if (enable && dig->afmt->enabled)
1912                 return;
1913         if (!enable && !dig->afmt->enabled)
1914                 return;
1915
1916         if (!enable && dig->afmt->pin) {
1917                 dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
1918                 dig->afmt->pin = NULL;
1919         }
1920
1921         dig->afmt->enabled = enable;
1922
1923         DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1924                   enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1925 }
1926
1927 static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
1928 {
1929         int i;
1930
1931         for (i = 0; i < adev->mode_info.num_dig; i++)
1932                 adev->mode_info.afmt[i] = NULL;
1933
1934         /* DCE11 has audio blocks tied to DIG encoders */
1935         for (i = 0; i < adev->mode_info.num_dig; i++) {
1936                 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1937                 if (adev->mode_info.afmt[i]) {
1938                         adev->mode_info.afmt[i]->offset = dig_offsets[i];
1939                         adev->mode_info.afmt[i]->id = i;
1940                 } else {
1941                         int j;
1942                         for (j = 0; j < i; j++) {
1943                                 kfree(adev->mode_info.afmt[j]);
1944                                 adev->mode_info.afmt[j] = NULL;
1945                         }
1946                         return -ENOMEM;
1947                 }
1948         }
1949         return 0;
1950 }
1951
1952 static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
1953 {
1954         int i;
1955
1956         for (i = 0; i < adev->mode_info.num_dig; i++) {
1957                 kfree(adev->mode_info.afmt[i]);
1958                 adev->mode_info.afmt[i] = NULL;
1959         }
1960 }
1961
/* Per-CRTC VGA control registers, indexed by crtc_id (6 display controllers). */
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};
1971
1972 static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
1973 {
1974         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1975         struct drm_device *dev = crtc->dev;
1976         struct amdgpu_device *adev = dev->dev_private;
1977         u32 vga_control;
1978
1979         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1980         if (enable)
1981                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1982         else
1983                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1984 }
1985
1986 static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
1987 {
1988         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1989         struct drm_device *dev = crtc->dev;
1990         struct amdgpu_device *adev = dev->dev_private;
1991
1992         if (enable)
1993                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1994         else
1995                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1996 }
1997
1998 static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
1999                                      struct drm_framebuffer *fb,
2000                                      int x, int y, int atomic)
2001 {
2002         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2003         struct drm_device *dev = crtc->dev;
2004         struct amdgpu_device *adev = dev->dev_private;
2005         struct amdgpu_framebuffer *amdgpu_fb;
2006         struct drm_framebuffer *target_fb;
2007         struct drm_gem_object *obj;
2008         struct amdgpu_bo *abo;
2009         uint64_t fb_location, tiling_flags;
2010         uint32_t fb_format, fb_pitch_pixels;
2011         u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
2012         u32 pipe_config;
2013         u32 tmp, viewport_w, viewport_h;
2014         int r;
2015         bool bypass_lut = false;
2016         struct drm_format_name_buf format_name;
2017
2018         /* no fb bound */
2019         if (!atomic && !crtc->primary->fb) {
2020                 DRM_DEBUG_KMS("No FB bound\n");
2021                 return 0;
2022         }
2023
2024         if (atomic) {
2025                 amdgpu_fb = to_amdgpu_framebuffer(fb);
2026                 target_fb = fb;
2027         } else {
2028                 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2029                 target_fb = crtc->primary->fb;
2030         }
2031
2032         /* If atomic, assume fb object is pinned & idle & fenced and
2033          * just update base pointers
2034          */
2035         obj = amdgpu_fb->obj;
2036         abo = gem_to_amdgpu_bo(obj);
2037         r = amdgpu_bo_reserve(abo, false);
2038         if (unlikely(r != 0))
2039                 return r;
2040
2041         if (atomic) {
2042                 fb_location = amdgpu_bo_gpu_offset(abo);
2043         } else {
2044                 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2045                 if (unlikely(r != 0)) {
2046                         amdgpu_bo_unreserve(abo);
2047                         return -EINVAL;
2048                 }
2049         }
2050
2051         amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
2052         amdgpu_bo_unreserve(abo);
2053
2054         pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2055
2056         switch (target_fb->pixel_format) {
2057         case DRM_FORMAT_C8:
2058                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
2059                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2060                 break;
2061         case DRM_FORMAT_XRGB4444:
2062         case DRM_FORMAT_ARGB4444:
2063                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2064                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
2065 #ifdef __BIG_ENDIAN
2066                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2067                                         ENDIAN_8IN16);
2068 #endif
2069                 break;
2070         case DRM_FORMAT_XRGB1555:
2071         case DRM_FORMAT_ARGB1555:
2072                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2073                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2074 #ifdef __BIG_ENDIAN
2075                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2076                                         ENDIAN_8IN16);
2077 #endif
2078                 break;
2079         case DRM_FORMAT_BGRX5551:
2080         case DRM_FORMAT_BGRA5551:
2081                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2082                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
2083 #ifdef __BIG_ENDIAN
2084                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2085                                         ENDIAN_8IN16);
2086 #endif
2087                 break;
2088         case DRM_FORMAT_RGB565:
2089                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2090                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2091 #ifdef __BIG_ENDIAN
2092                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2093                                         ENDIAN_8IN16);
2094 #endif
2095                 break;
2096         case DRM_FORMAT_XRGB8888:
2097         case DRM_FORMAT_ARGB8888:
2098                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2099                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2100 #ifdef __BIG_ENDIAN
2101                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2102                                         ENDIAN_8IN32);
2103 #endif
2104                 break;
2105         case DRM_FORMAT_XRGB2101010:
2106         case DRM_FORMAT_ARGB2101010:
2107                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2108                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2109 #ifdef __BIG_ENDIAN
2110                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2111                                         ENDIAN_8IN32);
2112 #endif
2113                 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2114                 bypass_lut = true;
2115                 break;
2116         case DRM_FORMAT_BGRX1010102:
2117         case DRM_FORMAT_BGRA1010102:
2118                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2119                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
2120 #ifdef __BIG_ENDIAN
2121                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2122                                         ENDIAN_8IN32);
2123 #endif
2124                 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2125                 bypass_lut = true;
2126                 break;
2127         default:
2128                 DRM_ERROR("Unsupported screen format %s\n",
2129                           drm_get_format_name(target_fb->pixel_format, &format_name));
2130                 return -EINVAL;
2131         }
2132
2133         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2134                 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2135
2136                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2137                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2138                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2139                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2140                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2141
2142                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
2143                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2144                                           ARRAY_2D_TILED_THIN1);
2145                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
2146                                           tile_split);
2147                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
2148                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
2149                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
2150                                           mtaspect);
2151                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
2152                                           ADDR_SURF_MICRO_TILING_DISPLAY);
2153         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2154                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2155                                           ARRAY_1D_TILED_THIN1);
2156         }
2157
2158         fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
2159                                   pipe_config);
2160
2161         dce_v11_0_vga_enable(crtc, false);
2162
2163         /* Make sure surface address is updated at vertical blank rather than
2164          * horizontal blank
2165          */
2166         tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2167         tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2168                             GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2169         WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2170
2171         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2172                upper_32_bits(fb_location));
2173         WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2174                upper_32_bits(fb_location));
2175         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2176                (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2177         WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2178                (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2179         WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2180         WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2181
2182         /*
2183          * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2184          * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2185          * retain the full precision throughout the pipeline.
2186          */
2187         tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2188         if (bypass_lut)
2189                 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2190         else
2191                 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2192         WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2193
2194         if (bypass_lut)
2195                 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2196
2197         WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2198         WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2199         WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2200         WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2201         WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2202         WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2203
2204         fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2205         WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2206
2207         dce_v11_0_grph_enable(crtc, true);
2208
2209         WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2210                target_fb->height);
2211
2212         x &= ~3;
2213         y &= ~1;
2214         WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2215                (x << 16) | y);
2216         viewport_w = crtc->mode.hdisplay;
2217         viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2218         WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2219                (viewport_w << 16) | viewport_h);
2220
2221         /* set pageflip to happen anywhere in vblank interval */
2222         WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2223
2224         if (!atomic && fb && fb != crtc->primary->fb) {
2225                 amdgpu_fb = to_amdgpu_framebuffer(fb);
2226                 abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2227                 r = amdgpu_bo_reserve(abo, false);
2228                 if (unlikely(r != 0))
2229                         return r;
2230                 amdgpu_bo_unpin(abo);
2231                 amdgpu_bo_unreserve(abo);
2232         }
2233
2234         /* Bytes per pixel may have changed */
2235         dce_v11_0_bandwidth_update(adev);
2236
2237         return 0;
2238 }
2239
2240 static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
2241                                      struct drm_display_mode *mode)
2242 {
2243         struct drm_device *dev = crtc->dev;
2244         struct amdgpu_device *adev = dev->dev_private;
2245         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2246         u32 tmp;
2247
2248         tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2249         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2250                 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2251         else
2252                 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2253         WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2254 }
2255
/*
 * dce_v11_0_crtc_load_lut - upload the cached gamma palette for a crtc
 *
 * Programs the 256-entry DC LUT from amdgpu_crtc->lut_r/g/b (10 bits per
 * component) and forces the surrounding color pipeline stages (input CSC,
 * prescale, input gamma, degamma, gamut remap, regamma, output CSC,
 * denorm) into bypass so the LUT output reaches the screen unmodified.
 */
static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;
	u32 tmp;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	/* bypass the graphics input color-space conversion */
	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass the graphics prescale stage */
	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* input gamma mode 0 */
	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	/* black point at 0 ... */
	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	/* ... white point at full 16-bit range */
	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	/* enable writes to all three color components */
	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	/* write all 256 entries, packed R<<20 | G<<10 | B (10 bits each);
	 * the index auto-advances after each DC_LUT_30_COLOR write
	 */
	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	/* bypass degamma for graphics and both cursor planes */
	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass gamut remap */
	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass regamma */
	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass the output color-space conversion */
	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
2326
2327 static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
2328 {
2329         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2330         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2331
2332         switch (amdgpu_encoder->encoder_id) {
2333         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2334                 if (dig->linkb)
2335                         return 1;
2336                 else
2337                         return 0;
2338                 break;
2339         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2340                 if (dig->linkb)
2341                         return 3;
2342                 else
2343                         return 2;
2344                 break;
2345         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2346                 if (dig->linkb)
2347                         return 5;
2348                 else
2349                         return 4;
2350                 break;
2351         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2352                 return 6;
2353                 break;
2354         default:
2355                 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2356                 return 0;
2357         }
2358 }
2359
2360 /**
2361  * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
2362  *
2363  * @crtc: drm crtc
2364  *
2365  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2366  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2367  * monitors a dedicated PPLL must be used.  If a particular board has
2368  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2369  * as there is no need to program the PLL itself.  If we are not able to
2370  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2371  * avoid messing up an existing monitor.
2372  *
2373  * Asic specific PLL information
2374  *
2375  * DCE 10.x
2376  * Tonga
2377  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2378  * CI
2379  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2380  *
2381  */
2382 static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
2383 {
2384         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2385         struct drm_device *dev = crtc->dev;
2386         struct amdgpu_device *adev = dev->dev_private;
2387         u32 pll_in_use;
2388         int pll;
2389
2390         if ((adev->asic_type == CHIP_POLARIS10) ||
2391             (adev->asic_type == CHIP_POLARIS11)) {
2392                 struct amdgpu_encoder *amdgpu_encoder =
2393                         to_amdgpu_encoder(amdgpu_crtc->encoder);
2394                 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2395
2396                 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2397                         return ATOM_DP_DTO;
2398
2399                 switch (amdgpu_encoder->encoder_id) {
2400                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2401                         if (dig->linkb)
2402                                 return ATOM_COMBOPHY_PLL1;
2403                         else
2404                                 return ATOM_COMBOPHY_PLL0;
2405                         break;
2406                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2407                         if (dig->linkb)
2408                                 return ATOM_COMBOPHY_PLL3;
2409                         else
2410                                 return ATOM_COMBOPHY_PLL2;
2411                         break;
2412                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2413                         if (dig->linkb)
2414                                 return ATOM_COMBOPHY_PLL5;
2415                         else
2416                                 return ATOM_COMBOPHY_PLL4;
2417                         break;
2418                 default:
2419                         DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2420                         return ATOM_PPLL_INVALID;
2421                 }
2422         }
2423
2424         if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2425                 if (adev->clock.dp_extclk)
2426                         /* skip PPLL programming if using ext clock */
2427                         return ATOM_PPLL_INVALID;
2428                 else {
2429                         /* use the same PPLL for all DP monitors */
2430                         pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2431                         if (pll != ATOM_PPLL_INVALID)
2432                                 return pll;
2433                 }
2434         } else {
2435                 /* use the same PPLL for all monitors with the same clock */
2436                 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2437                 if (pll != ATOM_PPLL_INVALID)
2438                         return pll;
2439         }
2440
2441         /* XXX need to determine what plls are available on each DCE11 part */
2442         pll_in_use = amdgpu_pll_get_use_mask(crtc);
2443         if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
2444                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2445                         return ATOM_PPLL1;
2446                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2447                         return ATOM_PPLL0;
2448                 DRM_ERROR("unable to allocate a PPLL\n");
2449                 return ATOM_PPLL_INVALID;
2450         } else {
2451                 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2452                         return ATOM_PPLL2;
2453                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2454                         return ATOM_PPLL1;
2455                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2456                         return ATOM_PPLL0;
2457                 DRM_ERROR("unable to allocate a PPLL\n");
2458                 return ATOM_PPLL_INVALID;
2459         }
2460         return ATOM_PPLL_INVALID;
2461 }
2462
2463 static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2464 {
2465         struct amdgpu_device *adev = crtc->dev->dev_private;
2466         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2467         uint32_t cur_lock;
2468
2469         cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2470         if (lock)
2471                 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2472         else
2473                 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2474         WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2475 }
2476
2477 static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
2478 {
2479         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2480         struct amdgpu_device *adev = crtc->dev->dev_private;
2481         u32 tmp;
2482
2483         tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2484         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2485         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2486 }
2487
2488 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
2489 {
2490         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2491         struct amdgpu_device *adev = crtc->dev->dev_private;
2492         u32 tmp;
2493
2494         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2495                upper_32_bits(amdgpu_crtc->cursor_addr));
2496         WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2497                lower_32_bits(amdgpu_crtc->cursor_addr));
2498
2499         tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2500         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2501         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2502         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2503 }
2504
2505 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
2506                                         int x, int y)
2507 {
2508         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2509         struct amdgpu_device *adev = crtc->dev->dev_private;
2510         int xorigin = 0, yorigin = 0;
2511
2512         amdgpu_crtc->cursor_x = x;
2513         amdgpu_crtc->cursor_y = y;
2514
2515         /* avivo cursor are offset into the total surface */
2516         x += crtc->x;
2517         y += crtc->y;
2518         DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2519
2520         if (x < 0) {
2521                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2522                 x = 0;
2523         }
2524         if (y < 0) {
2525                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2526                 y = 0;
2527         }
2528
2529         WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2530         WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2531
2532         return 0;
2533 }
2534
2535 static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
2536                                       int x, int y)
2537 {
2538         int ret;
2539
2540         dce_v11_0_lock_cursor(crtc, true);
2541         ret = dce_v11_0_cursor_move_locked(crtc, x, y);
2542         dce_v11_0_lock_cursor(crtc, false);
2543
2544         return ret;
2545 }
2546
/*
 * dce_v11_0_crtc_cursor_set2 - install (or clear) the hardware cursor
 *
 * handle == 0 hides the cursor.  Otherwise the GEM object is looked up,
 * pinned into VRAM, and programmed as the cursor surface; the previously
 * installed cursor BO (if any) is unpinned and released at the end.
 * Returns 0 on success or a negative errno.
 */
static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
				      struct drm_file *file_priv,
				      uint32_t handle,
				      uint32_t width,
				      uint32_t height,
				      int32_t hot_x,
				      int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v11_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	/* reject cursors larger than the hardware supports */
	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	/* pin into VRAM; the returned GPU address (cursor_addr) is what
	 * dce_v11_0_show_cursor() programs into the cursor surface regs
	 */
	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	dce_v11_0_lock_cursor(crtc, true);

	/* a changed hot spot shifts the on-screen position by the delta */
	if (hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v11_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	/* only reprogram CUR_SIZE when the dimensions actually changed */
	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height) {
		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
		       (width - 1) << 16 | (height - 1));
		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
	}

	dce_v11_0_show_cursor(crtc);
	dce_v11_0_lock_cursor(crtc, false);

unpin:
	/* release the previously installed cursor BO, if any.
	 * NOTE(review): if the reserve fails here the old BO stays pinned
	 * and the error is not propagated (the function still returns 0).
	 */
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}
2635
2636 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2637 {
2638         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2639         struct amdgpu_device *adev = crtc->dev->dev_private;
2640
2641         if (amdgpu_crtc->cursor_bo) {
2642                 dce_v11_0_lock_cursor(crtc, true);
2643
2644                 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2645                                              amdgpu_crtc->cursor_y);
2646
2647                 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2648                        (amdgpu_crtc->cursor_width - 1) << 16 |
2649                        (amdgpu_crtc->cursor_height - 1));
2650
2651                 dce_v11_0_show_cursor(crtc);
2652
2653                 dce_v11_0_lock_cursor(crtc, false);
2654         }
2655 }
2656
2657 static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2658                                     u16 *blue, uint32_t size)
2659 {
2660         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2661         int i;
2662
2663         /* userspace palettes are always correct as is */
2664         for (i = 0; i < size; i++) {
2665                 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2666                 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2667                 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2668         }
2669         dce_v11_0_crtc_load_lut(crtc);
2670
2671         return 0;
2672 }
2673
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	/* detach from the DRM core first, then free the containing
	 * amdgpu_crtc wrapper object
	 */
	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}
2681
/* DRM crtc callbacks: cursor, gamma, legacy set_config and targeted
 * page flips, routed to the DCE 11.0 implementations above.
 */
static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
	.cursor_set2 = dce_v11_0_crtc_cursor_set2,
	.cursor_move = dce_v11_0_crtc_cursor_move,
	.gamma_set = dce_v11_0_crtc_gamma_set,
	.set_config = amdgpu_crtc_set_config,
	.destroy = dce_v11_0_crtc_destroy,
	.page_flip_target = amdgpu_crtc_page_flip_target,
};
2690
/*
 * dce_v11_0_crtc_dpms - power the crtc on or off via atombios
 *
 * ON enables the crtc, unblanks it, re-arms vblank/pageflip interrupts and
 * reloads the gamma LUT.  STANDBY/SUSPEND/OFF blank and disable the crtc.
 * Power-management clocks are recomputed afterwards in both cases.
 */
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		/* VGA mode is toggled around the unblank call — presumably
		 * required by the atombios blank table; kept as-is
		 */
		dce_v11_0_vga_enable(crtc, true);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		dce_v11_0_vga_enable(crtc, false);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v11_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		/* blank before disabling, but only if currently enabled */
		if (amdgpu_crtc->enabled) {
			dce_v11_0_vga_enable(crtc, true);
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
			dce_v11_0_vga_enable(crtc, false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}
2728
/* Called before a mode set: power-ungate the crtc pair, lock the crtc
 * against updates, and turn it off while it is reprogrammed.
 */
static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
2736
/* Called after a mode set: turn the crtc back on and release the lock
 * taken in dce_v11_0_crtc_prepare().
 */
static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
2742
/*
 * dce_v11_0_crtc_disable - fully tear down a crtc
 *
 * Turns the crtc off, unpins its scanout buffer, disables the GRPH plane,
 * re-enables crtc pair power gating, and shuts down the pixel PLL unless
 * another enabled crtc still shares it.  Finally clears the crtc's cached
 * pll/clock/encoder/connector bindings.
 */
static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	/* NOTE(review): ss is passed to program_pll uninitialized; the
	 * ATOM_DISABLE path presumably never reads it — confirm
	 */
	struct amdgpu_atom_ss ss;
	int i;

	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	/* unpin the framebuffer that was being scanned out, if any */
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *abo;

		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(abo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v11_0_grph_enable(crtc, false);

	/* re-enable crtc pair power gating (disabled in crtc_prepare) */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	/* keep the pll running if another enabled crtc still uses it */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL0:
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_COMBOPHY_PLL0:
	case ATOM_COMBOPHY_PLL1:
	case ATOM_COMBOPHY_PLL2:
	case ATOM_COMBOPHY_PLL3:
	case ATOM_COMBOPHY_PLL4:
	case ATOM_COMBOPHY_PLL5:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	/* forget the pll/encoder/connector bindings for this crtc */
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}
2811
/*
 * dce_v11_0_crtc_mode_set - program the crtc for a new display mode
 *
 * Requires that mode_fixup() already ran (it sets adjusted_clock and picks
 * the pll).  Programs the pixel PLL, DTD timing, scanout base, overscan and
 * scaler, restores the cursor, and caches the mode for dpm.
 * Returns 0 on success, -EINVAL when no adjusted clock was computed.
 */
static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11)) {
		struct amdgpu_encoder *amdgpu_encoder =
			to_amdgpu_encoder(amdgpu_crtc->encoder);
		int encoder_mode =
			amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);

		/* SetPixelClock calculates the plls and ss values now */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
						 amdgpu_crtc->pll_id,
						 encoder_mode, amdgpu_encoder->encoder_id,
						 adjusted_mode->clock, 0, 0, 0, 0,
						 amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
	} else {
		amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	}
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v11_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}
2850
/*
 * dce_v11_0_crtc_mode_fixup - validate and prepare a mode for this crtc
 *
 * Binds the active encoder/connector to the crtc, applies scaling fixup,
 * prepares the pll parameters (sets amdgpu_crtc->adjusted_clock, checked
 * later in mode_set) and allocates a pll.  Returns false to reject the
 * mode set.
 */
static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	/* no usable encoder/connector pair -> reject the mode */
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}
2885
static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	/* legacy (non-atomic) path: program the new scanout base now */
	return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
2891
2892 static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2893                                          struct drm_framebuffer *fb,
2894                                          int x, int y, enum mode_set_atomic state)
2895 {
2896        return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
2897 }
2898
/* drm_crtc_helper callbacks wiring the DCE v11 crtc implementation
 * into the legacy KMS helper modeset machinery */
static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
	.dpms = dce_v11_0_crtc_dpms,
	.mode_fixup = dce_v11_0_crtc_mode_fixup,
	.mode_set = dce_v11_0_crtc_mode_set,
	.mode_set_base = dce_v11_0_crtc_set_base,
	.mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
	.prepare = dce_v11_0_crtc_prepare,
	.commit = dce_v11_0_crtc_commit,
	.load_lut = dce_v11_0_crtc_load_lut,
	.disable = dce_v11_0_crtc_disable,
};
2910
2911 static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
2912 {
2913         struct amdgpu_crtc *amdgpu_crtc;
2914         int i;
2915
2916         amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2917                               (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2918         if (amdgpu_crtc == NULL)
2919                 return -ENOMEM;
2920
2921         drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
2922
2923         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2924         amdgpu_crtc->crtc_id = index;
2925         adev->mode_info.crtcs[index] = amdgpu_crtc;
2926
2927         amdgpu_crtc->max_cursor_width = 128;
2928         amdgpu_crtc->max_cursor_height = 128;
2929         adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2930         adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2931
2932         for (i = 0; i < 256; i++) {
2933                 amdgpu_crtc->lut_r[i] = i << 2;
2934                 amdgpu_crtc->lut_g[i] = i << 2;
2935                 amdgpu_crtc->lut_b[i] = i << 2;
2936         }
2937
2938         switch (amdgpu_crtc->crtc_id) {
2939         case 0:
2940         default:
2941                 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2942                 break;
2943         case 1:
2944                 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2945                 break;
2946         case 2:
2947                 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2948                 break;
2949         case 3:
2950                 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2951                 break;
2952         case 4:
2953                 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2954                 break;
2955         case 5:
2956                 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2957                 break;
2958         }
2959
2960         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2961         amdgpu_crtc->adjusted_clock = 0;
2962         amdgpu_crtc->encoder = NULL;
2963         amdgpu_crtc->connector = NULL;
2964         drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
2965
2966         return 0;
2967 }
2968
2969 static int dce_v11_0_early_init(void *handle)
2970 {
2971         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2972
2973         adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
2974         adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
2975
2976         dce_v11_0_set_display_funcs(adev);
2977         dce_v11_0_set_irq_funcs(adev);
2978
2979         adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
2980
2981         switch (adev->asic_type) {
2982         case CHIP_CARRIZO:
2983                 adev->mode_info.num_hpd = 6;
2984                 adev->mode_info.num_dig = 9;
2985                 break;
2986         case CHIP_STONEY:
2987                 adev->mode_info.num_hpd = 6;
2988                 adev->mode_info.num_dig = 9;
2989                 break;
2990         case CHIP_POLARIS10:
2991                 adev->mode_info.num_hpd = 6;
2992                 adev->mode_info.num_dig = 6;
2993                 break;
2994         case CHIP_POLARIS11:
2995                 adev->mode_info.num_hpd = 5;
2996                 adev->mode_info.num_dig = 5;
2997                 break;
2998         default:
2999                 /* FIXME: not supported yet */
3000                 return -EINVAL;
3001         }
3002
3003         return 0;
3004 }
3005
3006 static int dce_v11_0_sw_init(void *handle)
3007 {
3008         int r, i;
3009         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3010
3011         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3012                 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
3013                 if (r)
3014                         return r;
3015         }
3016
3017         for (i = 8; i < 20; i += 2) {
3018                 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
3019                 if (r)
3020                         return r;
3021         }
3022
3023         /* HPD hotplug */
3024         r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
3025         if (r)
3026                 return r;
3027
3028         adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
3029
3030         adev->ddev->mode_config.async_page_flip = true;
3031
3032         adev->ddev->mode_config.max_width = 16384;
3033         adev->ddev->mode_config.max_height = 16384;
3034
3035         adev->ddev->mode_config.preferred_depth = 24;
3036         adev->ddev->mode_config.prefer_shadow = 1;
3037
3038         adev->ddev->mode_config.fb_base = adev->mc.aper_base;
3039
3040         r = amdgpu_modeset_create_props(adev);
3041         if (r)
3042                 return r;
3043
3044         adev->ddev->mode_config.max_width = 16384;
3045         adev->ddev->mode_config.max_height = 16384;
3046
3047
3048         /* allocate crtcs */
3049         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3050                 r = dce_v11_0_crtc_init(adev, i);
3051                 if (r)
3052                         return r;
3053         }
3054
3055         if (amdgpu_atombios_get_connector_info_from_object_table(adev))
3056                 amdgpu_print_display_setup(adev->ddev);
3057         else
3058                 return -EINVAL;
3059
3060         /* setup afmt */
3061         r = dce_v11_0_afmt_init(adev);
3062         if (r)
3063                 return r;
3064
3065         r = dce_v11_0_audio_init(adev);
3066         if (r)
3067                 return r;
3068
3069         drm_kms_helper_poll_init(adev->ddev);
3070
3071         adev->mode_info.mode_config_initialized = true;
3072         return 0;
3073 }
3074
/* Tear down the software display state set up in sw_init.
 * Order matters: polling must stop before the mode objects go away. */
static int dce_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* free the EDID copy cached from the vbios, if any */
	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v11_0_audio_fini(adev);

	dce_v11_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}
3092
/* Bring up the display hardware: golden registers, DIG PHYs,
 * display clocks, hpd and pageflip interrupts.  The sequence is
 * order-sensitive (clocks must be up before hpd init). */
static int dce_v11_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v11_0_init_golden_registers(adev);

	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_crtc_powergate_init(adev);
	amdgpu_atombios_encoder_init_dig(adev);
	/* Polaris uses the newer SetDCEClock atom cmd; older parts use
	 * the SetPixelClock-based disp eng pll path */
	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11)) {
		amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
						   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
		amdgpu_atombios_crtc_set_dce_clock(adev, 0,
						   DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
	} else {
		amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
	}

	/* initialize hpd */
	dce_v11_0_hpd_init(adev);

	/* audio pins start disabled; enabled on demand per stream */
	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v11_0_pageflip_interrupt_init(adev);

	return 0;
}
3124
/* Quiesce the display hardware: disable hpd, audio pins and
 * pageflip interrupts (mirror image of hw_init). */
static int dce_v11_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v11_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v11_0_pageflip_interrupt_fini(adev);

	return 0;
}
3140
static int dce_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* no extra display state to save; powering the block down suffices */
	return dce_v11_0_hw_fini(adev);
}
3145
3146 static int dce_v11_0_resume(void *handle)
3147 {
3148         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3149         int ret;
3150
3151         ret = dce_v11_0_hw_init(handle);
3152
3153         /* turn on the BL */
3154         if (adev->mode_info.bl_encoder) {
3155                 u8 bl_level = amdgpu_display_backlight_get_level(adev,
3156                                                                   adev->mode_info.bl_encoder);
3157                 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3158                                                     bl_level);
3159         }
3160
3161         return ret;
3162 }
3163
/* display block has no busy state to report; always idle */
static bool dce_v11_0_is_idle(void *handle)
{
	return true;
}
3168
/* nothing to wait on (see dce_v11_0_is_idle) */
static int dce_v11_0_wait_for_idle(void *handle)
{
	return 0;
}
3173
/* Soft-reset the display controller through SRBM if it appears hung.
 * The assert/settle/deassert sequence and the read-backs after each
 * write (to flush the posted write) are required by the hardware. */
static int dce_v11_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0, tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (dce_v11_0_is_display_hung(adev))
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (srbm_soft_reset) {
		/* assert reset */
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		/* deassert reset */
		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}
3200
3201 static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3202                                                      int crtc,
3203                                                      enum amdgpu_interrupt_state state)
3204 {
3205         u32 lb_interrupt_mask;
3206
3207         if (crtc >= adev->mode_info.num_crtc) {
3208                 DRM_DEBUG("invalid crtc %d\n", crtc);
3209                 return;
3210         }
3211
3212         switch (state) {
3213         case AMDGPU_IRQ_STATE_DISABLE:
3214                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3215                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3216                                                   VBLANK_INTERRUPT_MASK, 0);
3217                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3218                 break;
3219         case AMDGPU_IRQ_STATE_ENABLE:
3220                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3221                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3222                                                   VBLANK_INTERRUPT_MASK, 1);
3223                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3224                 break;
3225         default:
3226                 break;
3227         }
3228 }
3229
3230 static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3231                                                     int crtc,
3232                                                     enum amdgpu_interrupt_state state)
3233 {
3234         u32 lb_interrupt_mask;
3235
3236         if (crtc >= adev->mode_info.num_crtc) {
3237                 DRM_DEBUG("invalid crtc %d\n", crtc);
3238                 return;
3239         }
3240
3241         switch (state) {
3242         case AMDGPU_IRQ_STATE_DISABLE:
3243                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3244                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3245                                                   VLINE_INTERRUPT_MASK, 0);
3246                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3247                 break;
3248         case AMDGPU_IRQ_STATE_ENABLE:
3249                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3250                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3251                                                   VLINE_INTERRUPT_MASK, 1);
3252                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3253                 break;
3254         default:
3255                 break;
3256         }
3257 }
3258
3259 static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
3260                                         struct amdgpu_irq_src *source,
3261                                         unsigned hpd,
3262                                         enum amdgpu_interrupt_state state)
3263 {
3264         u32 tmp;
3265
3266         if (hpd >= adev->mode_info.num_hpd) {
3267                 DRM_DEBUG("invalid hdp %d\n", hpd);
3268                 return 0;
3269         }
3270
3271         switch (state) {
3272         case AMDGPU_IRQ_STATE_DISABLE:
3273                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3274                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3275                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3276                 break;
3277         case AMDGPU_IRQ_STATE_ENABLE:
3278                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3279                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3280                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3281                 break;
3282         default:
3283                 break;
3284         }
3285
3286         return 0;
3287 }
3288
/* Demultiplex the generic crtc irq type onto the per-crtc
 * vblank/vline enable helpers.  Unknown types are silently ignored. */
static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}
3336
3337 static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3338                                             struct amdgpu_irq_src *src,
3339                                             unsigned type,
3340                                             enum amdgpu_interrupt_state state)
3341 {
3342         u32 reg;
3343
3344         if (type >= adev->mode_info.num_crtc) {
3345                 DRM_ERROR("invalid pageflip crtc %d\n", type);
3346                 return -EINVAL;
3347         }
3348
3349         reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3350         if (state == AMDGPU_IRQ_STATE_DISABLE)
3351                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3352                        reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3353         else
3354                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3355                        reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3356
3357         return 0;
3358 }
3359
3360 static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3361                                   struct amdgpu_irq_src *source,
3362                                   struct amdgpu_iv_entry *entry)
3363 {
3364         unsigned long flags;
3365         unsigned crtc_id;
3366         struct amdgpu_crtc *amdgpu_crtc;
3367         struct amdgpu_flip_work *works;
3368
3369         crtc_id = (entry->src_id - 8) >> 1;
3370         amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3371
3372         if (crtc_id >= adev->mode_info.num_crtc) {
3373                 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3374                 return -EINVAL;
3375         }
3376
3377         if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3378             GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3379                 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3380                        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3381
3382         /* IRQ could occur when in initial stage */
3383         if(amdgpu_crtc == NULL)
3384                 return 0;
3385
3386         spin_lock_irqsave(&adev->ddev->event_lock, flags);
3387         works = amdgpu_crtc->pflip_works;
3388         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
3389                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3390                                                  "AMDGPU_FLIP_SUBMITTED(%d)\n",
3391                                                  amdgpu_crtc->pflip_status,
3392                                                  AMDGPU_FLIP_SUBMITTED);
3393                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3394                 return 0;
3395         }
3396
3397         /* page flip completed. clean up */
3398         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3399         amdgpu_crtc->pflip_works = NULL;
3400
3401         /* wakeup usersapce */
3402         if(works->event)
3403                 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3404
3405         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3406
3407         drm_crtc_vblank_put(&amdgpu_crtc->base);
3408         schedule_work(&works->unpin_work);
3409
3410         return 0;
3411 }
3412
3413 static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
3414                                   int hpd)
3415 {
3416         u32 tmp;
3417
3418         if (hpd >= adev->mode_info.num_hpd) {
3419                 DRM_DEBUG("invalid hdp %d\n", hpd);
3420                 return;
3421         }
3422
3423         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3424         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3425         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3426 }
3427
3428 static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3429                                           int crtc)
3430 {
3431         u32 tmp;
3432
3433         if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3434                 DRM_DEBUG("invalid crtc %d\n", crtc);
3435                 return;
3436         }
3437
3438         tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3439         tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3440         WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3441 }
3442
3443 static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3444                                          int crtc)
3445 {
3446         u32 tmp;
3447
3448         if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3449                 DRM_DEBUG("invalid crtc %d\n", crtc);
3450                 return;
3451         }
3452
3453         tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3454         tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3455         WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3456 }
3457
3458 static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
3459                                 struct amdgpu_irq_src *source,
3460                                 struct amdgpu_iv_entry *entry)
3461 {
3462         unsigned crtc = entry->src_id - 1;
3463         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3464         unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3465
3466         switch (entry->src_data) {
3467         case 0: /* vblank */
3468                 if (disp_int & interrupt_status_offsets[crtc].vblank)
3469                         dce_v11_0_crtc_vblank_int_ack(adev, crtc);
3470                 else
3471                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3472
3473                 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3474                         drm_handle_vblank(adev->ddev, crtc);
3475                 }
3476                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3477
3478                 break;
3479         case 1: /* vline */
3480                 if (disp_int & interrupt_status_offsets[crtc].vline)
3481                         dce_v11_0_crtc_vline_int_ack(adev, crtc);
3482                 else
3483                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3484
3485                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3486
3487                 break;
3488         default:
3489                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3490                 break;
3491         }
3492
3493         return 0;
3494 }
3495
3496 static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
3497                              struct amdgpu_irq_src *source,
3498                              struct amdgpu_iv_entry *entry)
3499 {
3500         uint32_t disp_int, mask;
3501         unsigned hpd;
3502
3503         if (entry->src_data >= adev->mode_info.num_hpd) {
3504                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3505                 return 0;
3506         }
3507
3508         hpd = entry->src_data;
3509         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3510         mask = interrupt_status_offsets[hpd].hpd;
3511
3512         if (disp_int & mask) {
3513                 dce_v11_0_hpd_int_ack(adev, hpd);
3514                 schedule_work(&adev->hotplug_work);
3515                 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3516         }
3517
3518         return 0;
3519 }
3520
/* no display clockgating on DCE v11; nothing to do */
static int dce_v11_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
3526
/* no display powergating on DCE v11; nothing to do */
static int dce_v11_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
3532
/* amd IP-block callbacks registering DCE v11 with the device
 * init/suspend/reset state machine */
static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
	.name = "dce_v11_0",
	.early_init = dce_v11_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v11_0_sw_init,
	.sw_fini = dce_v11_0_sw_fini,
	.hw_init = dce_v11_0_hw_init,
	.hw_fini = dce_v11_0_hw_fini,
	.suspend = dce_v11_0_suspend,
	.resume = dce_v11_0_resume,
	.is_idle = dce_v11_0_is_idle,
	.wait_for_idle = dce_v11_0_wait_for_idle,
	.soft_reset = dce_v11_0_soft_reset,
	.set_clockgating_state = dce_v11_0_set_clockgating_state,
	.set_powergating_state = dce_v11_0_set_powergating_state,
};
3549
3550 static void
3551 dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
3552                           struct drm_display_mode *mode,
3553                           struct drm_display_mode *adjusted_mode)
3554 {
3555         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3556
3557         amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3558
3559         /* need to call this here rather than in prepare() since we need some crtc info */
3560         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3561
3562         /* set scaler clears this on some chips */
3563         dce_v11_0_set_interleave(encoder->crtc, mode);
3564
3565         if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3566                 dce_v11_0_afmt_enable(encoder, true);
3567                 dce_v11_0_afmt_setmode(encoder, adjusted_mode);
3568         }
3569 }
3570
/* Prepare an encoder for a modeset.  Assigns a DIG/afmt block,
 * takes the atombios scratch-reg lock (released in encoder_commit),
 * powers eDP panels and programs the crtc source + FMT block.
 * The statement order is mandated by the atombios tables. */
static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			/* pick a free DIG encoder; DFP also needs an afmt block */
			dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	/* held until encoder_commit */
	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v11_0_program_fmt(encoder);
}
3609
3610 static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
3611 {
3612         struct drm_device *dev = encoder->dev;
3613         struct amdgpu_device *adev = dev->dev_private;
3614
3615         /* need to call this here as we need the crtc set up */
3616         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3617         amdgpu_atombios_scratch_regs_lock(adev, false);
3618 }
3619
3620 static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
3621 {
3622         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3623         struct amdgpu_encoder_atom_dig *dig;
3624
3625         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3626
3627         if (amdgpu_atombios_encoder_is_digital(encoder)) {
3628                 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3629                         dce_v11_0_afmt_enable(encoder, false);
3630                 dig = amdgpu_encoder->enc_priv;
3631                 dig->dig_encoder = -1;
3632         }
3633         amdgpu_encoder->active_device = 0;
3634 }
3635
/* these are handled by the primary encoders */
/* External (bridge) encoders are driven entirely by the primary
 * encoder they hang off; these helper callbacks are intentionally
 * empty and exist only so the helper framework has something to call. */
static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{

}

static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}
3665
/* helper callbacks for external/bridge encoders (all no-ops above) */
static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
	.dpms = dce_v11_0_ext_dpms,
	.prepare = dce_v11_0_ext_prepare,
	.mode_set = dce_v11_0_ext_mode_set,
	.commit = dce_v11_0_ext_commit,
	.disable = dce_v11_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};
3674
/* helper callbacks for internal digital encoders (UNIPHY/DVO) */
static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v11_0_encoder_prepare,
	.mode_set = dce_v11_0_encoder_mode_set,
	.commit = dce_v11_0_encoder_commit,
	.disable = dce_v11_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};
3684
/* helper callbacks for internal analog (DAC) encoders; no .disable hook,
 * the dpms OFF path covers shutdown */
static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v11_0_encoder_prepare,
	.mode_set = dce_v11_0_encoder_mode_set,
	.commit = dce_v11_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};
3693
/* Free an encoder: tear down backlight state (LCD only), release the
 * per-encoder private data, unregister from DRM, then free the wrapper.
 */
static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	/* cleanup before the final kfree: the base drm_encoder must still
	 * be valid while DRM unlinks it */
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}
3703
/* core encoder vtable; only destruction needs a driver hook here */
static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
	.destroy = dce_v11_0_encoder_destroy,
};
3707
3708 static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3709                                  uint32_t encoder_enum,
3710                                  uint32_t supported_device,
3711                                  u16 caps)
3712 {
3713         struct drm_device *dev = adev->ddev;
3714         struct drm_encoder *encoder;
3715         struct amdgpu_encoder *amdgpu_encoder;
3716
3717         /* see if we already added it */
3718         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3719                 amdgpu_encoder = to_amdgpu_encoder(encoder);
3720                 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3721                         amdgpu_encoder->devices |= supported_device;
3722                         return;
3723                 }
3724
3725         }
3726
3727         /* add a new one */
3728         amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3729         if (!amdgpu_encoder)
3730                 return;
3731
3732         encoder = &amdgpu_encoder->base;
3733         switch (adev->mode_info.num_crtc) {
3734         case 1:
3735                 encoder->possible_crtcs = 0x1;
3736                 break;
3737         case 2:
3738         default:
3739                 encoder->possible_crtcs = 0x3;
3740                 break;
3741         case 4:
3742                 encoder->possible_crtcs = 0xf;
3743                 break;
3744         case 6:
3745                 encoder->possible_crtcs = 0x3f;
3746                 break;
3747         }
3748
3749         amdgpu_encoder->enc_priv = NULL;
3750
3751         amdgpu_encoder->encoder_enum = encoder_enum;
3752         amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3753         amdgpu_encoder->devices = supported_device;
3754         amdgpu_encoder->rmx_type = RMX_OFF;
3755         amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3756         amdgpu_encoder->is_ext_encoder = false;
3757         amdgpu_encoder->caps = caps;
3758
3759         switch (amdgpu_encoder->encoder_id) {
3760         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3761         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3762                 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3763                                  DRM_MODE_ENCODER_DAC, NULL);
3764                 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
3765                 break;
3766         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3767         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3768         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3769         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3770         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3771                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3772                         amdgpu_encoder->rmx_type = RMX_FULL;
3773                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3774                                          DRM_MODE_ENCODER_LVDS, NULL);
3775                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3776                 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3777                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3778                                          DRM_MODE_ENCODER_DAC, NULL);
3779                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3780                 } else {
3781                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3782                                          DRM_MODE_ENCODER_TMDS, NULL);
3783                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3784                 }
3785                 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
3786                 break;
3787         case ENCODER_OBJECT_ID_SI170B:
3788         case ENCODER_OBJECT_ID_CH7303:
3789         case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3790         case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3791         case ENCODER_OBJECT_ID_TITFP513:
3792         case ENCODER_OBJECT_ID_VT1623:
3793         case ENCODER_OBJECT_ID_HDMI_SI1930:
3794         case ENCODER_OBJECT_ID_TRAVIS:
3795         case ENCODER_OBJECT_ID_NUTMEG:
3796                 /* these are handled by the primary encoders */
3797                 amdgpu_encoder->is_ext_encoder = true;
3798                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3799                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3800                                          DRM_MODE_ENCODER_LVDS, NULL);
3801                 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3802                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3803                                          DRM_MODE_ENCODER_DAC, NULL);
3804                 else
3805                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3806                                          DRM_MODE_ENCODER_TMDS, NULL);
3807                 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
3808                 break;
3809         }
3810 }
3811
/* display callbacks exported to the rest of the driver via
 * adev->mode_info.funcs (see dce_v11_0_set_display_funcs) */
static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
	.set_vga_render_state = &dce_v11_0_set_vga_render_state,
	.bandwidth_update = &dce_v11_0_bandwidth_update,
	.vblank_get_counter = &dce_v11_0_vblank_get_counter,
	.vblank_wait = &dce_v11_0_vblank_wait,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v11_0_hpd_sense,
	.hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
	.page_flip = &dce_v11_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v11_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v11_0_stop_mc_access,
	.resume_mc_access = &dce_v11_0_resume_mc_access,
};
3829
3830 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
3831 {
3832         if (adev->mode_info.funcs == NULL)
3833                 adev->mode_info.funcs = &dce_v11_0_display_funcs;
3834 }
3835
/* CRTC (vblank/vline) interrupt source callbacks */
static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
	.set = dce_v11_0_set_crtc_irq_state,
	.process = dce_v11_0_crtc_irq,
};
3840
/* page flip completion interrupt source callbacks */
static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
	.set = dce_v11_0_set_pageflip_irq_state,
	.process = dce_v11_0_pageflip_irq,
};
3845
/* hotplug detect interrupt source callbacks */
static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
	.set = dce_v11_0_set_hpd_irq_state,
	.process = dce_v11_0_hpd_irq,
};
3850
3851 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
3852 {
3853         adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3854         adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
3855
3856         adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3857         adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
3858
3859         adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3860         adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
3861 }
3862
/* IP block descriptor for DCE 11.0 hardware */
const struct amdgpu_ip_block_version dce_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v11_0_ip_funcs,
};
3871
/* IP block descriptor for DCE 11.2 hardware; shares the 11.0 ip funcs */
const struct amdgpu_ip_block_version dce_v11_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 11,
	.minor = 2,
	.rev = 0,
	.funcs = &dce_v11_0_ip_funcs,
};