/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

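/*
 * Register offsets as seen by the VCPU; these are programmed indirectly
 * through decode ring packets (via the vcn_v2_0 dec ring helpers reused
 * below) rather than through the MMIO aperture.
 */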
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET                        0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET                    0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET                  0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET                  0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET                             0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET                       0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET                          0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET                   0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET          0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET         0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET                       0x25c

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET                        0x401f

#define VCN25_MAX_HW_INSTANCES_ARCTURUS                         2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
                                enum amd_powergating_state state);

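/* IH client ids, indexed by VCN instance */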
static int amdgpu_ih_clientid_vcns[] = {
        SOC15_IH_CLIENTID_VCN,
        SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->asic_type == CHIP_ARCTURUS) {
                u32 harvest;
                int i;

                adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
                for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                        harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
                        if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
                                adev->vcn.harvest_config |= 1 << i;
                }

                if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
                                                 AMDGPU_VCN_HARVEST_VCN1))
                        /* both instances are harvested, disable the block */
                        return -ENOENT;
        } else {
                adev->vcn.num_vcn_inst = 1;
        }

        adev->vcn.num_enc_rings = 2;

        vcn_v2_5_set_dec_ring_funcs(adev);
        vcn_v2_5_set_enc_ring_funcs(adev);
        vcn_v2_5_set_jpeg_ring_funcs(adev);
        vcn_v2_5_set_irq_funcs(adev);

        return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int i, j, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
                /* VCN DEC TRAP */
                r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
                                VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
                if (r)
                        return r;

                /* VCN ENC TRAP */
                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                        r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
                                i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
                        if (r)
                                return r;
                }

                /* VCN JPEG TRAP */
                r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
                                VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
                if (r)
                        return r;
        }

        r = amdgpu_vcn_sw_init(adev);
        if (r)
                return r;

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                const struct common_firmware_header *hdr;

                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
                adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

                if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
                        adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
                        adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
                }
                DRM_INFO("PSP loading VCN firmware\n");
        }

        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;

        for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
                adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
                adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
                adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
                adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
                adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
                adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

                adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
                adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
                adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
                adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
                adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
                adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
                adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
                adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
                adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
                adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

                adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
                adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);

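                /*
                 * Doorbell layout, eight slots per VCN instance:
                 * slot 0 is the dec ring, slot 1 the jpeg ring and
                 * slots 2-3 the enc rings.
                 */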
                ring = &adev->vcn.inst[j].ring_dec;
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
                sprintf(ring->name, "vcn_dec_%d", j);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
                if (r)
                        return r;

                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                        ring = &adev->vcn.inst[j].ring_enc[i];
                        ring->use_doorbell = true;
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
                        sprintf(ring->name, "vcn_enc_%d.%d", j, i);
                        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
                        if (r)
                                return r;
                }

                ring = &adev->vcn.inst[j].ring_jpeg;
                ring->use_doorbell = true;
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
                sprintf(ring->name, "vcn_jpeg_%d", j);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
                if (r)
                        return r;
        }

        return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vcn_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vcn_sw_fini(adev);

        return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
        int i, j, r;

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
                ring = &adev->vcn.inst[j].ring_dec;

                adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
                                                     ring->doorbell_index, j);

                r = amdgpu_ring_test_helper(ring);
                if (r)
                        goto done;

                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                        ring = &adev->vcn.inst[j].ring_enc[i];
                        /* keep the encode rings disabled and skip their
                         * ring test until the firmware is robust enough
                         */
                        ring->sched.ready = false;
                        continue;
                        r = amdgpu_ring_test_helper(ring);
                        if (r)
                                goto done;
                }

                ring = &adev->vcn.inst[j].ring_jpeg;
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        goto done;
        }
done:
        if (!r)
                DRM_INFO("VCN decode and encode initialized successfully.\n");

        return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
        int i, j;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                ring = &adev->vcn.inst[i].ring_dec;

                if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
                        vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

                ring->sched.ready = false;

                /* use a separate counter for the enc rings; reusing 'i'
                 * here would clobber the instance loop above
                 */
                for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
                        ring = &adev->vcn.inst[i].ring_enc[j];
                        ring->sched.ready = false;
                }

                ring = &adev->vcn.inst[i].ring_jpeg;
                ring->sched.ready = false;
        }

        return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vcn_v2_5_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vcn_suspend(adev);

        return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;

        r = vcn_v2_5_hw_init(adev);

        return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
        uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
        uint32_t offset;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* cache window 0: fw */
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
                        WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
                        offset = 0;
                } else {
                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                                lower_32_bits(adev->vcn.inst[i].gpu_addr));
                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                                upper_32_bits(adev->vcn.inst[i].gpu_addr));
                        offset = size;
                        WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
                                AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
                }
                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

                /* cache window 1: stack */
                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

                /* cache window 2: context */
                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
                        lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
                        upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
        }
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data;
        int ret = 0;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* UVD disable CGC */
                data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
                if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
                        data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
                else
                        data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
                data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
                WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

                data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
                data &= ~(UVD_CGC_GATE__SYS_MASK
                        | UVD_CGC_GATE__UDEC_MASK
                        | UVD_CGC_GATE__MPEG2_MASK
                        | UVD_CGC_GATE__REGS_MASK
                        | UVD_CGC_GATE__RBC_MASK
                        | UVD_CGC_GATE__LMI_MC_MASK
                        | UVD_CGC_GATE__LMI_UMC_MASK
                        | UVD_CGC_GATE__IDCT_MASK
                        | UVD_CGC_GATE__MPRD_MASK
                        | UVD_CGC_GATE__MPC_MASK
                        | UVD_CGC_GATE__LBSI_MASK
                        | UVD_CGC_GATE__LRBBM_MASK
                        | UVD_CGC_GATE__UDEC_RE_MASK
                        | UVD_CGC_GATE__UDEC_CM_MASK
                        | UVD_CGC_GATE__UDEC_IT_MASK
                        | UVD_CGC_GATE__UDEC_DB_MASK
                        | UVD_CGC_GATE__UDEC_MP_MASK
                        | UVD_CGC_GATE__WCB_MASK
                        | UVD_CGC_GATE__VCPU_MASK
                        | UVD_CGC_GATE__MMSCH_MASK);

                WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

                SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);

                data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
                data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
                        | UVD_CGC_CTRL__SYS_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_MODE_MASK
                        | UVD_CGC_CTRL__MPEG2_MODE_MASK
                        | UVD_CGC_CTRL__REGS_MODE_MASK
                        | UVD_CGC_CTRL__RBC_MODE_MASK
                        | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                        | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                        | UVD_CGC_CTRL__IDCT_MODE_MASK
                        | UVD_CGC_CTRL__MPRD_MODE_MASK
                        | UVD_CGC_CTRL__MPC_MODE_MASK
                        | UVD_CGC_CTRL__LBSI_MODE_MASK
                        | UVD_CGC_CTRL__LRBBM_MODE_MASK
                        | UVD_CGC_CTRL__WCB_MODE_MASK
                        | UVD_CGC_CTRL__VCPU_MODE_MASK
                        | UVD_CGC_CTRL__MMSCH_MODE_MASK);
                WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

                /* turn on the SUVD clocks */
                data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
                data |= (UVD_SUVD_CGC_GATE__SRE_MASK
                        | UVD_SUVD_CGC_GATE__SIT_MASK
                        | UVD_SUVD_CGC_GATE__SMP_MASK
                        | UVD_SUVD_CGC_GATE__SCM_MASK
                        | UVD_SUVD_CGC_GATE__SDB_MASK
                        | UVD_SUVD_CGC_GATE__SRE_H264_MASK
                        | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
                        | UVD_SUVD_CGC_GATE__SIT_H264_MASK
                        | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
                        | UVD_SUVD_CGC_GATE__SCM_H264_MASK
                        | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
                        | UVD_SUVD_CGC_GATE__SDB_H264_MASK
                        | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
                        | UVD_SUVD_CGC_GATE__SCLR_MASK
                        | UVD_SUVD_CGC_GATE__UVD_SC_MASK
                        | UVD_SUVD_CGC_GATE__ENT_MASK
                        | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
                        | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
                        | UVD_SUVD_CGC_GATE__SITE_MASK
                        | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
                        | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
                        | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
                        | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
                        | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
                WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

                data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
                data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
                WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
        }
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data = 0;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* enable UVD CGC */
                data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
                if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
                        data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
                else
                        data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
                data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
                data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
                WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

                data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
                data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
                        | UVD_CGC_CTRL__SYS_MODE_MASK
                        | UVD_CGC_CTRL__UDEC_MODE_MASK
                        | UVD_CGC_CTRL__MPEG2_MODE_MASK
                        | UVD_CGC_CTRL__REGS_MODE_MASK
                        | UVD_CGC_CTRL__RBC_MODE_MASK
                        | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                        | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                        | UVD_CGC_CTRL__IDCT_MODE_MASK
                        | UVD_CGC_CTRL__MPRD_MODE_MASK
                        | UVD_CGC_CTRL__MPC_MODE_MASK
                        | UVD_CGC_CTRL__LBSI_MODE_MASK
                        | UVD_CGC_CTRL__LRBBM_MODE_MASK
                        | UVD_CGC_CTRL__WCB_MODE_MASK
                        | UVD_CGC_CTRL__VCPU_MODE_MASK);
                WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

                data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
                data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                        | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
                WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
        }
}

/**
 * jpeg_v2_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        uint32_t tmp;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                ring = &adev->vcn.inst[i].ring_jpeg;
                /* disable anti hang mechanism */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
                        ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

                /* JPEG disable CGC */
                tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
                tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
                tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
                tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
                WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

                tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
                tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
                        | JPEG_CGC_GATE__JPEG2_DEC_MASK
                        | JPEG_CGC_GATE__JMCIF_MASK
                        | JPEG_CGC_GATE__JRBBM_MASK);
                WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

                tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
                tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
                        | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
                        | JPEG_CGC_CTRL__JMCIF_MODE_MASK
                        | JPEG_CGC_CTRL__JRBBM_MODE_MASK);
                WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

                /* MJPEG global tiling registers */
                WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
                        adev->gfx.config.gb_addr_config);
                WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
                        adev->gfx.config.gb_addr_config);

                /* enable JMI channel */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
                        ~UVD_JMI_CNTL__SOFT_RESET_MASK);

                /* enable System Interrupt for JRBC */
                WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
                        JPEG_SYS_INT_EN__DJRBC_MASK,
                        ~JPEG_SYS_INT_EN__DJRBC_MASK);

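                /*
                 * Program the JRBC ring buffer. Assumption, based on the
                 * VCN 2.0 register masks: bit 0 of UVD_JRBC_RB_CNTL is
                 * RB_NO_FETCH and bit 1 is RB_RPTR_WR_EN, so fetch is held
                 * off while base/rptr/wptr are set up, then re-enabled.
                 */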
                WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
                WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
                WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
                WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));
                WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
                WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
                WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
                WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
                ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
        }

        return 0;
}

/**
 * jpeg_v2_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
{
        uint32_t tmp;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* reset JMI */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
                        UVD_JMI_CNTL__SOFT_RESET_MASK,
                        ~UVD_JMI_CNTL__SOFT_RESET_MASK);

                tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
                tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
                        | JPEG_CGC_GATE__JPEG2_DEC_MASK
                        | JPEG_CGC_GATE__JMCIF_MASK
                        | JPEG_CGC_GATE__JRBBM_MASK);
                WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

                /* enable anti hang mechanism */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
                        UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
                        ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
        }

        return 0;
}

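/**
 * vcn_v2_5_start - VCN start routine
 *
 * @adev: amdgpu_device pointer
 *
 * Bring up the VCPU, program the rings and start the VCN block
 */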
static int vcn_v2_5_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        uint32_t rb_bufsz, tmp;
        int i, j, k, r;

        if (adev->pm.dpm_enabled)
                amdgpu_dpm_enable_uvd(adev, true);

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* disable register anti-hang mechanism */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
                        ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

                /* set uvd status busy */
                tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
                WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
        }

        /* SW clock gating */
        vcn_v2_5_disable_clock_gating(adev);

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* enable VCPU clock */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
                        UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

                /* disable master interrupt */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
                        ~UVD_MASTINT_EN__VCPU_EN_MASK);

                /* setup mmUVD_LMI_CTRL, the low byte is the write-clean timer */
                tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
                tmp &= ~0xff;
                WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8 |
                        UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                        UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
                        UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                        UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

                /* setup mmUVD_MPC_CNTL */
                tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
                tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
                tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
                WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

                /* setup UVD_MPC_SET_MUXA0 */
                WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
                        ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                        (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                        (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

                /* setup UVD_MPC_SET_MUXB0 */
                WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
                        ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                        (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                        (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

                /* setup mmUVD_MPC_SET_MUX */
                WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
                        ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                        (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                        (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
        }

        vcn_v2_5_mc_resume(adev);

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* VCN global tiling registers */
                WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
                        adev->gfx.config.gb_addr_config);

                /* enable LMI MC and UMC channels */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
                        ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

                /* unblock VCPU register access */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
                        ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
                        ~UVD_VCPU_CNTL__BLK_RST_MASK);

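                /*
                 * Poll for the VCPU firmware to report ready (bit 1 of
                 * UVD_STATUS, the VCPU report field), retrying with a
                 * block reset up to ten times before giving up.
                 */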
                for (k = 0; k < 10; ++k) {
                        uint32_t status;

                        for (j = 0; j < 100; ++j) {
                                status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
                                if (status & 2)
                                        break;
                                if (amdgpu_emu_mode == 1)
                                        msleep(500);
                                else
                                        mdelay(10);
                        }
                        r = 0;
                        if (status & 2)
                                break;

                        DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
                        WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
                                UVD_VCPU_CNTL__BLK_RST_MASK,
                                ~UVD_VCPU_CNTL__BLK_RST_MASK);
                        mdelay(10);
                        WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
                                ~UVD_VCPU_CNTL__BLK_RST_MASK);

                        mdelay(10);
                        r = -1;
                }

                if (r) {
                        DRM_ERROR("VCN decode not responding, giving up!!!\n");
                        return r;
                }

                /* enable master interrupt */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
                        UVD_MASTINT_EN__VCPU_EN_MASK,
                        ~UVD_MASTINT_EN__VCPU_EN_MASK);

                /* clear the busy bit of VCN_STATUS */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
                        ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

                WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);

                ring = &adev->vcn.inst[i].ring_dec;
                /* force RBC into idle state */
                rb_bufsz = order_base_2(ring->ring_size);
                tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
                WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);

                /* program the RB_BASE for ring buffer */
                WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
                WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

                /* Initialize the ring buffer's read and write pointers */
                WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);

                ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
                WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
                                lower_32_bits(ring->wptr));
                ring = &adev->vcn.inst[i].ring_enc[0];
                WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
                WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);

                ring = &adev->vcn.inst[i].ring_enc[1];
                WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
        }
        r = jpeg_v2_5_start(adev);

        return r;
}

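/**
 * vcn_v2_5_stop - VCN stop routine
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the VCN to idle, then shut the block down
 */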
static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
        uint32_t tmp;
        int i, r;

        r = jpeg_v2_5_stop(adev);
        if (r)
                return r;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* wait for vcn idle */
                SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
                if (r)
                        return r;

                tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
                        UVD_LMI_STATUS__READ_CLEAN_MASK |
                        UVD_LMI_STATUS__WRITE_CLEAN_MASK |
                        UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
                SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
                if (r)
                        return r;

                /* block LMI UMC channel */
                tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
                tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
                WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

                tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
                        UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
                SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
                if (r)
                        return r;

                /* block VCPU register access */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
                        UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
                        ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

                /* reset VCPU */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
                        UVD_VCPU_CNTL__BLK_RST_MASK,
                        ~UVD_VCPU_CNTL__BLK_RST_MASK);

                /* disable VCPU clock */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
                        ~(UVD_VCPU_CNTL__CLK_EN_MASK));

                /* clear status */
                WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

                vcn_v2_5_enable_clock_gating(adev);

                /* enable register anti-hang mechanism */
                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
                        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
                        ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
        }

        if (adev->pm.dpm_enabled)
                amdgpu_dpm_enable_uvd(adev, false);

        return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell)
                return adev->wb.wb[ring->wptr_offs];
        else
                return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
        } else {
                WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
        }
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
        .vmhub = AMDGPU_MMHUB_1,
        .get_rptr = vcn_v2_5_dec_ring_get_rptr,
        .get_wptr = vcn_v2_5_dec_ring_get_wptr,
        .set_wptr = vcn_v2_5_dec_ring_set_wptr,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
                8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
                14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
                6,
        .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
        .emit_ib = vcn_v2_0_dec_ring_emit_ib,
        .emit_fence = vcn_v2_0_dec_ring_emit_fence,
        .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_dec_ring_test_ring,
        .test_ib = amdgpu_vcn_dec_ring_test_ib,
        .insert_nop = vcn_v2_0_dec_ring_insert_nop,
        .insert_start = vcn_v2_0_dec_ring_insert_start,
        .insert_end = vcn_v2_0_dec_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
        .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
                return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
                        return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
        } else {
                if (ring->use_doorbell)
                        return adev->wb.wb[ring->wptr_offs];
                else
                        return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
        }
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
                if (ring->use_doorbell) {
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
                } else {
                        WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                }
        } else {
                if (ring->use_doorbell) {
                        adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
                } else {
                        WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                }
        }
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_ENC,
        .align_mask = 0x3f,
        .nop = VCN_ENC_CMD_NO_OP,
        .vmhub = AMDGPU_MMHUB_1,
        .get_rptr = vcn_v2_5_enc_ring_get_rptr,
        .get_wptr = vcn_v2_5_enc_ring_get_wptr,
        .set_wptr = vcn_v2_5_enc_ring_set_wptr,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
                4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
                5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
                1, /* vcn_v2_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
        .emit_ib = vcn_v2_0_enc_ring_emit_ib,
        .emit_fence = vcn_v2_0_enc_ring_emit_fence,
        .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_enc_ring_test_ring,
        .test_ib = amdgpu_vcn_enc_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_end = vcn_v2_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
        .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v2_5_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell)
                return adev->wb.wb[ring->wptr_offs];
        else
                return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v2_5_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
                WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
        } else {
                WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
        }
}

static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_JPEG,
        .align_mask = 0xf,
        .vmhub = AMDGPU_MMHUB_1,
        .get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
        .get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
        .set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
                8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
                18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
                8 + 16,
        .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
        .emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
        .emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
        .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
        .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
        .insert_nop = vcn_v2_0_jpeg_ring_nop,
        .insert_start = vcn_v2_0_jpeg_ring_insert_start,
        .insert_end = vcn_v2_0_jpeg_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
        .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
                adev->vcn.inst[i].ring_dec.me = i;
                DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
        }
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
        int i, j;

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                        adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
                        adev->vcn.inst[j].ring_enc[i].me = j;
                }
                DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
        }
}

static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
                adev->vcn.inst[i].ring_jpeg.me = i;
                DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
        }
}

static bool vcn_v2_5_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, ret = 1;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
        }

        return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, ret = 0;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
                        UVD_STATUS__IDLE, ret);
                if (ret)
                        return ret;
        }

        return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (enable) {
                /* gating is only safe once the block is idle */
                if (!vcn_v2_5_is_idle(handle))
                        return -EBUSY;
                vcn_v2_5_enable_clock_gating(adev);
        } else {
                vcn_v2_5_disable_clock_gating(adev);
        }

        return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret;

        if (state == adev->vcn.cur_state)
                return 0;

        if (state == AMD_PG_STATE_GATE)
                ret = vcn_v2_5_stop(adev);
        else
                ret = vcn_v2_5_start(adev);

        if (!ret)
                adev->vcn.cur_state = state;

        return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        uint32_t ip_instance;

        switch (entry->client_id) {
        case SOC15_IH_CLIENTID_VCN:
                ip_instance = 0;
                break;
        case SOC15_IH_CLIENTID_VCN1:
                ip_instance = 1;
                break;
        default:
                DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
                return 0;
        }

        DRM_DEBUG("IH: VCN TRAP\n");

        switch (entry->src_id) {
        case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
                amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
                break;
        case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
                amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
                break;
        case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
                amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
                break;
        case VCN_2_0__SRCID__JPEG_DECODE:
                amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
        .set = vcn_v2_5_set_interrupt_state,
        .process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                /* one irq type per enc ring plus one each for dec and jpeg */
                adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
                adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
        }
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
        .name = "vcn_v2_5",
        .early_init = vcn_v2_5_early_init,
        .late_init = NULL,
        .sw_init = vcn_v2_5_sw_init,
        .sw_fini = vcn_v2_5_sw_fini,
        .hw_init = vcn_v2_5_hw_init,
        .hw_fini = vcn_v2_5_hw_fini,
        .suspend = vcn_v2_5_suspend,
        .resume = vcn_v2_5_resume,
        .is_idle = vcn_v2_5_is_idle,
        .wait_for_idle = vcn_v2_5_wait_for_idle,
        .check_soft_reset = NULL,
        .pre_soft_reset = NULL,
        .soft_reset = NULL,
        .post_soft_reset = NULL,
        .set_clockgating_state = vcn_v2_5_set_clockgating_state,
        .set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
                .type = AMD_IP_BLOCK_TYPE_VCN,
                .major = 2,
                .minor = 5,
                .rev = 0,
                .funcs = &vcn_v2_5_ip_funcs,
};
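
/*
 * The SoC layer picks this block up during IP discovery; a sketch of the
 * registration (the exact call site lives in soc15.c, not in this file):
 *
 *	amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
 */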