drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_vcn.h"
28 #include "soc15.h"
29 #include "soc15d.h"
30 #include "vcn_v2_0.h"
31
32 #include "vcn/vcn_2_5_offset.h"
33 #include "vcn/vcn_2_5_sh_mask.h"
34 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
35
36 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET                        0x27
37 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET                    0x0f
38 #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET                  0x10
39 #define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET                  0x11
40 #define mmUVD_NO_OP_INTERNAL_OFFSET                             0x29
41 #define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET                       0x66
42 #define mmUVD_SCRATCH9_INTERNAL_OFFSET                          0xc01d
43
44 #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET                   0x431
45 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET          0x3b4
46 #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET         0x3b5
47 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET                       0x25c
48
49 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET                        0x401f
50
51 #define VCN25_MAX_HW_INSTANCES_ARCTURUS                         2
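/*
 * Arcturus carries up to two VCN hardware instances; early_init reads
 * CC_UVD_HARVESTING for each one and records fused-off instances in
 * adev->vcn.harvest_config so the rest of the code can skip them.
 */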
52
53 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
54 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
55 static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
56 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
57 static int vcn_v2_5_set_powergating_state(void *handle,
58                                 enum amd_powergating_state state);
59
60 static int amdgpu_ih_clientid_vcns[] = {
61         SOC15_IH_CLIENTID_VCN,
62         SOC15_IH_CLIENTID_VCN1
63 };
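/*
 * Maps a VCN instance index to its interrupt-handler client id; sw_init
 * uses this when registering the decode, encode and JPEG interrupt
 * sources for each instance.
 */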
64
65 /**
66  * vcn_v2_5_early_init - set function pointers
67  *
68  * @handle: amdgpu_device pointer
69  *
70  * Set ring and irq function pointers
71  */
72 static int vcn_v2_5_early_init(void *handle)
73 {
74         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
75         if (adev->asic_type == CHIP_ARCTURUS) {
76                 u32 harvest;
77                 int i;
78
79                 adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
80                 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
81                         harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
82                         if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
83                                 adev->vcn.harvest_config |= 1 << i;
84                 }
85
86                 if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
87                                                  AMDGPU_VCN_HARVEST_VCN1))
88                         /* both instances are harvested, disable the block */
89                         return -ENOENT;
90         } else
91                 adev->vcn.num_vcn_inst = 1;
92
93         adev->vcn.num_enc_rings = 2;
94
95         vcn_v2_5_set_dec_ring_funcs(adev);
96         vcn_v2_5_set_enc_ring_funcs(adev);
97         vcn_v2_5_set_jpeg_ring_funcs(adev);
98         vcn_v2_5_set_irq_funcs(adev);
99
100         return 0;
101 }
102
103 /**
104  * vcn_v2_5_sw_init - sw init for VCN block
105  *
106  * @handle: amdgpu_device pointer
107  *
108  * Load firmware and sw initialization
109  */
110 static int vcn_v2_5_sw_init(void *handle)
111 {
112         struct amdgpu_ring *ring;
113         int i, j, r;
114         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
115
116         for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
117                 if (adev->vcn.harvest_config & (1 << j))
118                         continue;
119                 /* VCN DEC TRAP */
120                 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
121                                 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
122                 if (r)
123                         return r;
124
125                 /* VCN ENC TRAP */
126                 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
127                         r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
128                                 i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
129                         if (r)
130                                 return r;
131                 }
132
133                 /* VCN JPEG TRAP */
134                 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
135                                 VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
136                 if (r)
137                         return r;
138         }
139
140         r = amdgpu_vcn_sw_init(adev);
141         if (r)
142                 return r;
143
144         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
145                 const struct common_firmware_header *hdr;
146                 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
147                 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
148                 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
149                 adev->firmware.fw_size +=
150                         ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
151
152                 if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
153                         adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
154                         adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
155                         adev->firmware.fw_size +=
156                                 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
157                 }
158                 DRM_INFO("PSP loading VCN firmware\n");
159         }
160
161         r = amdgpu_vcn_resume(adev);
162         if (r)
163                 return r;
164
165         for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
166                 if (adev->vcn.harvest_config & (1 << j))
167                         continue;
168                 adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
169                 adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
170                 adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
171                 adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
172                 adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
173                 adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
174
175                 adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
176                 adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
177                 adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
178                 adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
179                 adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
180                 adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
181                 adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
182                 adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
183                 adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
184                 adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
185
186                 adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
187                 adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);
188
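                /*
                 * Doorbell layout per instance j, derived from the index
                 * math below: each instance owns a block of 8 doorbells
                 * starting at (vcn_ring0_1 << 1) + 8 * j, with slot 0 for
                 * the decode ring, slot 1 for the JPEG ring and slots
                 * 2..(2 + num_enc_rings - 1) for the encode rings.
                 */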
189                 ring = &adev->vcn.inst[j].ring_dec;
190                 ring->use_doorbell = true;
191                 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
192                 sprintf(ring->name, "vcn_dec_%d", j);
193                 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
194                 if (r)
195                         return r;
196
197                 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
198                         ring = &adev->vcn.inst[j].ring_enc[i];
199                         ring->use_doorbell = true;
200                         ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
201                         sprintf(ring->name, "vcn_enc_%d.%d", j, i);
202                         r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
203                         if (r)
204                                 return r;
205                 }
206
207                 ring = &adev->vcn.inst[j].ring_jpeg;
208                 ring->use_doorbell = true;
209                 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
210                 sprintf(ring->name, "vcn_jpeg_%d", j);
211                 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
212                 if (r)
213                         return r;
214         }
215
216         return 0;
217 }
218
219 /**
220  * vcn_v2_5_sw_fini - sw fini for VCN block
221  *
222  * @handle: amdgpu_device pointer
223  *
224  * VCN suspend and free up sw allocation
225  */
226 static int vcn_v2_5_sw_fini(void *handle)
227 {
228         int r;
229         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
230
231         r = amdgpu_vcn_suspend(adev);
232         if (r)
233                 return r;
234
235         r = amdgpu_vcn_sw_fini(adev);
236
237         return r;
238 }
239
240 /**
241  * vcn_v2_5_hw_init - start and test VCN block
242  *
243  * @handle: amdgpu_device pointer
244  *
245  * Initialize the hardware, boot up the VCPU and do some testing
246  */
247 static int vcn_v2_5_hw_init(void *handle)
248 {
249         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
250         struct amdgpu_ring *ring;
251         int i, j, r;
252
253         for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
254                 if (adev->vcn.harvest_config & (1 << j))
255                         continue;
256                 ring = &adev->vcn.inst[j].ring_dec;
257
258                 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
259                                                      ring->doorbell_index, j);
260
261                 r = amdgpu_ring_test_helper(ring);
262                 if (r)
263                         goto done;
264
265                 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
266                         ring = &adev->vcn.inst[j].ring_enc[i];
267                         /* keep encode rings disabled until the FW is robust enough */
268                         ring->sched.ready = false;
269                         continue;
270                         r = amdgpu_ring_test_helper(ring);
271                         if (r)
272                                 goto done;
273                 }
274
275                 ring = &adev->vcn.inst[j].ring_jpeg;
276                 r = amdgpu_ring_test_helper(ring);
277                 if (r)
278                         goto done;
279         }
280 done:
281         if (!r)
282                 DRM_INFO("VCN decode and encode initialized successfully.\n");
283
284         return r;
285 }
286
287 /**
288  * vcn_v2_5_hw_fini - stop the hardware block
289  *
290  * @handle: amdgpu_device pointer
291  *
292  * Stop the VCN block, mark the rings as not ready any more
293  */
294 static int vcn_v2_5_hw_fini(void *handle)
295 {
296         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
297         struct amdgpu_ring *ring;
298         int i;
299
300         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
301                 if (adev->vcn.harvest_config & (1 << i))
302                         continue;
303                 ring = &adev->vcn.inst[i].ring_dec;
304
305                 if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
306                         vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
307
308                 ring->sched.ready = false;
309
310                 for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
311                         ring = &adev->vcn.inst[i].ring_enc[j];
312                         ring->sched.ready = false;
313                 }
314
315                 ring = &adev->vcn.inst[i].ring_jpeg;
316                 ring->sched.ready = false;
317         }
318
319         return 0;
320 }
321
322 /**
323  * vcn_v2_5_suspend - suspend VCN block
324  *
325  * @handle: amdgpu_device pointer
326  *
327  * HW fini and suspend VCN block
328  */
329 static int vcn_v2_5_suspend(void *handle)
330 {
331         int r;
332         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
333
334         r = vcn_v2_5_hw_fini(adev);
335         if (r)
336                 return r;
337
338         r = amdgpu_vcn_suspend(adev);
339
340         return r;
341 }
342
343 /**
344  * vcn_v2_5_resume - resume VCN block
345  *
346  * @handle: amdgpu_device pointer
347  *
348  * Resume firmware and hw init VCN block
349  */
350 static int vcn_v2_5_resume(void *handle)
351 {
352         int r;
353         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
354
355         r = amdgpu_vcn_resume(adev);
356         if (r)
357                 return r;
358
359         r = vcn_v2_5_hw_init(adev);
360
361         return r;
362 }
363
364 /**
365  * vcn_v2_5_mc_resume - memory controller programming
366  *
367  * @adev: amdgpu_device pointer
368  *
369  * Let the VCN memory controller know its offsets
370  */
371 static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
372 {
373         uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
374         uint32_t offset;
375         int i;
376
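        /*
         * Three VCPU cache windows are programmed per instance: window 0
         * holds the firmware image (the PSP TMR address when PSP loads the
         * ucode, otherwise the VCN BO base), window 1 the stack immediately
         * after the firmware, and window 2 the context area after the stack.
         */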
377         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
378                 if (adev->vcn.harvest_config & (1 << i))
379                         continue;
380                 /* cache window 0: fw */
381                 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
382                         WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
383                                 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
384                         WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
385                                 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
386                         WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
387                         offset = 0;
388                 } else {
389                         WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
390                                 lower_32_bits(adev->vcn.inst[i].gpu_addr));
391                         WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
392                                 upper_32_bits(adev->vcn.inst[i].gpu_addr));
393                         offset = size;
394                         WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
395                                 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
396                 }
397                 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
398
399                 /* cache window 1: stack */
400                 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
401                         lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
402                 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
403                         upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
404                 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
405                 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
406
407                 /* cache window 2: context */
408                 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
409                         lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
410                 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
411                         upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
412                 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
413                 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
414         }
415 }
416
417 /**
418  * vcn_v2_5_disable_clock_gating - disable VCN clock gating
419  *
420  * @adev: amdgpu_device pointer
421  *
422  * Disable clock gating for VCN block
423  */
424 static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
425 {
426         uint32_t data;
427         int ret = 0;
428         int i;
429
430         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
431                 if (adev->vcn.harvest_config & (1 << i))
432                         continue;
433                 /* UVD disable CGC */
434                 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
435                 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
436                         data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
437                 else
438                         data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
439                 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
440                 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
441                 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
442
443                 data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
444                 data &= ~(UVD_CGC_GATE__SYS_MASK
445                         | UVD_CGC_GATE__UDEC_MASK
446                         | UVD_CGC_GATE__MPEG2_MASK
447                         | UVD_CGC_GATE__REGS_MASK
448                         | UVD_CGC_GATE__RBC_MASK
449                         | UVD_CGC_GATE__LMI_MC_MASK
450                         | UVD_CGC_GATE__LMI_UMC_MASK
451                         | UVD_CGC_GATE__IDCT_MASK
452                         | UVD_CGC_GATE__MPRD_MASK
453                         | UVD_CGC_GATE__MPC_MASK
454                         | UVD_CGC_GATE__LBSI_MASK
455                         | UVD_CGC_GATE__LRBBM_MASK
456                         | UVD_CGC_GATE__UDEC_RE_MASK
457                         | UVD_CGC_GATE__UDEC_CM_MASK
458                         | UVD_CGC_GATE__UDEC_IT_MASK
459                         | UVD_CGC_GATE__UDEC_DB_MASK
460                         | UVD_CGC_GATE__UDEC_MP_MASK
461                         | UVD_CGC_GATE__WCB_MASK
462                         | UVD_CGC_GATE__VCPU_MASK
463                         | UVD_CGC_GATE__MMSCH_MASK);
464
465                 WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
466
467                 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0,  0xFFFFFFFF, ret);
468
469                 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
470                 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
471                         | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
472                         | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
473                         | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
474                         | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
475                         | UVD_CGC_CTRL__SYS_MODE_MASK
476                         | UVD_CGC_CTRL__UDEC_MODE_MASK
477                         | UVD_CGC_CTRL__MPEG2_MODE_MASK
478                         | UVD_CGC_CTRL__REGS_MODE_MASK
479                         | UVD_CGC_CTRL__RBC_MODE_MASK
480                         | UVD_CGC_CTRL__LMI_MC_MODE_MASK
481                         | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
482                         | UVD_CGC_CTRL__IDCT_MODE_MASK
483                         | UVD_CGC_CTRL__MPRD_MODE_MASK
484                         | UVD_CGC_CTRL__MPC_MODE_MASK
485                         | UVD_CGC_CTRL__LBSI_MODE_MASK
486                         | UVD_CGC_CTRL__LRBBM_MODE_MASK
487                         | UVD_CGC_CTRL__WCB_MODE_MASK
488                         | UVD_CGC_CTRL__VCPU_MODE_MASK
489                         | UVD_CGC_CTRL__MMSCH_MODE_MASK);
490                 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
491
492                 /* turn on */
493                 data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
494                 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
495                         | UVD_SUVD_CGC_GATE__SIT_MASK
496                         | UVD_SUVD_CGC_GATE__SMP_MASK
497                         | UVD_SUVD_CGC_GATE__SCM_MASK
498                         | UVD_SUVD_CGC_GATE__SDB_MASK
499                         | UVD_SUVD_CGC_GATE__SRE_H264_MASK
500                         | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
501                         | UVD_SUVD_CGC_GATE__SIT_H264_MASK
502                         | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
503                         | UVD_SUVD_CGC_GATE__SCM_H264_MASK
504                         | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
505                         | UVD_SUVD_CGC_GATE__SDB_H264_MASK
506                         | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
507                         | UVD_SUVD_CGC_GATE__SCLR_MASK
508                         | UVD_SUVD_CGC_GATE__UVD_SC_MASK
509                         | UVD_SUVD_CGC_GATE__ENT_MASK
510                         | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
511                         | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
512                         | UVD_SUVD_CGC_GATE__SITE_MASK
513                         | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
514                         | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
515                         | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
516                         | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
517                         | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
518                 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
519
520                 data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
521                 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
522                         | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
523                         | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
524                         | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
525                         | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
526                         | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
527                         | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
528                         | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
529                         | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
530                         | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
531                 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
532         }
533 }
534
535 /**
536  * vcn_v2_5_enable_clock_gating - enable VCN clock gating
537  *
538  * @adev: amdgpu_device pointer
539  *
540  * Enable clock gating for VCN block
541  */
542 static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
543 {
544         uint32_t data = 0;
545         int i;
546
547         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
548                 if (adev->vcn.harvest_config & (1 << i))
549                         continue;
550                 /* enable UVD CGC */
551                 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
552                 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
553                         data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
554                 else
555                         data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
556                 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
557                 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
558                 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
559
560                 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
561                 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
562                         | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
563                         | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
564                         | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
565                         | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
566                         | UVD_CGC_CTRL__SYS_MODE_MASK
567                         | UVD_CGC_CTRL__UDEC_MODE_MASK
568                         | UVD_CGC_CTRL__MPEG2_MODE_MASK
569                         | UVD_CGC_CTRL__REGS_MODE_MASK
570                         | UVD_CGC_CTRL__RBC_MODE_MASK
571                         | UVD_CGC_CTRL__LMI_MC_MODE_MASK
572                         | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
573                         | UVD_CGC_CTRL__IDCT_MODE_MASK
574                         | UVD_CGC_CTRL__MPRD_MODE_MASK
575                         | UVD_CGC_CTRL__MPC_MODE_MASK
576                         | UVD_CGC_CTRL__LBSI_MODE_MASK
577                         | UVD_CGC_CTRL__LRBBM_MODE_MASK
578                         | UVD_CGC_CTRL__WCB_MODE_MASK
579                         | UVD_CGC_CTRL__VCPU_MODE_MASK);
580                 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
581
582                 data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
583                 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
584                         | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
585                         | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
586                         | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
587                         | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
588                         | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
589                         | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
590                         | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
591                         | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
592                         | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
593                 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
594         }
595 }
596
597 /**
598  * jpeg_v2_5_start - start JPEG block
599  *
600  * @adev: amdgpu_device pointer
601  *
602  * Setup and start the JPEG block
603  */
604 static int jpeg_v2_5_start(struct amdgpu_device *adev)
605 {
606         struct amdgpu_ring *ring;
607         uint32_t tmp;
608         int i;
609
610         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
611                 if (adev->vcn.harvest_config & (1 << i))
612                         continue;
613                 ring = &adev->vcn.inst[i].ring_jpeg;
614                 /* disable anti hang mechanism */
615                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
616                         ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
617
618                 /* JPEG disable CGC */
619                 tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
620                 tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
621                 tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
622                 tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
623                 WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);
624
625                 tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
626                 tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
627                         | JPEG_CGC_GATE__JPEG2_DEC_MASK
628                         | JPEG_CGC_GATE__JMCIF_MASK
629                         | JPEG_CGC_GATE__JRBBM_MASK);
630                 WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);
631
632                 tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
633                 tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
634                         | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
635                         | JPEG_CGC_CTRL__JMCIF_MODE_MASK
636                         | JPEG_CGC_CTRL__JRBBM_MODE_MASK);
637                 WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);
638
639                 /* MJPEG global tiling registers */
640                 WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
641                         adev->gfx.config.gb_addr_config);
642                 WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
643                         adev->gfx.config.gb_addr_config);
644
645                 /* enable JMI channel */
646                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
647                         ~UVD_JMI_CNTL__SOFT_RESET_MASK);
648
649                 /* enable System Interrupt for JRBC */
650                 WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
651                         JPEG_SYS_INT_EN__DJRBC_MASK,
652                         ~JPEG_SYS_INT_EN__DJRBC_MASK);
653
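                /*
                 * Program the JPEG ring buffer: VMID 0, ring base address,
                 * then clear the read/write pointers and set the ring size
                 * before caching the hardware write pointer.
                 */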
654                 WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
655                 WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
656                 WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
657                         lower_32_bits(ring->gpu_addr));
658                 WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
659                         upper_32_bits(ring->gpu_addr));
660                 WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
661                 WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
662                 WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
663                 WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
664                 ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
665         }
666
667         return 0;
668 }
669
670 /**
671  * jpeg_v2_5_stop - stop JPEG block
672  *
673  * @adev: amdgpu_device pointer
674  *
675  * Stop the JPEG block
676  */
677 static int jpeg_v2_5_stop(struct amdgpu_device *adev)
678 {
679         uint32_t tmp;
680         int i;
681
682         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
683                 if (adev->vcn.harvest_config & (1 << i))
684                         continue;
685                 /* reset JMI */
686                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
687                         UVD_JMI_CNTL__SOFT_RESET_MASK,
688                         ~UVD_JMI_CNTL__SOFT_RESET_MASK);
689
690                 tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
691                 tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
692                         |JPEG_CGC_GATE__JPEG2_DEC_MASK
693                         |JPEG_CGC_GATE__JMCIF_MASK
694                         |JPEG_CGC_GATE__JRBBM_MASK);
695                 WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);
696
697                 /* enable anti hang mechanism */
698                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
699                         UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
700                         ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
701         }
702
703         return 0;
704 }
705
706 static int vcn_v2_5_start(struct amdgpu_device *adev)
707 {
708         struct amdgpu_ring *ring;
709         uint32_t rb_bufsz, tmp;
710         int i, j, k, r;
711
712         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
713                 if (adev->vcn.harvest_config & (1 << i))
714                         continue;
715                 /* disable register anti-hang mechanism */
716                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
717                         ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
718
719                 /* set uvd status busy */
720                 tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
721                 WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
722         }
723
724         /* SW clock gating */
725         vcn_v2_5_disable_clock_gating(adev);
726
727         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
728                 if (adev->vcn.harvest_config & (1 << i))
729                         continue;
730                 /* enable VCPU clock */
731                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
732                         UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
733
734                 /* disable master interrupt */
735                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
736                         ~UVD_MASTINT_EN__VCPU_EN_MASK);
737
738                 /* setup mmUVD_LMI_CTRL */
739                 tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
740                 tmp &= ~0xff;
741                 WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
742                         UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
743                         UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
744                         UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
745                         UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
746
747                 /* setup mmUVD_MPC_CNTL */
748                 tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
749                 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
750                 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
751                 WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
752
753                 /* setup UVD_MPC_SET_MUXA0 */
754                 WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
755                         ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
756                         (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
757                         (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
758                         (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
759
760                 /* setup UVD_MPC_SET_MUXB0 */
761                 WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
762                         ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
763                         (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
764                         (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
765                         (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
766
767                 /* setup mmUVD_MPC_SET_MUX */
768                 WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
769                         ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
770                         (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
771                         (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
772         }
773
774         vcn_v2_5_mc_resume(adev);
775
776         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
777                 if (adev->vcn.harvest_config & (1 << i))
778                         continue;
779                 /* VCN global tiling registers */
780                 WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
781                         adev->gfx.config.gb_addr_config);
782                 WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
783                         adev->gfx.config.gb_addr_config);
784
785                 /* enable LMI MC and UMC channels */
786                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
787                         ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
788
789                 /* unblock VCPU register access */
790                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
791                         ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
792
793                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
794                         ~UVD_VCPU_CNTL__BLK_RST_MASK);
795
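                /*
                 * Boot poll: wait for bit 0x2 of UVD_STATUS to signal that
                 * the VCPU is up, retrying up to 10 times with a VCPU block
                 * reset between attempts before giving up on this instance.
                 */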
796                 for (k = 0; k < 10; ++k) {
797                         uint32_t status;
798
799                         for (j = 0; j < 100; ++j) {
800                                 status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
801                                 if (status & 2)
802                                         break;
803                                 if (amdgpu_emu_mode == 1)
804                                         msleep(500);
805                                 else
806                                         mdelay(10);
807                         }
808                         r = 0;
809                         if (status & 2)
810                                 break;
811
812                         DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
813                         WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
814                                 UVD_VCPU_CNTL__BLK_RST_MASK,
815                                 ~UVD_VCPU_CNTL__BLK_RST_MASK);
816                         mdelay(10);
817                         WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
818                                 ~UVD_VCPU_CNTL__BLK_RST_MASK);
819
820                         mdelay(10);
821                         r = -1;
822                 }
823
824                 if (r) {
825                         DRM_ERROR("VCN decode not responding, giving up!!!\n");
826                         return r;
827                 }
828
829                 /* enable master interrupt */
830                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
831                         UVD_MASTINT_EN__VCPU_EN_MASK,
832                         ~UVD_MASTINT_EN__VCPU_EN_MASK);
833
834                 /* clear the busy bit of VCN_STATUS */
835                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
836                         ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
837
838                 WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
839
840                 ring = &adev->vcn.inst[i].ring_dec;
841                 /* force RBC into idle state */
842                 rb_bufsz = order_base_2(ring->ring_size);
843                 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
844                 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
845                 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
846                 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
847                 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
848                 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
849
850                 /* program the RB_BASE for ring buffer */
851                 WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
852                         lower_32_bits(ring->gpu_addr));
853                 WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
854                         upper_32_bits(ring->gpu_addr));
855
856                 /* Initialize the ring buffer's read and write pointers */
857                 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
858
859                 ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
860                 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
861                                 lower_32_bits(ring->wptr));
862                 ring = &adev->vcn.inst[i].ring_enc[0];
863                 WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
864                 WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
865                 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
866                 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
867                 WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
868
869                 ring = &adev->vcn.inst[i].ring_enc[1];
870                 WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
871                 WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
872                 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
873                 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
874                 WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
875         }
876         r = jpeg_v2_5_start(adev);
877
878         return r;
879 }
880
881 static int vcn_v2_5_stop(struct amdgpu_device *adev)
882 {
883         uint32_t tmp;
884         int i, r;
885
886         r = jpeg_v2_5_stop(adev);
887         if (r)
888                 return r;
889
890         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
891                 if (adev->vcn.harvest_config & (1 << i))
892                         continue;
893                 /* wait for vcn idle */
894                 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
895                 if (r)
896                         return r;
897
898                 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
899                         UVD_LMI_STATUS__READ_CLEAN_MASK |
900                         UVD_LMI_STATUS__WRITE_CLEAN_MASK |
901                         UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
902                 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
903                 if (r)
904                         return r;
905
906                 /* block LMI UMC channel */
907                 tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
908                 tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
909                 WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
910
911                 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
912                         UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
913                 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
914                 if (r)
915                         return r;
916
917                 /* block VCPU register access */
918                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
919                         UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
920                         ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
921
922                 /* reset VCPU */
923                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
924                         UVD_VCPU_CNTL__BLK_RST_MASK,
925                         ~UVD_VCPU_CNTL__BLK_RST_MASK);
926
927                 /* disable VCPU clock */
928                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
929                         ~(UVD_VCPU_CNTL__CLK_EN_MASK));
930
931                 /* clear status */
932                 WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
933
934                 vcn_v2_5_enable_clock_gating(adev);
935
936                 /* enable register anti-hang mechanism */
937                 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
938                         UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
939                         ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
940         }
941
942         return 0;
943 }
944
945 /**
946  * vcn_v2_5_dec_ring_get_rptr - get read pointer
947  *
948  * @ring: amdgpu_ring pointer
949  *
950  * Returns the current hardware read pointer
951  */
952 static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
953 {
954         struct amdgpu_device *adev = ring->adev;
955
956         return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
957 }
958
959 /**
960  * vcn_v2_5_dec_ring_get_wptr - get write pointer
961  *
962  * @ring: amdgpu_ring pointer
963  *
964  * Returns the current hardware write pointer
965  */
966 static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
967 {
968         struct amdgpu_device *adev = ring->adev;
969
970         if (ring->use_doorbell)
971                 return adev->wb.wb[ring->wptr_offs];
972         else
973                 return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
974 }
975
976 /**
977  * vcn_v2_5_dec_ring_set_wptr - set write pointer
978  *
979  * @ring: amdgpu_ring pointer
980  *
981  * Commits the write pointer to the hardware
982  */
983 static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
984 {
985         struct amdgpu_device *adev = ring->adev;
986
987         if (ring->use_doorbell) {
988                 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
989                 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
990         } else {
991                 WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
992         }
993 }
994
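/*
 * The ring function tables below reuse the vcn_v2_0 packet emission
 * helpers; only the rptr/wptr accessors are reimplemented here so they
 * can address the per-instance (ring->me) register space.
 */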
995 static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
996         .type = AMDGPU_RING_TYPE_VCN_DEC,
997         .align_mask = 0xf,
998         .vmhub = AMDGPU_MMHUB_1,
999         .get_rptr = vcn_v2_5_dec_ring_get_rptr,
1000         .get_wptr = vcn_v2_5_dec_ring_get_wptr,
1001         .set_wptr = vcn_v2_5_dec_ring_set_wptr,
1002         .emit_frame_size =
1003                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1004                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1005                 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1006                 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1007                 6,
1008         .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1009         .emit_ib = vcn_v2_0_dec_ring_emit_ib,
1010         .emit_fence = vcn_v2_0_dec_ring_emit_fence,
1011         .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1012         .test_ring = amdgpu_vcn_dec_ring_test_ring,
1013         .test_ib = amdgpu_vcn_dec_ring_test_ib,
1014         .insert_nop = vcn_v2_0_dec_ring_insert_nop,
1015         .insert_start = vcn_v2_0_dec_ring_insert_start,
1016         .insert_end = vcn_v2_0_dec_ring_insert_end,
1017         .pad_ib = amdgpu_ring_generic_pad_ib,
1018         .begin_use = amdgpu_vcn_ring_begin_use,
1019         .end_use = amdgpu_vcn_ring_end_use,
1020         .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1021         .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1022         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1023 };
1024
1025 /**
1026  * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
1027  *
1028  * @ring: amdgpu_ring pointer
1029  *
1030  * Returns the current hardware enc read pointer
1031  */
1032 static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
1033 {
1034         struct amdgpu_device *adev = ring->adev;
1035
1036         if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1037                 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
1038         else
1039                 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
1040 }
1041
1042 /**
1043  * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
1044  *
1045  * @ring: amdgpu_ring pointer
1046  *
1047  * Returns the current hardware enc write pointer
1048  */
1049 static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
1050 {
1051         struct amdgpu_device *adev = ring->adev;
1052
1053         if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1054                 if (ring->use_doorbell)
1055                         return adev->wb.wb[ring->wptr_offs];
1056                 else
1057                         return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
1058         } else {
1059                 if (ring->use_doorbell)
1060                         return adev->wb.wb[ring->wptr_offs];
1061                 else
1062                         return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
1063         }
1064 }
1065
1066 /**
1067  * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
1068  *
1069  * @ring: amdgpu_ring pointer
1070  *
1071  * Commits the enc write pointer to the hardware
1072  */
1073 static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
1074 {
1075         struct amdgpu_device *adev = ring->adev;
1076
1077         if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1078                 if (ring->use_doorbell) {
1079                         adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1080                         WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1081                 } else {
1082                         WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1083                 }
1084         } else {
1085                 if (ring->use_doorbell) {
1086                         adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1087                         WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1088                 } else {
1089                         WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1090                 }
1091         }
1092 }
1093
1094 static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
1095         .type = AMDGPU_RING_TYPE_VCN_ENC,
1096         .align_mask = 0x3f,
1097         .nop = VCN_ENC_CMD_NO_OP,
1098         .vmhub = AMDGPU_MMHUB_1,
1099         .get_rptr = vcn_v2_5_enc_ring_get_rptr,
1100         .get_wptr = vcn_v2_5_enc_ring_get_wptr,
1101         .set_wptr = vcn_v2_5_enc_ring_set_wptr,
1102         .emit_frame_size =
1103                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1104                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1105                 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1106                 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1107                 1, /* vcn_v2_0_enc_ring_insert_end */
1108         .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1109         .emit_ib = vcn_v2_0_enc_ring_emit_ib,
1110         .emit_fence = vcn_v2_0_enc_ring_emit_fence,
1111         .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1112         .test_ring = amdgpu_vcn_enc_ring_test_ring,
1113         .test_ib = amdgpu_vcn_enc_ring_test_ib,
1114         .insert_nop = amdgpu_ring_insert_nop,
1115         .insert_end = vcn_v2_0_enc_ring_insert_end,
1116         .pad_ib = amdgpu_ring_generic_pad_ib,
1117         .begin_use = amdgpu_vcn_ring_begin_use,
1118         .end_use = amdgpu_vcn_ring_end_use,
1119         .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1120         .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1121         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1122 };
1123
1124 /**
1125  * vcn_v2_5_jpeg_ring_get_rptr - get read pointer
1126  *
1127  * @ring: amdgpu_ring pointer
1128  *
1129  * Returns the current hardware read pointer
1130  */
1131 static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
1132 {
1133         struct amdgpu_device *adev = ring->adev;
1134
1135         return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
1136 }
1137
1138 /**
1139  * vcn_v2_5_jpeg_ring_get_wptr - get write pointer
1140  *
1141  * @ring: amdgpu_ring pointer
1142  *
1143  * Returns the current hardware write pointer
1144  */
1145 static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
1146 {
1147         struct amdgpu_device *adev = ring->adev;
1148
1149         if (ring->use_doorbell)
1150                 return adev->wb.wb[ring->wptr_offs];
1151         else
1152                 return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
1153 }
1154
1155 /**
1156  * vcn_v2_5_jpeg_ring_set_wptr - set write pointer
1157  *
1158  * @ring: amdgpu_ring pointer
1159  *
1160  * Commits the write pointer to the hardware
1161  */
1162 static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
1163 {
1164         struct amdgpu_device *adev = ring->adev;
1165
1166         if (ring->use_doorbell) {
1167                 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1168                 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1169         } else {
1170                 WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
1171         }
1172 }
1173
1174 static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
1175         .type = AMDGPU_RING_TYPE_VCN_JPEG,
1176         .align_mask = 0xf,
1177         .vmhub = AMDGPU_MMHUB_1,
1178         .get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
1179         .get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
1180         .set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
1181         .emit_frame_size =
1182                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1183                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1184                 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
1185                 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
1186                 8 + 16,
1187         .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
1188         .emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
1189         .emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
1190         .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
1191         .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
1192         .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
1193         .insert_nop = vcn_v2_0_jpeg_ring_nop,
1194         .insert_start = vcn_v2_0_jpeg_ring_insert_start,
1195         .insert_end = vcn_v2_0_jpeg_ring_insert_end,
1196         .pad_ib = amdgpu_ring_generic_pad_ib,
1197         .begin_use = amdgpu_vcn_ring_begin_use,
1198         .end_use = amdgpu_vcn_ring_end_use,
1199         .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
1200         .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
1201         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1202 };
1203
1204 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
1205 {
1206         int i;
1207
1208         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1209                 if (adev->vcn.harvest_config & (1 << i))
1210                         continue;
1211                 adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
1212                 adev->vcn.inst[i].ring_dec.me = i;
1213                 DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
1214         }
1215 }
1216
1217 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
1218 {
1219         int i, j;
1220
1221         for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
1222                 if (adev->vcn.harvest_config & (1 << j))
1223                         continue;
1224                 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
1225                         adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
1226                         adev->vcn.inst[j].ring_enc[i].me = j;
1227                 }
1228                 DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
1229         }
1230 }
1231
1232 static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
1233 {
1234         int i;
1235
1236         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1237                 if (adev->vcn.harvest_config & (1 << i))
1238                         continue;
1239                 adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
1240                 adev->vcn.inst[i].ring_jpeg.me = i;
1241                 DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
1242         }
1243 }
1244
1245 static bool vcn_v2_5_is_idle(void *handle)
1246 {
1247         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1248         int i, ret = 1;
1249
1250         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1251                 if (adev->vcn.harvest_config & (1 << i))
1252                         continue;
1253                 ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
1254         }
1255
1256         return ret;
1257 }
1258
1259 static int vcn_v2_5_wait_for_idle(void *handle)
1260 {
1261         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1262         int i, ret = 0;
1263
1264         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1265                 if (adev->vcn.harvest_config & (1 << i))
1266                         continue;
1267                 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
1268                         UVD_STATUS__IDLE, ret);
1269                 if (ret)
1270                         return ret;
1271         }
1272
1273         return ret;
1274 }
1275
1276 static int vcn_v2_5_set_clockgating_state(void *handle,
1277                                           enum amd_clockgating_state state)
1278 {
1279         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1280         bool enable = (state == AMD_CG_STATE_GATE);
1281
1282         if (enable) {
1283                 if (!vcn_v2_5_is_idle(handle))
1284                         return -EBUSY;
1285                 vcn_v2_5_enable_clock_gating(adev);
1286         } else {
1287                 vcn_v2_5_disable_clock_gating(adev);
1288         }
1289
1290         return 0;
1291 }
1292
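/*
 * Power gating is driven through full stop/start of the block:
 * AMD_PG_STATE_GATE tears the instances down via vcn_v2_5_stop(), any other
 * state reboots them via vcn_v2_5_start(); cur_state caches the last state
 * that was applied successfully.
 */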
1293 static int vcn_v2_5_set_powergating_state(void *handle,
1294                                           enum amd_powergating_state state)
1295 {
1296         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1297         int ret;
1298
1299         if (state == adev->vcn.cur_state)
1300                 return 0;
1301
1302         if (state == AMD_PG_STATE_GATE)
1303                 ret = vcn_v2_5_stop(adev);
1304         else
1305                 ret = vcn_v2_5_start(adev);
1306
1307         if (!ret)
1308                 adev->vcn.cur_state = state;
1309
1310         return ret;
1311 }
1312
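/*
 * Nothing is programmed per interrupt state; the callback is a stub that
 * only satisfies the amdgpu_irq_src_funcs interface.
 */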
1313 static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
1314                                         struct amdgpu_irq_src *source,
1315                                         unsigned type,
1316                                         enum amdgpu_interrupt_state state)
1317 {
1318         return 0;
1319 }
1320
1321 static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
1322                                       struct amdgpu_irq_src *source,
1323                                       struct amdgpu_iv_entry *entry)
1324 {
1325         uint32_t ip_instance;
1326
1327         switch (entry->client_id) {
1328         case SOC15_IH_CLIENTID_VCN:
1329                 ip_instance = 0;
1330                 break;
1331         case SOC15_IH_CLIENTID_VCN1:
1332                 ip_instance = 1;
1333                 break;
1334         default:
1335                 DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1336                 return 0;
1337         }
1338
1339         DRM_DEBUG("IH: VCN TRAP\n");
1340
1341         switch (entry->src_id) {
1342         case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
1343                 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
1344                 break;
1345         case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1346                 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1347                 break;
1348         case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
1349                 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
1350                 break;
1351         case VCN_2_0__SRCID__JPEG_DECODE:
1352                 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
1353                 break;
1354         default:
1355                 DRM_ERROR("Unhandled interrupt: %d %d\n",
1356                           entry->src_id, entry->src_data[0]);
1357                 break;
1358         }
1359
1360         return 0;
1361 }
1362
1363 static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
1364         .set = vcn_v2_5_set_interrupt_state,
1365         .process = vcn_v2_5_process_interrupt,
1366 };
1367
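/*
 * Each instance exposes num_enc_rings + 2 interrupt types: the system
 * message (decode) source, one source per encode ring and the JPEG decode
 * source registered in sw_init.
 */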
1368 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
1369 {
1370         int i;
1371
1372         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1373                 if (adev->vcn.harvest_config & (1 << i))
1374                         continue;
1375                 adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
1376                 adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
1377         }
1378 }
1379
1380 static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
1381         .name = "vcn_v2_5",
1382         .early_init = vcn_v2_5_early_init,
1383         .late_init = NULL,
1384         .sw_init = vcn_v2_5_sw_init,
1385         .sw_fini = vcn_v2_5_sw_fini,
1386         .hw_init = vcn_v2_5_hw_init,
1387         .hw_fini = vcn_v2_5_hw_fini,
1388         .suspend = vcn_v2_5_suspend,
1389         .resume = vcn_v2_5_resume,
1390         .is_idle = vcn_v2_5_is_idle,
1391         .wait_for_idle = vcn_v2_5_wait_for_idle,
1392         .check_soft_reset = NULL,
1393         .pre_soft_reset = NULL,
1394         .soft_reset = NULL,
1395         .post_soft_reset = NULL,
1396         .set_clockgating_state = vcn_v2_5_set_clockgating_state,
1397         .set_powergating_state = vcn_v2_5_set_powergating_state,
1398 };
1399
1400 const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
1401 {
1402                 .type = AMD_IP_BLOCK_TYPE_VCN,
1403                 .major = 2,
1404                 .minor = 5,
1405                 .rev = 0,
1406                 .funcs = &vcn_v2_5_ip_funcs,
1407 };