/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET	0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f
#define VCN25_MAX_HW_INSTANCES_ARCTURUS			2
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);

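/*
 * One IH client ID per VCN instance; vcn_v2_5_sw_init() uses this table to
 * register the interrupt sources of instance j under that instance's client.
 */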
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS) {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}

		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else
		adev->vcn.num_vcn_inst = 1;

	adev->vcn.num_enc_rings = 2;

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_jpeg_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN JPEG TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
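
		/*
		 * Both instances run the same firmware image; a second ucode
		 * entry is registered for instance 1 so that, presumably, PSP
		 * keeps a separate copy in the TMR for each instance, which
		 * is why fw_size is accounted a second time below.
		 */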
		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
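		/*
		 * "internal" offsets are the VCPU-visible register indices
		 * used when register writes are emitted into the ring itself
		 * (see the shared vcn_v2_0 ring emit helpers); "external"
		 * offsets are SOC15 MMIO offsets for direct CPU access. This
		 * split is inferred from how amdgpu_vcn consumes the fields.
		 */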
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

		adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);
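
		/*
		 * Doorbell layout, as implied by the index math below: each
		 * instance j owns a block of 8 doorbells starting at
		 * (vcn_ring0_1 << 1) + 8*j:
		 *   +0     decode ring
		 *   +1     JPEG ring
		 *   +2 + i encode ring i
		 */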
		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
			if (r)
				return r;
		}

		ring = &adev->vcn.inst[j].ring_jpeg;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
		sprintf(ring->name, "vcn_jpeg_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		ring = &adev->vcn.inst[j].ring_dec;

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, j);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			/* disable encode rings until the firmware is robust enough */
			ring->sched.ready = false;
			continue;
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}

		ring = &adev->vcn.inst[j].ring_jpeg;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}
done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_dec;

		if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}

		ring = &adev->vcn.inst[i].ring_jpeg;
		ring->sched.ready = false;
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
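		/*
		 * VCPU cache window layout, as programmed below:
		 *   window 0: firmware image (the PSP TMR copy when PSP loads
		 *             the firmware, otherwise the driver BO)
		 *   window 1: stack, AMDGPU_VCN_STACK_SIZE bytes
		 *   window 2: context, AMDGPU_VCN_CONTEXT_SIZE bytes,
		 *             immediately after the stack
		 */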
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
	}
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, ret = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
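
		/*
		 * DYN_CLOCK_MODE selects hardware-controlled (dynamic)
		 * gating, so it is only left set when MGCG is supported; the
		 * gate-delay (1) and clock-off-delay (4) fields above appear
		 * to control how long clocks stay on after activity (an
		 * inference, the values are not documented here).
		 */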
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * jpeg_v2_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_jpeg;
		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
		tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
		tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
			| JPEG_CGC_GATE__JPEG2_DEC_MASK
			| JPEG_CGC_GATE__JMCIF_MASK
			| JPEG_CGC_GATE__JRBBM_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
		tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
			| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
			| JPEG_CGC_CTRL__JMCIF_MODE_MASK
			| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

		/* MJPEG global tiling registers */
		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable System Interrupt for JRBC */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);

		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
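		/*
		 * The magic bits above appear to be
		 * UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK (0x1) and
		 * UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK (0x2), going by the
		 * named masks used in jpeg_v2_0.c: hold the ring while
		 * base/rptr/wptr are programmed, then the follow-up write
		 * below keeps only RPTR_WR_EN so fetching is released.
		 */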
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
	}

	return 0;
}

/**
 * jpeg_v2_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
		tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
			| JPEG_CGC_GATE__JPEG2_DEC_MASK
			| JPEG_CGC_GATE__JMCIF_MASK
			| JPEG_CGC_GATE__JRBBM_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
	}

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
	}

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
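		/*
		 * The 0x8 OR-ed in below appears to program the LMI
		 * write-clean timer (the low bits of UVD_LMI_CTRL) to 8; the
		 * field is not referenced by name here, so this is an
		 * inference from the LMI programming of older UVD blocks.
		 */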
		WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU from reset */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
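		/*
		 * Seeding the cached wptr from the hardware rptr makes the
		 * decode ring start out empty; the WPTR write above just
		 * publishes that initial value to the hardware.
		 */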
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	r = jpeg_v2_5_start(adev);

	return r;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r;

	r = jpeg_v2_5_stop(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* wait for vcn idle */
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

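/*
 * When a ring uses a doorbell, the write pointer is mirrored into a GPU
 * writeback slot (adev->wb.wb[ring->wptr_offs]) before the doorbell is rung,
 * so the accessors above can read it back without an MMIO round trip. The
 * command emitters referenced below are shared with VCN 2.0, since the ring
 * packet format is unchanged; only the per-instance pointer accessors differ.
 */
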
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v2_5_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v2_5_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
	.get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
	.set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
		18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v2_0_jpeg_ring_nop,
	.insert_start = vcn_v2_0_jpeg_ring_insert_start,
	.insert_end = vcn_v2_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
		adev->vcn.inst[i].ring_jpeg.me = i;
		DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* gating can only be enabled once the block is idle */
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
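		/*
		 * num_enc_rings + 2 interrupt types: one per encode ring,
		 * plus the decoder system-message source and the JPEG decode
		 * source handled in vcn_v2_5_process_interrupt().
		 */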
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};