/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c
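/*
 * Note: the *_INTERNAL_OFFSET values above are register indices as seen by
 * the VCN VCPU/firmware, not MMIO offsets; they are used by the indirect
 * programming paths (decode ring packets and DPG mode), while the matching
 * external offsets come from SOC15_REG_OFFSET() below.
 */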
#define VCN25_MAX_HW_INSTANCES_ARCTURUS			2
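/* Arcturus carries up to two VCN instances; either one may be fused off. */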
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};
/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS) {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
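		/*
		 * Read the harvest fuse for each instance; a set
		 * UVD_DISABLE bit means that instance is fused off.
		 */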
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}

		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->vcn.num_vcn_inst = 1;
	}

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}
/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
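		/*
		 * Doorbell layout: under SR-IOV each instance gets a stride of
		 * two doorbells (dec plus one enc ring), while bare metal uses
		 * a stride of eight per instance.
		 */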
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
			if (r)
				return r;
		}
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	return 0;
}
/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {
			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
							     ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark rings as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_dec;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}
	}

	return 0;
}
/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}
/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;
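	/*
	 * The VCPU sees three cache windows: window 0 maps the firmware
	 * image, window 1 the stack and window 2 the context buffer.
	 */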
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
	}
}
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, ret = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on SUVD clock gating */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}
static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
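	/*
	 * When indirect is set, the WREG32_SOC15_DPG_MODE_2_0 writes below
	 * are staged into the per-instance DPG SRAM image instead of hitting
	 * the registers directly; the accumulated image is handed off to the
	 * PSP via psp_update_vcn_sram() near the end of this function.
	 */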
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	return 0;
}
static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
		WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
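		/*
		 * Boot the VCPU: poll UVD_STATUS for the running bit,
		 * retrying with a VCPU block reset up to 10 times before
		 * giving up.
		 */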
		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}
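/*
 * Under SR-IOV the MMSCH (multimedia scheduler) microcontroller on the host
 * side performs the register programming on the VF's behalf: the guest
 * builds a descriptor table in memory and kicks it off through the mailbox
 * handshake below.
 */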
static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 *  memory descriptor location
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}
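/*
 * Build the per-engine MMSCH init table: for each VCN instance, the same
 * MC and ring setup done in the bare-metal start path is encoded as
 * direct-write packets that MMSCH replays on the host side.
 */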
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}
static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	int ret_code = 0;
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
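/*
 * DPG pause handling: when work arrives while an instance sits in dynamic
 * power gating, the firmware is asked to pause DPG and the encode ring
 * registers are re-programmed before use; unpausing needs no wait.
 */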
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;
			SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

				/* Restore */
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
					RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);

				SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
					0x0, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
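	/*
	 * In DPG mode the write pointer is mirrored into SCRATCH2 (with
	 * bit 31 set as a valid flag) so the firmware can restore it on
	 * DPG unpause; see vcn_v2_5_pause_dpg_mode().
	 */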
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}
/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
	}
}
/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}
static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}
static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}
static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}
static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		/* only gate clocks once the block is idle */
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}
static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 2,
		.minor = 5,
		.rev = 0,
		.funcs = &vcn_v2_5_ip_funcs,
};