drm/amdgpu: Modify the argument of emit_ib interface
author     Rex Zhu <Rex.Zhu@amd.com>
           Wed, 24 Oct 2018 05:37:37 +0000 (13:37 +0800)
committer  Alex Deucher <alexander.deucher@amd.com>
           Mon, 5 Nov 2018 19:21:50 +0000 (14:21 -0500)
Use the pointer to struct amdgpu_job as the function
argument instead of vmid, so that the other members of
struct amdgpu_job can be accessed in the emit_ib functions.

v2: add a wrapper for getting the VMID
    place the job before the ib in the parameter list.
v3: refine the wrapper name

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
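
For reference, a minimal sketch (not part of this patch) of what a ring
backend's emit_ib callback looks like after this change: the job pointer
now precedes the IB in the argument list, and the VMID is recovered
through the new AMDGPU_JOB_GET_VMID() wrapper, so a NULL job (e.g. a
direct kernel submission) still resolves to VMID 0. The callback name
and the packet layout below are hypothetical; each engine emits its own
command format.

#include "amdgpu.h"

static void example_ring_emit_ib(struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_ib *ib,
				 bool ctx_switch)
{
	/* NULL job resolves to VMID 0 via the wrapper */
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* engine-specific IB packet; illustrative layout only
	 * (ctx_switch is unused in this sketch) */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, vmid);
}

The scheduler side then submits through the updated wrapper, e.g.
amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch), as seen in
amdgpu_ib_schedule() in the first hunk below.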
21 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c

index ec0e6238dbc3678f40021d298064afc8c3b5c7d3..c48207b377bc5f5c64549eca69662896285971e5 100644 (file)
@@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                        !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;
 
-               amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
-                                   need_ctx_switch);
+               amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
                need_ctx_switch = false;
        }
 
index 57cfe78a262b11e70eb037afda5755769932fabd..e1b46a6703ded9a60370bedf20bab108b19fd808 100644 (file)
@@ -33,6 +33,8 @@
 #define to_amdgpu_job(sched_job)               \
                container_of((sched_job), struct amdgpu_job, base)
 
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
 struct amdgpu_fence;
 
 struct amdgpu_job {
index 4cdddbc4491bbb47b76080086c1b0e573025f236..0beb01fef83fd38c9b940c167fa7df7d25df1f71 100644 (file)
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs {
        unsigned emit_ib_size;
        /* command emit functions */
        void (*emit_ib)(struct amdgpu_ring *ring,
+                       struct amdgpu_job *job,
                        struct amdgpu_ib *ib,
-                       unsigned vmid, bool ctx_switch);
+                       bool ctx_switch);
        void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
                           uint64_t seq, unsigned flags);
        void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -228,7 +229,7 @@ struct amdgpu_ring {
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c)))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
index 379e1ae7a8fba9e7c08892a63bd7322c9ddbfe08..98a1b2ce2b9d38cec778dc2c9a8097661eca27b0 100644 (file)
@@ -1032,8 +1032,10 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
  * @ib: the IB to execute
  *
  */
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-                            unsigned vmid, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+                               struct amdgpu_job *job,
+                               struct amdgpu_ib *ib,
+                               bool ctx_switch)
 {
        amdgpu_ring_write(ring, VCE_CMD_IB);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index a1f209eed4c477498ee932a0f02e47fdff993137..50293652af148cc3c8fa2d04d8b7723e8b744e32 100644 (file)
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-                            unsigned vmid, bool ctx_switch);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+                               struct amdgpu_ib *ib, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
index 49275f358f7a77286a8bd2e367b0c3be745f0a19..ad58dcec223e781df53b1de0dab3d12c21c9e886 100644 (file)
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (CIK).
  */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 extra_bits = vmid & 0xf;
 
        /* IB packet must end on a 8 DW boundary */
index 25cf905965fb4130bff8990ef69c61dbbd690cc7..5b25c26fa30e53015015764dcf83dfb2c6184bea 100644 (file)
@@ -1840,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 }
 
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
index ff8d316d05337cc30219b895aa9d95c41b64c584..243b8c502ca6358b159aad1b9e2ae379deb17104 100644 (file)
@@ -2227,9 +2227,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                     struct amdgpu_ib *ib,
-                                     unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -2256,9 +2258,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                         struct amdgpu_job *job,
                                          struct amdgpu_ib *ib,
-                                         unsigned vmid, bool ctx_switch)
+                                         bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
index 58c5ebe1cd7308e071300489ec6d2afd3f1eb8c6..bdae5636a9105918a9e47011c28945ca145e7cca 100644 (file)
@@ -6109,9 +6109,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                     struct amdgpu_ib *ib,
-                                     unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -6139,9 +6141,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                         struct amdgpu_job *job,
                                          struct amdgpu_ib *ib,
-                                         unsigned vmid, bool ctx_switch)
+                                         bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
index 100f23b5e22f55fac80cfb3cc556e51b3862e963..928034ce799453b19556a938947c5fb2633f31ef 100644 (file)
@@ -4049,9 +4049,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 }
 
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
-                                      struct amdgpu_ib *ib,
-                                      unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;
 
        if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -4080,20 +4082,22 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 }
 
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
-                                          struct amdgpu_ib *ib,
-                                          unsigned vmid, bool ctx_switch)
+                                         struct amdgpu_job *job,
+                                         struct amdgpu_ib *ib,
+                                         bool ctx_switch)
 {
-        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
-        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+       amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
-        amdgpu_ring_write(ring,
+       amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
-                                (2 << 0) |
+                               (2 << 0) |
 #endif
-                                lower_32_bits(ib->gpu_addr));
-        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
-        amdgpu_ring_write(ring, control);
+                               lower_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+       amdgpu_ring_write(ring, control);
 }
 
 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
index c4ab54a59fc98788900f5ff5d747a4701cb4be36..fb2a066c0ac9c7fa37712871208c707d019b851c 100644 (file)
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        /* IB packet must end on a 8 DW boundary */
        sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
index e3adddbcb5930b41cde8aedb2698610167cb9f79..a9848d28707d20ee566c07f1e289b54b29b89b57 100644 (file)
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VI).
  */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        /* IB packet must end on a 8 DW boundary */
        sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
index 7f9a501c919d2b2a2a3c4aa68c4fb55a9844709d..e740953110d88d142ab4d72f3789c87f827c6bb8 100644 (file)
@@ -497,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
  * Schedule an IB in the DMA ring (VEGA10).
  */
 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-                                       struct amdgpu_ib *ib,
-                                       unsigned vmid, bool ctx_switch)
+                                  struct amdgpu_job *job,
+                                  struct amdgpu_ib *ib,
+                                  bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        /* IB packet must end on a 8 DW boundary */
        sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 
index bff6954c21503f28ffdd6d9a7680aaf69e77f88f..b6e473134e19fae3bb107fa9160676616ecd44fc 100644 (file)
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 }
 
 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+                               struct amdgpu_job *job,
                                struct amdgpu_ib *ib,
-                               unsigned vmid, bool ctx_switch)
+                               bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
         * Pad as necessary with NOPs.
         */
index 51681eb0dd5801a0aa01007247a373762e0e5e4e..90bbcee00f289e007f945e0ed66f6e604b148c87 100644 (file)
@@ -509,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
        amdgpu_ring_write(ring, ib->gpu_addr);
index 907afcf8d8671d6a28a72db292456a8401177a63..1c5e127031037e9b7dcfe47b52230ec12264dfca 100644 (file)
@@ -524,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 7df41d1c818b7914bfb20540da7baff16e0edcbe..f184842ef2a280b183216b279a0253a42d4e00f7 100644 (file)
@@ -975,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
        amdgpu_ring_write(ring, vmid);
 
@@ -998,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 3985530a882f4a95b27448ba876e2ad41fca576c..8a4595968d98f4b506bcfbddee121cd640c4c956 100644 (file)
@@ -1270,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                 bool ctx_switch)
 {
        struct amdgpu_device *adev = ring->adev;
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1299,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 2b1a5a793942633b4b0146e2d6ba3adeaa78e4aa..3e84840859a725708d5df38640bf9b1b65201e3a 100644 (file)
@@ -833,8 +833,12 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
 }
 
 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                 struct amdgpu_job *job,
+                                 struct amdgpu_ib *ib,
+                                 bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, VCE_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 65b71fc2f7b90f9f15f9e58e6c24f774f2fe73ec..0054ba1b9a6855df99dfd1f3362bce11ed6af0ab 100644 (file)
@@ -946,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
 }
 #endif
 
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib, bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, VCE_CMD_IB_VM);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
index 29628f60d50c84d232bde50ff12787add13f561d..c1a03505f956c9419967a5a07fa81f47bfa8dd81 100644 (file)
@@ -1358,10 +1358,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
  * Write ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
        struct amdgpu_device *adev = ring->adev;
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1516,8 +1518,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
  * Write enc ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-               struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
        amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1717,10 +1723,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
  * Write ring commands to execute the indirect buffer.
  */
 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
-                                 struct amdgpu_ib *ib,
-                                 unsigned vmid, bool ctx_switch)
+                                       struct amdgpu_job *job,
+                                       struct amdgpu_ib *ib,
+                                       bool ctx_switch)
 {
        struct amdgpu_device *adev = ring->adev;
+       unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));