/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

struct cik_sdma_rlc_registers;

/*
 * Register access functions
 */
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
		uint32_t queue_id, uint32_t __user *wptr,
		uint32_t wptr_shift, uint32_t wptr_mask,
		struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
		enum kfd_preempt_type reset_type,
		unsigned int utimeout, uint32_t pipe_id,
		uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
		unsigned int utimeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		uint32_t cntl_val,
		uint32_t addr_hi,
		uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
		uint32_t gfx_index_val,
		uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
		uint64_t va, uint32_t vmid);
/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}
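/*
 * Illustrative sketch (not part of the upstream file): a KFD-side caller
 * obtains this function table via amdgpu_amdkfd_gfx_8_0_get_functions()
 * below, and would query the tile configuration roughly like this
 * ("funcs" is a hypothetical table pointer):
 *
 *	struct tile_config config;
 *
 *	if (!funcs->get_tile_config(kgd, &config))
 *		... consume config.tile_config_ptr[0 .. config.num_tile_configs - 1] ...
 */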
static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_vm_alloc_pasid,
	.free_pasid = amdgpu_vm_free_pasid,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
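/*
 * Usage pattern (sketch, derived from the helpers above): per-queue
 * CP_HQD_* registers are indexed indirectly through SRBM_GFX_CNTL, so
 * every access must be bracketed by acquire_queue()/release_queue():
 *
 *	acquire_queue(kgd, pipe_id, queue_id);
 *	... RREG32()/WREG32() on mmCP_HQD_* registers ...
 *	release_queue(kgd);
 */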
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	return 0;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}
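/*
 * Editorial sketch (not from this file): once kgd_hqd_load() enables the
 * doorbell, the queue advances without further MMIO from the driver. The
 * user-mode side of the protocol is, roughly:
 *
 *	ring[wptr++ & rb_mask] = packet;   // write a PM4 packet
 *	*wptr_addr = wptr;                 // publish the new write pointer
 *	*doorbell  = wptr;                 // ring doorbell; CP re-reads wptr
 *
 * The names ring, rb_mask, wptr_addr and doorbell are hypothetical.
 */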
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	return 0;
}
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");
		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	int timeout = utimeout;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		/* Test the IDLE field with its mask, not a __SHIFT value */
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);

	return 0;
}
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	/* Return the PASID field, not the VALID bit */
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
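/*
 * Note: VM_INVALIDATE_REQUEST takes a bitmask of VMIDs, so a single write
 * could flush several VMIDs at once; this helper flushes exactly one, e.g.
 * write_vmid_invalidate_request(kgd, 8) invalidates only VMID 8.
 */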
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;
	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;
	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;
	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;
	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;
	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;
	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
					adev->sdma.instance[0].fw->data;
		break;
	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
					adev->sdma.instance[1].fw->data;
		break;
	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}
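/*
 * Editorial note: per the comment above, only the low 12 bits of the
 * returned version are meaningful; a caller that wants just those bits
 * would mask the result, e.g.:
 *
 *	uint16_t ver = get_fw_version(kgd, KGD_ENGINE_MEC1) & 0xFFF;
 *
 * Whether callers mask or consume the raw value is outside this file.
 */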