/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};

enum {
	ADDRESS_WATCH_REG_ADDR_HI = 0,
	ADDRESS_WATCH_REG_ADDR_LO,
	ADDRESS_WATCH_REG_CNTL,
	ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
	/* extend the mask to 26 bits to match the low address field */
	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};

static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};

union TCP_WATCH_CNTL_BITS {
	struct {
		uint32_t mask:24;
		uint32_t vmid:4;
		uint32_t atc:1;
		uint32_t mode:2;
		uint32_t valid:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};

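/* A watch point is armed in three steps: its CNTL register is written with
 * the valid bit clear, the high and low address registers are programmed,
 * and CNTL is rewritten with valid set, so the TCP never observes a
 * half-programmed watch point (see kgd_address_watch_execute() below).
 */
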
/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
		uint32_t queue_id, uint32_t __user *wptr,
		uint32_t wptr_shift, uint32_t wptr_mask,
		struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
		uint32_t pipe_id, uint32_t queue_id,
		uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
		uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
		uint32_t engine_id, uint32_t queue_id,
		uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
		enum kfd_preempt_type reset_type,
		unsigned int utimeout, uint32_t pipe_id,
		uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
		unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		uint32_t cntl_val,
		uint32_t addr_hi,
		uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
		uint32_t gfx_index_val,
		uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
		unsigned int watch_point_id,
		unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
		uint64_t va, uint32_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_local_mem_info = get_local_mem_info,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_vm_alloc_pasid,
	.free_pasid = amdgpu_vm_free_pasid,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
	.get_cu_info = get_cu_info
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

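/* The helpers above bank all subsequent register accesses: SRBM_GFX_CNTL
 * selects which MEC/pipe/queue/VMID instance the banked HQD registers
 * decode to, so every HQD access in this file is bracketed by
 * acquire_queue()/release_queue() (or lock_srbm()/unlock_srbm()) while
 * srbm_mutex is held.
 */
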
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

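/* The SDMA register apertures are laid out engine-major: each engine's
 * registers sit SDMA1_REGISTER_OFFSET apart and each RLC queue within an
 * engine a further KFD_CIK_SDMA_QUEUE_OFFSET in. For example, engine 1 /
 * queue 2 yields 1 * SDMA1_REGISTER_OFFSET + 2 * KFD_CIK_SDMA_QUEUE_OFFSET,
 * which is then added to the mmSDMA0_RLC0_* register offsets used below.
 */
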
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

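/* The dump produced above is *n_regs (offset, value) pairs, where the
 * stored offset is the register's byte offset (the dword offset shifted
 * left by 2), exactly as recorded by the DUMP_REG() macro.
 */
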
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
			ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdma_rlc_rb_rptr);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
			m->sdma_rlc_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
			RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

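/* The restore sequence above is order-sensitive: the ring buffer is
 * disabled and the engine polled to idle before RESUME_CTX is cleared and
 * the pointers and base addresses are programmed, and RB_ENABLE is set
 * only as the final step, so the engine cannot fetch from a
 * half-programmed queue.
 */
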
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");
		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

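/* kgd_hqd_destroy() above only issues the dequeue request once the IQ
 * timer and any pending dequeue request have been waited out (with
 * interrupts and preemption off so the MMIO timing checks stay
 * meaningful), then polls CP_HQD_ACTIVE until the queue deactivates or
 * the caller's timeout expires.
 */
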
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

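/* Before the queue is torn down, its current read pointer is saved back
 * into the MQD above so a later kgd_hqd_sdma_load() can resume from where
 * the queue stopped (the load path programs RB_RPTR from it, and falls
 * back to it for RB_WPTR when the user write pointer cannot be read).
 */
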
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turning off this address until we set all the registers */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
			ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */
	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;
	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;
	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;
	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;
	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;
	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;
	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;
	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;
	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}