/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);

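/* The upper eight hardware VMIDs (bits 8-15) are handed to the KFD for
 * compute work; amdgpu keeps VMIDs 0-7 for graphics. See
 * amdgpu_amdkfd_is_kfd_vmid() below.
 */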
static const unsigned int compute_vmid_bitmap = 0xFF00;

int amdgpu_amdkfd_init(void)
{
	int ret;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	kgd2kfd = NULL;
	ret = -ENOENT;
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
				   adev->pdev, kfd2kgd);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		/* nothing left over: report an empty aperture to amdkfd */
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_VA_HOLE_START),
			.drm_render_minor = adev->ddev->render->index
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
					adev->gfx.kiq.ring.me - 1,
					adev->gfx.kiq.ring.pipe,
					adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile-time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				 * adev->gfx.mec.num_pipe_per_mec
				 * adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		if (adev->asic_type >= CHIP_VEGA10) {
			/* On SOC15 the BIF is involved in routing
			 * doorbells using the low 12 bits of the
			 * address. Communicate the assignments to
			 * KFD. KFD uses two doorbell pages per
			 * process in case of 64-bit doorbells so we
			 * can use each doorbell assignment twice.
			 */
			gpu_resources.sdma_doorbell[0][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0;
			gpu_resources.sdma_doorbell[0][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
			gpu_resources.sdma_doorbell[1][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1;
			gpu_resources.sdma_doorbell[1][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
			/* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
			 * SDMA, IH and VCN. So don't use them for the CP.
			 */
			gpu_resources.reserved_doorbell_mask = 0x1f0;
			gpu_resources.reserved_doorbell_val = 0x0f0;
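			/* The mask/val pair encodes the reserved ranges above:
			 * a doorbell index is reserved when
			 * (index & reserved_doorbell_mask) == reserved_doorbell_val,
			 * i.e. 0x0f0-0x0ff, 0x2f0-0x2ff, and so on in every
			 * 0x200-doorbell page.
			 */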
		}

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->pre_reset(adev->kfd);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->post_reset(adev->kfd);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

void get_local_mem_info(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}

uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10 kHz, so dividing by 100 yields MHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE, !idle);
}

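/* Report whether a VMID is one of the compute VMIDs reserved for the KFD in
 * compute_vmid_bitmap above.
 */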
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if ((1 << vmid) & compute_vmid_bitmap)
		return true;

	return false;
}

#ifndef CONFIG_HSA_AMD
/* Stub implementations used when amdkfd support is not built in */
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}
#endif