/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"

#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "athub_v2_0.h"
/* XXX Move this macro to navi10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS 8
#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};
#endif
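
/*
 * The bits[] masks built in gmc_v10_0_vm_fault_interrupt_state() below
 * enable every VM protection fault interrupt source (range, dummy page,
 * PDE0, valid, read, write and execute) for one hub.  The per-context
 * CNTL registers of a hub are laid out consecutively, so
 * vm_context0_cntl + i addresses VM context i; the loops walk all 16
 * contexts of both the GFX and MM hubs.
 */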
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;

	bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits[AMDGPU_MMHUB_0];
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits[AMDGPU_GFXHUB_0];
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits[AMDGPU_MMHUB_0];
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits[AMDGPU_GFXHUB_0];
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		/* writing 1 to bit 0 of *_CNTL clears the fault status */
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
			"for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
}
static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
					     uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
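
/*
 * For example, a legacy flush (flush_type 0) of VMID 0 yields a request
 * word with PER_VMID_INVALIDATE_REQ = 0x1 and all of the L1/L2 PTE and
 * PDE invalidate bits set.  Callers write this word to a
 * *VM_INVALIDATE_ENG*_REQ register and then wait for the per-VMID bit
 * in the matching ACK register.
 */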
/*
 * GART
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
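
/*
 * Two flush paths follow: gmc_v10_0_flush_vm_hub() is a synchronous
 * MMIO flush through the reserved invalidation engine 17, while
 * gmc_v10_0_emit_flush_gpu_tlb() emits the same request on a ring,
 * using the invalidation engine assigned to that ring in late_init.
 */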
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;
	int r;

	/* flush hdp cache */
	adev->nbio_funcs->hdp_flush(adev, NULL);

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    adev->in_gpu_reset) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}
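
/*
 * Design note on the function above: for the GFX hub the flush is
 * normally submitted as an SDMA job with vm_pd_addr pointing at the
 * GART page directory and vm_needs_flush set, so the invalidation is
 * performed from the SDMA queue itself and is serialized against
 * address translations, avoiding the hardware race described in the
 * comment above.  The direct MMIO path remains as a fallback for early
 * init and GPU reset.
 */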
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);

	/* wait for the invalidate to complete */
	amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
				  1 << vmid, 1 << vmid);

	return pd_addr;
}
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}
/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
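
/*
 * Example: a valid 4K PTE for a page that is readable and writable but
 * not executable has bit 0 (valid), bit 5 (read) and bit 6 (write)
 * set, the physical page number in bits 47:12, and the desired cache
 * attribute (e.g. MTYPE_NC) encoded in the mtype field, bits 50:48.
 */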
static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
					   uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}
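
/*
 * e.g. mapping a BO with AMDGPU_VM_PAGE_READABLE |
 * AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_MTYPE_UC produces
 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 * AMDGPU_PTE_MTYPE_NV10(MTYPE_UC).
 */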
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v10_0_get_vm_pde
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}
static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
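
/*
 * The shared and private apertures set up above are two 4 GB windows
 * in the 64-bit GPU virtual address space; they back the shader
 * system-scope and private (scratch) apertures.  The base addresses
 * follow the same SOC15 convention used by earlier ASICs such as Vega.
 */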
static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 17 is used for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 17);

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
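
/*
 * Each ring is handed its own VM invalidation engine on its hub,
 * counting up from engine 4.  Engine 17 is reserved for the MMIO GART
 * flushes in gmc_v10_0_flush_vm_hub(), which is why the BUG_ON above
 * rejects any allocation beyond 17.
 */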
static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = gfxhub_v2_0_get_fb_location(adev);

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
}
/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;

	if (!amdgpu_emu_mode)
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	else {
		/* hard code vram_width for emulation */
		chansize = 128;
		numchan = 1;
		adev->gmc.vram_width = numchan * chansize;
	}

	/* Could aper size report 0 ? */
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

	/* get_memsize() reports the size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_NAVI10:
		case CHIP_NAVI14:
		case CHIP_NAVI12:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		}
	} else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
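
/*
 * Each GART entry is an 8-byte PTE, hence table_size is
 * num_gpu_pages * 8.  GART mappings are uncached (MTYPE_UC) and marked
 * executable so GART-backed buffers can hold shader code as well as
 * data.
 */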
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport, pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = REG_GET_FIELD(viewport,
				     HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
		       REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * 4;
	}

	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
		DRM_ERROR("Warning: pre-OS buffer uses most of vram, "
			  "be aware of gart table overwrite\n");
		return 0;
	}

	return size;
}
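
/*
 * The non-VGA path above estimates the pre-OS framebuffer as active
 * viewport height * surface pitch * 4 bytes per pixel, assuming the
 * vbios set up a 32bpp scanout surface.
 */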
static int gmc_v10_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v2_0_init(adev);
	mmhub_v2_0_init(adev);
	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	/*
	 * Set the internal MC address mask.  This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/*
	 * Reserve 8M stolen memory for navi10 like vega10
	 * TODO: will check if it's really needed on asic.
	 */
	if (amdgpu_emu_mode == 1)
		adev->gmc.stolen_size = 0;
	else
		adev->gmc.stolen_size = 9 * 1024 * 1024;

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;
	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);
	return 0;
}
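
/*
 * Note the ordering in sw_init() above: stolen_size starts out as a
 * conservative placeholder and is replaced by the exact pre-OS
 * framebuffer size once gmc_v10_0_mc_init() has established
 * real_vram_size, which gmc_v10_0_get_vbios_fb_size() needs for its
 * sanity check.
 */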
/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		break;
	}
}
/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	r = gfxhub_v2_0_gart_enable(adev);
	if (r)
		return r;
	r = mmhub_v2_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* Flush HDP after it is initialized */
	adev->nbio_funcs->hdp_flush(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;
	gfxhub_v2_0_set_fault_enable_default(adev, value);
	mmhub_v2_0_set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;
	return 0;
}
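
/*
 * The enable sequence above is ordered: both hubs come up first, then
 * the HDP cache is configured and flushed, fault handling defaults are
 * chosen, and finally each hub gets an initial VMID 0 TLB flush before
 * the GART is declared ready.
 */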
static int gmc_v10_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	return gmc_v10_0_gart_enable(adev);
}
/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v2_0_gart_disable(adev);
	mmhub_v2_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v10_0_gart_disable(adev);
	return 0;
}
static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);
	return 0;
}
static int gmc_v10_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = gmc_v10_0_hw_init(adev);

	if (r)
		return r;
	amdgpu_vmid_reset_all(adev);
	return 0;
}
static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}
static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = mmhub_v2_0_set_clockgating(adev, state);
	if (r)
		return r;
	return athub_v2_0_set_clockgating(adev, state);
}
static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v2_0_get_clockgating(adev, flags);
	athub_v2_0_get_clockgating(adev, flags);
}
static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};
const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};