/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
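
/*
 * The golden-setting tables below are consumed three entries at a time by
 * amdgpu_program_register_sequence() as { register, AND mask, OR value }
 * triples: each register is read, the mask bits are cleared, the value is
 * ORed in, and the result is written back (a full 0xffffffff mask writes
 * the value directly).
 */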

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

/**
 * gmc_v7_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
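
/*
 * The 0x1F00 mask above covers the five MC busy bits of SRBM_STATUS
 * (MCB, MCB non-display, MCC, MCD and VMC) -- the same set that
 * gmc_v7_0_wait_for_idle() further down spells out with named masks.
 */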

void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}
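
/*
 * gmc_v7_0_mc_stop()/gmc_v7_0_mc_resume() bracket any reprogramming of the
 * MC: display and CPU framebuffer accesses are cut off and BLACKOUT_MODE is
 * raised so no client observes the apertures while MC_VM_FB_LOCATION and
 * friends change underneath it.
 */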

void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default: BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);

	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "cik_mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
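
/*
 * Layout note on the MC firmware image: the io_debug section is a list of
 * (MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA) register pairs, which is
 * why regs_size above divides the byte count by 4 * 2 (two 32-bit words
 * per entry), while ucode_size is a plain count of 32-bit words streamed
 * into MC_SEQ_SUP_PGM.
 */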

static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}
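
/*
 * 0xFFC0000000ULL is (1ULL << 40) - (1ULL << 30): the full 40-bit MC
 * address space minus the 1024M reserved above for the GTT.
 */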

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v7_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v7_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
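
/*
 * The trailing read back and rewrite of HDP_HOST_PATH_CNTL writes the
 * register back unchanged; the hardware reason is not documented here,
 * but the same pattern appears in the radeon CIK code, presumably to
 * latch/post the preceding HDP configuration.
 */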

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* CONFIG_MEMSIZE reports the VRAM size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart size to the
	 * larger of 1024M or the vram size
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v7_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
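
/*
 * Worked example: a board whose NOOFCHAN field decodes to 4 channels with
 * 64-bit CHANSIZE reports vram_width = 4 * 64 = 256 bits, and a
 * CONFIG_MEMSIZE reading of 2048 yields mc_vram_size = 2 GiB.
 */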

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
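
/*
 * Each GART entry is a 64-bit PTE: bits 63:12 hold the 4K-aligned page
 * address (hence the 0xFFFFFFFFFFFFF000ULL mask) and the low bits hold
 * the valid/system/snooped/read/write flags ORed in above, committed
 * with a single 64-bit write.
 */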

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
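
/*
 * When these *_ENABLE_DEFAULT bits are set, a faulting access is redirected
 * to the default (dummy) page instead of stalling the client; gart_enable()
 * below clears them for AMDGPU_VM_FAULT_STOP_ALWAYS so the first fault
 * halts translation for debugging.
 */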

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
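
/*
 * PAGE_TABLE_BLOCK_SIZE is programmed as amdgpu_vm_block_size - 9, i.e.
 * the field appears to encode log2(pages per page-table block) relative
 * to the 512-entry (2 MB) minimum: a register value of 0 selects 2 MB
 * blocks.
 */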

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v7_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v7_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};
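
/*
 * The three arrays above are parallel: mc_cg_registers lists the
 * clock-gating registers, and mc_cg_ls_en/mc_cg_en hold the matching
 * per-register light-sleep and gating enable masks, indexed together by
 * the loops below.
 */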

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
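
/*
 * All of the CG/LS helpers above follow the same pattern: read the
 * register, flip only the relevant enable field based on the requested
 * state and the cg_flags capability bits, and write back only when the
 * value actually changed.
 */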

static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gart_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_gem_init(adev);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * amdgpu_vm_size is in GB; one GB is (1 << 18) 4k pages.
	 * Max GPUVM size for CIK is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return 0;
}
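
/*
 * max_pfn arithmetic: amdgpu_vm_size is given in GB and one GB is
 * (1 << 18) 4k pages, so e.g. amdgpu_vm_size = 4 gives
 * max_pfn = 4 << 18 = 0x100000 pages = 4 GB of GPU virtual address space.
 */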

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_gart_fini(adev);
	amdgpu_gem_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void gmc_v7_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 7.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  0x15D4=0x%08X\n",
		 RREG32(0x575));
	dev_info(adev->dev, "  0x15D8=0x%08X\n",
		 RREG32(0x576));
	dev_info(adev->dev, "  0x15DC=0x%08X\n",
		 RREG32(0x577));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	if (adev->asic_type == CHIP_KAVERI) {
		dev_info(adev->dev, "  CHUB_CONTROL=0x%08X\n",
			 RREG32(mmCHUB_CONTROL));
	}

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_print_status((void *)adev);

		gmc_v7_0_mc_stop(adev, &save);
		if (gmc_v7_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v7_0_print_status((void *)adev);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VM contexts */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VM contexts */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}
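
/*
 * WREG32_P(reg, val, mask) is a masked read-modify-write:
 * new = (RREG32(reg) & mask) | val.  With val = 1 and mask = ~1 it sets
 * bit 0 of VM_CONTEXT1_CNTL2, which clears the latched fault address and
 * status read above, re-arming the fault registers for the next fault.
 */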

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.print_status = gmc_v7_0_print_status,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}