drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_ucode.h"
28 #include "amdgpu_trace.h"
29
30 #include "gc/gc_10_1_0_offset.h"
31 #include "gc/gc_10_1_0_sh_mask.h"
32 #include "hdp/hdp_5_0_0_offset.h"
33 #include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
34 #include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
35
36 #include "soc15_common.h"
37 #include "soc15.h"
38 #include "navi10_sdma_pkt_open.h"
39 #include "nbio_v2_3.h"
40 #include "sdma_v5_0.h"
41
42 MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
43 MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
44
45 MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
46 MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
47
48 #define SDMA1_REG_OFFSET 0x600
49 #define SDMA0_HYP_DEC_REG_START 0x5880
50 #define SDMA0_HYP_DEC_REG_END 0x5893
51 #define SDMA1_HYP_DEC_REG_OFFSET 0x20
52
53 static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
54 static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
55 static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
56 static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
57
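/* Golden register settings programmed for both SDMA instances by
 * sdma_v5_0_init_golden_registers() during hw init.
 */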
58 static const struct soc15_reg_golden golden_settings_sdma_5[] = {
59         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
60         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
61         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
62         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
63         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
64         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
65         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
66         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
67         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
68         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
69         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
70         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
71         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
72         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
73         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
74         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
75         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
76         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
77         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
78         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
79         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
80         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
81         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
82         SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
83 };
84
85 static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
86 };
87
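/* Translate an internal SDMA register offset into an absolute offset for the
 * given instance: registers in the HYP_DEC range use their own base and a
 * 0x20 per-instance stride, everything else uses the GC base plus a 0x600
 * stride for instance 1.
 */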
88 static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
89 {
90         u32 base;
91
92         if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
93             internal_offset <= SDMA0_HYP_DEC_REG_END) {
94                 base = adev->reg_offset[GC_HWIP][0][1];
95                 if (instance == 1)
96                         internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
97         } else {
98                 base = adev->reg_offset[GC_HWIP][0][0];
99                 if (instance == 1)
100                         internal_offset += SDMA1_REG_OFFSET;
101         }
102
103         return base + internal_offset;
104 }
105
106 static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
107 {
108         switch (adev->asic_type) {
109         case CHIP_NAVI10:
110                 soc15_program_register_sequence(adev,
111                                                 golden_settings_sdma_5,
112                                                 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
113                 soc15_program_register_sequence(adev,
114                                                 golden_settings_sdma_nv10,
115                                                 (const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
116                 break;
117         default:
118                 break;
119         }
120 }
121
122 /**
123  * sdma_v5_0_init_microcode - load ucode images from disk
124  *
125  * @adev: amdgpu_device pointer
126  *
127  * Use the firmware interface to load the ucode images into
128  * the driver (not loaded into hw).
129  * Returns 0 on success, error on failure.
130  */
131
132 // Emulation only; this path won't work on a real chip.
133 // A real Navi10 chip needs to use the PSP to load the firmware.
134 static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
135 {
136         const char *chip_name;
137         char fw_name[30];
138         int err = 0, i;
139         struct amdgpu_firmware_info *info = NULL;
140         const struct common_firmware_header *header = NULL;
141         const struct sdma_firmware_header_v1_0 *hdr;
142
143         DRM_DEBUG("\n");
144
145         switch (adev->asic_type) {
146         case CHIP_NAVI10:
147                 chip_name = "navi10";
148                 break;
149         case CHIP_NAVI14:
150                 chip_name = "navi14";
151                 break;
152         default:
153                 BUG();
154         }
155
156         for (i = 0; i < adev->sdma.num_instances; i++) {
157                 if (i == 0)
158                         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
159                 else
160                         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
161                 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
162                 if (err)
163                         goto out;
164                 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
165                 if (err)
166                         goto out;
167                 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
168                 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
169                 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
170                 if (adev->sdma.instance[i].feature_version >= 20)
171                         adev->sdma.instance[i].burst_nop = true;
172                 DRM_DEBUG("psp_load == '%s'\n",
173                                 adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
174
175                 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
176                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
177                         info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
178                         info->fw = adev->sdma.instance[i].fw;
179                         header = (const struct common_firmware_header *)info->fw->data;
180                         adev->firmware.fw_size +=
181                                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
182                 }
183         }
184 out:
185         if (err) {
186                 DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
187                 for (i = 0; i < adev->sdma.num_instances; i++) {
188                         release_firmware(adev->sdma.instance[i].fw);
189                         adev->sdma.instance[i].fw = NULL;
190                 }
191         }
192         return err;
193 }
194
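/* Emit a COND_EXE packet with a dummy execution count (0x55aa55aa) and return
 * the ring offset of that dword so it can be patched later by
 * sdma_v5_0_ring_patch_cond_exec().
 */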
195 static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
196 {
197         unsigned ret;
198
199         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
200         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
201         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
202         amdgpu_ring_write(ring, 1);
203         ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
204         amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
205
206         return ret;
207 }
208
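/* Patch the placeholder written by sdma_v5_0_ring_init_cond_exec() with the
 * number of dwords to skip, taking ring wrap-around into account.
 */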
209 static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
210                                            unsigned offset)
211 {
212         unsigned cur;
213
214         BUG_ON(offset > ring->buf_mask);
215         BUG_ON(ring->ring[offset] != 0x55aa55aa);
216
217         cur = (ring->wptr - 1) & ring->buf_mask;
218         if (cur > offset)
219                 ring->ring[offset] = cur - offset;
220         else
221                 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
222 }
223
224 /**
225  * sdma_v5_0_ring_get_rptr - get the current read pointer
226  *
227  * @ring: amdgpu ring pointer
228  *
229  * Get the current rptr from the hardware (NAVI10+).
230  */
231 static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
232 {
233         u64 *rptr;
234
235         /* XXX check if swapping is necessary on BE */
236         rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
237
238         DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
239         return ((*rptr) >> 2);
240 }
241
242 /**
243  * sdma_v5_0_ring_get_wptr - get the current write pointer
244  *
245  * @ring: amdgpu ring pointer
246  *
247  * Get the current wptr from the hardware (NAVI10+).
248  */
249 static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
250 {
251         struct amdgpu_device *adev = ring->adev;
252         u64 *wptr = NULL;
253         uint64_t local_wptr = 0;
254
255         if (ring->use_doorbell) {
256                 /* XXX check if swapping is necessary on BE */
257                 wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
258                 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
259                 *wptr = (*wptr) >> 2;
260                 DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
261         } else {
262                 u32 lowbit, highbit;
263
264                 wptr = &local_wptr;
265                 lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
266                 highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
267
268                 DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
269                                 ring->me, highbit, lowbit);
270                 *wptr = highbit;
271                 *wptr = (*wptr) << 32;
272                 *wptr |= lowbit;
273         }
274
275         return *wptr;
276 }
277
278 /**
279  * sdma_v5_0_ring_set_wptr - commit the write pointer
280  *
281  * @ring: amdgpu ring pointer
282  *
283  * Write the wptr back to the hardware (NAVI10+).
284  */
285 static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
286 {
287         struct amdgpu_device *adev = ring->adev;
288
289         DRM_DEBUG("Setting write pointer\n");
290         if (ring->use_doorbell) {
291                 DRM_DEBUG("Using doorbell -- "
292                                 "wptr_offs == 0x%08x "
293                                 "lower_32_bits(ring->wptr) << 2 == 0x%08x "
294                                 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
295                                 ring->wptr_offs,
296                                 lower_32_bits(ring->wptr << 2),
297                                 upper_32_bits(ring->wptr << 2));
298                 /* XXX check if swapping is necessary on BE */
299                 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
300                 adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
301                 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
302                                 ring->doorbell_index, ring->wptr << 2);
303                 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
304         } else {
305                 DRM_DEBUG("Not using doorbell -- "
306                                 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
307                                 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
308                                 ring->me,
309                                 lower_32_bits(ring->wptr << 2),
310                                 ring->me,
311                                 upper_32_bits(ring->wptr << 2));
312                 WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
313                         lower_32_bits(ring->wptr << 2));
314                 WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
315                         upper_32_bits(ring->wptr << 2));
316         }
317 }
318
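/* Insert @count NOP dwords into the ring; when the firmware supports burst
 * NOPs, the first NOP header encodes the remaining count.
 */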
319 static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
320 {
321         struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
322         int i;
323
324         for (i = 0; i < count; i++)
325                 if (sdma && sdma->burst_nop && (i == 0))
326                         amdgpu_ring_write(ring, ring->funcs->nop |
327                                 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
328                 else
329                         amdgpu_ring_write(ring, ring->funcs->nop);
330 }
331
332 /**
333  * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
334  *
335  * @ring: amdgpu ring pointer
336  * @ib: IB object to schedule
337  *
338  * Schedule an IB in the DMA ring (NAVI10).
339  */
340 static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
341                                    struct amdgpu_job *job,
342                                    struct amdgpu_ib *ib,
343                                    uint32_t flags)
344 {
345         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
346         uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
347
348         /* An IB packet must end on an 8 DW boundary */
349         sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
350
351         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
352                           SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
353         /* base must be 32 byte aligned */
354         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
355         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
356         amdgpu_ring_write(ring, ib->length_dw);
357         amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
358         amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
359 }
360
361 /**
362  * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
363  *
364  * @ring: amdgpu ring pointer
365  *
366  * Emit an hdp flush packet on the requested DMA ring.
367  */
368 static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
369 {
370         struct amdgpu_device *adev = ring->adev;
371         u32 ref_and_mask = 0;
372         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
373
374         if (ring->me == 0)
375                 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
376         else
377                 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
378
379         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
380                           SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
381                           SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
382         amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
383         amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
384         amdgpu_ring_write(ring, ref_and_mask); /* reference */
385         amdgpu_ring_write(ring, ref_and_mask); /* mask */
386         amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
387                           SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
388 }
389
390 /**
391  * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
392  *
393  * @ring: amdgpu ring pointer
394  * @fence: amdgpu fence object
395  *
396  * Add a DMA fence packet to the ring to write
397  * the fence seq number and a DMA trap packet to generate
398  * an interrupt if needed (NAVI10).
399  */
400 static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
401                                       unsigned flags)
402 {
403         struct amdgpu_device *adev = ring->adev;
404         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
405         /* write the fence */
406         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
407                           SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Uncached (UC) */
408         /* zero in first two bits */
409         BUG_ON(addr & 0x3);
410         amdgpu_ring_write(ring, lower_32_bits(addr));
411         amdgpu_ring_write(ring, upper_32_bits(addr));
412         amdgpu_ring_write(ring, lower_32_bits(seq));
413
414         /* optionally write high bits as well */
415         if (write64bit) {
416                 addr += 4;
417                 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
418                                   SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
419                 /* zero in first two bits */
420                 BUG_ON(addr & 0x3);
421                 amdgpu_ring_write(ring, lower_32_bits(addr));
422                 amdgpu_ring_write(ring, upper_32_bits(addr));
423                 amdgpu_ring_write(ring, upper_32_bits(seq));
424         }
425
426         /* Interrupts don't work properly on the GFX10.1 model yet; use the fallback instead */
427         if ((flags & AMDGPU_FENCE_FLAG_INT) && adev->pdev->device != 0x50) {
428                 /* generate an interrupt */
429                 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
430                 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
431         }
432 }
433
434
435 /**
436  * sdma_v5_0_gfx_stop - stop the gfx async dma engines
437  *
438  * @adev: amdgpu_device pointer
439  *
440  * Stop the gfx async dma ring buffers (NAVI10).
441  */
442 static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
443 {
444         struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
445         struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
446         u32 rb_cntl, ib_cntl;
447         int i;
448
449         if ((adev->mman.buffer_funcs_ring == sdma0) ||
450             (adev->mman.buffer_funcs_ring == sdma1))
451                 amdgpu_ttm_set_buffer_funcs_status(adev, false);
452
453         for (i = 0; i < adev->sdma.num_instances; i++) {
454                 rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
455                 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
456                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
457                 ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
458                 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
459                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
460         }
461
462         sdma0->sched.ready = false;
463         sdma1->sched.ready = false;
464 }
465
466 /**
467  * sdma_v5_0_rlc_stop - stop the compute async dma engines
468  *
469  * @adev: amdgpu_device pointer
470  *
471  * Stop the compute async dma queues (NAVI10).
472  */
473 static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
474 {
475         /* XXX todo */
476 }
477
478 /**
479  * sdma_v5_0_ctx_switch_enable - enable/disable the async dma engines context switch
480  *
481  * @adev: amdgpu_device pointer
482  * @enable: enable/disable the DMA MEs context switch.
483  *
484  * Halt or unhalt the async dma engines context switch (NAVI10).
485  */
486 static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
487 {
488         u32 f32_cntl, phase_quantum = 0;
489         int i;
490
491         if (amdgpu_sdma_phase_quantum) {
492                 unsigned value = amdgpu_sdma_phase_quantum;
493                 unsigned unit = 0;
494
495                 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
496                                 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
497                         value = (value + 1) >> 1;
498                         unit++;
499                 }
500                 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
501                             SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
502                         value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
503                                  SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
504                         unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
505                                 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
506                         WARN_ONCE(1,
507                         "clamping sdma_phase_quantum to %uK clock cycles\n",
508                                   value << unit);
509                 }
510                 phase_quantum =
511                         value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
512                         unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
513         }
514
515         for (i = 0; i < adev->sdma.num_instances; i++) {
516                 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
517                 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
518                                 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
519                 if (enable && amdgpu_sdma_phase_quantum) {
520                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
521                                phase_quantum);
522                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
523                                phase_quantum);
524                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
525                                phase_quantum);
526                 }
527                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
528         }
529
530 }
531
532 /**
533  * sdma_v5_0_enable - enable/disable the async dma engines
534  *
535  * @adev: amdgpu_device pointer
536  * @enable: enable/disable the DMA MEs.
537  *
538  * Halt or unhalt the async dma engines (NAVI10).
539  */
540 static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
541 {
542         u32 f32_cntl;
543         int i;
544
545         if (!enable) {
546                 sdma_v5_0_gfx_stop(adev);
547                 sdma_v5_0_rlc_stop(adev);
548         }
549
550         for (i = 0; i < adev->sdma.num_instances; i++) {
551                 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
552                 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
553                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
554         }
555 }
556
557 /**
558  * sdma_v5_0_gfx_resume - setup and start the async dma engines
559  *
560  * @adev: amdgpu_device pointer
561  *
562  * Set up the gfx DMA ring buffers and enable them (NAVI10).
563  * Returns 0 for success, error for failure.
564  */
565 static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
566 {
567         struct amdgpu_ring *ring;
568         u32 rb_cntl, ib_cntl;
569         u32 rb_bufsz;
570         u32 wb_offset;
571         u32 doorbell;
572         u32 doorbell_offset;
573         u32 temp;
574         u32 wptr_poll_cntl;
575         u64 wptr_gpu_addr;
576         int i, r;
577
578         for (i = 0; i < adev->sdma.num_instances; i++) {
579                 ring = &adev->sdma.instance[i].ring;
580                 wb_offset = (ring->rptr_offs * 4);
581
582                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
583
584                 /* Set ring buffer size in dwords */
585                 rb_bufsz = order_base_2(ring->ring_size / 4);
586                 rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
587                 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
588 #ifdef __BIG_ENDIAN
589                 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
590                 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
591                                         RPTR_WRITEBACK_SWAP_ENABLE, 1);
592 #endif
593                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
594
595                 /* Initialize the ring buffer's read and write pointers */
596                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
597                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
598                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
599                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
600
601                 /* setup the wptr shadow polling */
602                 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
603                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
604                        lower_32_bits(wptr_gpu_addr));
605                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
606                        upper_32_bits(wptr_gpu_addr));
607                 wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
608                                                          mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
609                 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
610                                                SDMA0_GFX_RB_WPTR_POLL_CNTL,
611                                                F32_POLL_ENABLE, 1);
612                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
613                        wptr_poll_cntl);
614
615                 /* set the wb address whether it's enabled or not */
616                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
617                        upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
618                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
619                        lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
620
621                 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
622
623                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
624                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
625
626                 ring->wptr = 0;
627
628                 /* before programming wptr to a smaller value, minor_ptr_update must be set first */
629                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
630
631                 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
632                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
633                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
634                 }
635
636                 doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
637                 doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
638
639                 if (ring->use_doorbell) {
640                         doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
641                         doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
642                                         OFFSET, ring->doorbell_index);
643                 } else {
644                         doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
645                 }
646                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
647                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
648
649                 adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
650                                                       ring->doorbell_index, 20);
651
652                 if (amdgpu_sriov_vf(adev))
653                         sdma_v5_0_ring_set_wptr(ring);
654
655                 /* set minor_ptr_update to 0 after wptr is programmed */
656                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
657
658                 /* set utc l1 enable flag always to 1 */
659                 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
660                 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
661
662                 /* enable MCBP */
663                 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
664                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
665
666                 /* Set up RESP_MODE to non-copy addresses */
667                 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
668                 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
669                 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
670                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
671
672                 /* program default cache read and write policy */
673                 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
674                 /* clean read policy and write policy bits */
675                 temp &= 0xFF0FFF;
676                 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
677                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
678
679                 if (!amdgpu_sriov_vf(adev)) {
680                         /* unhalt engine */
681                         temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
682                         temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
683                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
684                 }
685
686                 /* enable DMA RB */
687                 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
688                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
689
690                 ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
691                 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
692 #ifdef __BIG_ENDIAN
693                 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
694 #endif
695                 /* enable DMA IBs */
696                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
697
698                 ring->sched.ready = true;
699
700                 if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
701                         sdma_v5_0_ctx_switch_enable(adev, true);
702                         sdma_v5_0_enable(adev, true);
703                 }
704
705                 r = amdgpu_ring_test_ring(ring);
706                 if (r) {
707                         ring->sched.ready = false;
708                         return r;
709                 }
710
711                 if (adev->mman.buffer_funcs_ring == ring)
712                         amdgpu_ttm_set_buffer_funcs_status(adev, true);
713         }
714
715         return 0;
716 }
717
718 /**
719  * sdma_v5_0_rlc_resume - setup and start the async dma engines
720  *
721  * @adev: amdgpu_device pointer
722  *
723  * Set up the compute DMA queues and enable them (NAVI10).
724  * Returns 0 for success, error for failure.
725  */
726 static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
727 {
728         return 0;
729 }
730
731 /**
732  * sdma_v5_0_load_microcode - load the sDMA ME ucode
733  *
734  * @adev: amdgpu_device pointer
735  *
736  * Loads the sDMA0/1 ucode.
737  * Returns 0 for success, -EINVAL if the ucode is not available.
738  */
739 static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
740 {
741         const struct sdma_firmware_header_v1_0 *hdr;
742         const __le32 *fw_data;
743         u32 fw_size;
744         int i, j;
745
746         /* halt the MEs */
747         sdma_v5_0_enable(adev, false);
748
749         for (i = 0; i < adev->sdma.num_instances; i++) {
750                 if (!adev->sdma.instance[i].fw)
751                         return -EINVAL;
752
753                 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
754                 amdgpu_ucode_print_sdma_hdr(&hdr->header);
755                 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
756
757                 fw_data = (const __le32 *)
758                         (adev->sdma.instance[i].fw->data +
759                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
760
761                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
762
763                 for (j = 0; j < fw_size; j++) {
764                         if (amdgpu_emu_mode == 1 && j % 500 == 0)
765                                 msleep(1);
766                         WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
767                 }
768
769                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
770         }
771
772         return 0;
773 }
774
775 /**
776  * sdma_v5_0_start - setup and start the async dma engines
777  *
778  * @adev: amdgpu_device pointer
779  *
780  * Set up the DMA engines and enable them (NAVI10).
781  * Returns 0 for success, error for failure.
782  */
783 static int sdma_v5_0_start(struct amdgpu_device *adev)
784 {
785         int r = 0;
786
787         if (amdgpu_sriov_vf(adev)) {
788                 sdma_v5_0_ctx_switch_enable(adev, false);
789                 sdma_v5_0_enable(adev, false);
790
791                 /* set RB registers */
792                 r = sdma_v5_0_gfx_resume(adev);
793                 return r;
794         }
795
796         if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
797                 r = sdma_v5_0_load_microcode(adev);
798                 if (r)
799                         return r;
800
801                 /* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
802                 if (amdgpu_emu_mode == 1 && adev->pdev->device == 0x4d)
803                         msleep(1000);
804         }
805
806         /* unhalt the MEs */
807         sdma_v5_0_enable(adev, true);
808         /* enable sdma ring preemption */
809         sdma_v5_0_ctx_switch_enable(adev, true);
810
811         /* start the gfx rings and rlc compute queues */
812         r = sdma_v5_0_gfx_resume(adev);
813         if (r)
814                 return r;
815         r = sdma_v5_0_rlc_resume(adev);
816
817         return r;
818 }
819
820 /**
821  * sdma_v5_0_ring_test_ring - simple async dma engine test
822  *
823  * @ring: amdgpu_ring structure holding ring information
824  *
825  * Test the DMA engine by using it to write a
826  * value to memory (NAVI10).
827  * Returns 0 for success, error for failure.
828  */
829 static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
830 {
831         struct amdgpu_device *adev = ring->adev;
832         unsigned i;
833         unsigned index;
834         int r;
835         u32 tmp;
836         u64 gpu_addr;
837
838         r = amdgpu_device_wb_get(adev, &index);
839         if (r) {
840                 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
841                 return r;
842         }
843
844         gpu_addr = adev->wb.gpu_addr + (index * 4);
845         tmp = 0xCAFEDEAD;
846         adev->wb.wb[index] = cpu_to_le32(tmp);
847
848         r = amdgpu_ring_alloc(ring, 5);
849         if (r) {
850                 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
851                 amdgpu_device_wb_free(adev, index);
852                 return r;
853         }
854
855         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
856                           SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
857         amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
858         amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
859         amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
860         amdgpu_ring_write(ring, 0xDEADBEEF);
861         amdgpu_ring_commit(ring);
862
863         for (i = 0; i < adev->usec_timeout; i++) {
864                 tmp = le32_to_cpu(adev->wb.wb[index]);
865                 if (tmp == 0xDEADBEEF)
866                         break;
867                 if (amdgpu_emu_mode == 1)
868                         msleep(1);
869                 else
870                         DRM_UDELAY(1);
871         }
872
873         if (i < adev->usec_timeout) {
874                 if (amdgpu_emu_mode == 1)
875                         DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
876                 else
877                         DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
878         } else {
879                 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
880                           ring->idx, tmp);
881                 r = -EINVAL;
882         }
883         amdgpu_device_wb_free(adev, index);
884
885         return r;
886 }
887
888 /**
889  * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
890  *
891  * @ring: amdgpu_ring structure holding ring information
892  *
893  * Test a simple IB in the DMA ring (NAVI10).
894  * Returns 0 on success, error on failure.
895  */
896 static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
897 {
898         struct amdgpu_device *adev = ring->adev;
899         struct amdgpu_ib ib;
900         struct dma_fence *f = NULL;
901         unsigned index;
902         long r;
903         u32 tmp = 0;
904         u64 gpu_addr;
905
906         r = amdgpu_device_wb_get(adev, &index);
907         if (r) {
908                 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
909                 return r;
910         }
911
912         gpu_addr = adev->wb.gpu_addr + (index * 4);
913         tmp = 0xCAFEDEAD;
914         adev->wb.wb[index] = cpu_to_le32(tmp);
915         memset(&ib, 0, sizeof(ib));
916         r = amdgpu_ib_get(adev, NULL, 256, &ib);
917         if (r) {
918                 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
919                 goto err0;
920         }
921
922         ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
923                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
924         ib.ptr[1] = lower_32_bits(gpu_addr);
925         ib.ptr[2] = upper_32_bits(gpu_addr);
926         ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
927         ib.ptr[4] = 0xDEADBEEF;
928         ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
929         ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
930         ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
931         ib.length_dw = 8;
932
933         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
934         if (r)
935                 goto err1;
936
937         r = dma_fence_wait_timeout(f, false, timeout);
938         if (r == 0) {
939                 DRM_ERROR("amdgpu: IB test timed out\n");
940                 r = -ETIMEDOUT;
941                 goto err1;
942         } else if (r < 0) {
943                 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
944                 goto err1;
945         }
946         tmp = le32_to_cpu(adev->wb.wb[index]);
947         if (tmp == 0xDEADBEEF) {
948                 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
949                 r = 0;
950         } else {
951                 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
952                 r = -EINVAL;
953         }
954
955 err1:
956         amdgpu_ib_free(adev, &ib, NULL);
957         dma_fence_put(f);
958 err0:
959         amdgpu_device_wb_free(adev, index);
960         return r;
961 }
962
963
964 /**
965  * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
966  *
967  * @ib: indirect buffer to fill with commands
968  * @pe: addr of the page entry
969  * @src: src addr to copy from
970  * @count: number of page entries to update
971  *
972  * Update PTEs by copying them from the GART using sDMA (NAVI10).
973  */
974 static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
975                                   uint64_t pe, uint64_t src,
976                                   unsigned count)
977 {
978         unsigned bytes = count * 8;
979
980         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
981                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
982         ib->ptr[ib->length_dw++] = bytes - 1;
983         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
984         ib->ptr[ib->length_dw++] = lower_32_bits(src);
985         ib->ptr[ib->length_dw++] = upper_32_bits(src);
986         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
987         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
988
989 }
990
991 /**
992  * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
993  *
994  * @ib: indirect buffer to fill with commands
995  * @pe: addr of the page entry
996  * @value: value to write into the page entries
997  * @count: number of page entries to update
998  * @incr: increase next addr by incr bytes
1000  *
1001  * Update PTEs by writing them manually using sDMA (NAVI10).
1002  */
1003 static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1004                                    uint64_t value, unsigned count,
1005                                    uint32_t incr)
1006 {
1007         unsigned ndw = count * 2;
1008
1009         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1010                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1011         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1012         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1013         ib->ptr[ib->length_dw++] = ndw - 1;
1014         for (; ndw > 0; ndw -= 2) {
1015                 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1016                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1017                 value += incr;
1018         }
1019 }
1020
1021 /**
1022  * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
1023  *
1024  * @ib: indirect buffer to fill with commands
1025  * @pe: addr of the page entry
1026  * @addr: dst addr to write into pe
1027  * @count: number of page entries to update
1028  * @incr: increase next addr by incr bytes
1029  * @flags: access flags
1030  *
1031  * Update the page tables using sDMA (NAVI10).
1032  */
1033 static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1034                                      uint64_t pe,
1035                                      uint64_t addr, unsigned count,
1036                                      uint32_t incr, uint64_t flags)
1037 {
1038         /* for physically contiguous pages (vram) */
1039         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1040         ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1041         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1042         ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1043         ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1044         ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1045         ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1046         ib->ptr[ib->length_dw++] = incr; /* increment size */
1047         ib->ptr[ib->length_dw++] = 0;
1048         ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1049 }
1050
1051 /**
1052  * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw
1053  *
1054  * @ib: indirect buffer to fill with padding
1055  *
1056  */
1057 static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1058 {
1059         struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1060         u32 pad_count;
1061         int i;
1062
1063         pad_count = (8 - (ib->length_dw & 0x7)) % 8;
1064         for (i = 0; i < pad_count; i++)
1065                 if (sdma && sdma->burst_nop && (i == 0))
1066                         ib->ptr[ib->length_dw++] =
1067                                 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1068                                 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1069                 else
1070                         ib->ptr[ib->length_dw++] =
1071                                 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1072 }
1073
1074
1075 /**
1076  * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
1077  *
1078  * @ring: amdgpu_ring pointer
1079  *
1080  * Make sure all previous operations are completed (NAVI10).
1081  */
1082 static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1083 {
1084         uint32_t seq = ring->fence_drv.sync_seq;
1085         uint64_t addr = ring->fence_drv.gpu_addr;
1086
1087         /* wait for idle */
1088         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1089                           SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1090                           SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1091                           SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1092         amdgpu_ring_write(ring, addr & 0xfffffffc);
1093         amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1094         amdgpu_ring_write(ring, seq); /* reference */
1095         amdgpu_ring_write(ring, 0xffffffff); /* mask */
1096         amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1097                           SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1098 }
1099
1100
1101 /**
1102  * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
1103  *
1104  * @ring: amdgpu_ring pointer
1105  * @vmid: VMID to flush, @pd_addr: new page directory base address
1106  *
1107  * Update the page table base and flush the VM TLB
1108  * using sDMA (NAVI10).
1109  */
1110 static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1111                                          unsigned vmid, uint64_t pd_addr)
1112 {
1113         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1114 }
1115
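/* Emit an SRBM_WRITE packet to write @val into register @reg. */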
1116 static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
1117                                      uint32_t reg, uint32_t val)
1118 {
1119         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1120                           SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1121         amdgpu_ring_write(ring, reg);
1122         amdgpu_ring_write(ring, val);
1123 }
1124
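/* Emit a POLL_REGMEM packet that waits until (@reg & @mask) == @val. */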
1125 static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1126                                          uint32_t val, uint32_t mask)
1127 {
1128         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1129                           SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1130                           SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1131         amdgpu_ring_write(ring, reg << 2);
1132         amdgpu_ring_write(ring, 0);
1133         amdgpu_ring_write(ring, val); /* reference */
1134         amdgpu_ring_write(ring, mask); /* mask */
1135         amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1136                           SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1137 }
1138
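/* Early init: Navi1x carries two SDMA instances; hook up the ring, buffer,
 * VM PTE and interrupt function tables.
 */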
1139 static int sdma_v5_0_early_init(void *handle)
1140 {
1141         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1142
1143         adev->sdma.num_instances = 2;
1144
1145         sdma_v5_0_set_ring_funcs(adev);
1146         sdma_v5_0_set_buffer_funcs(adev);
1147         sdma_v5_0_set_vm_pte_funcs(adev);
1148         sdma_v5_0_set_irq_funcs(adev);
1149
1150         return 0;
1151 }
1152
1153
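/* SW init: register the trap interrupt sources for both SDMA instances, load
 * the microcode and create one doorbell-backed gfx ring per instance.
 */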
1154 static int sdma_v5_0_sw_init(void *handle)
1155 {
1156         struct amdgpu_ring *ring;
1157         int r, i;
1158         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1159
1160         /* SDMA trap event */
1161         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
1162                               SDMA0_5_0__SRCID__SDMA_TRAP,
1163                               &adev->sdma.trap_irq);
1164         if (r)
1165                 return r;
1166
1167         /* SDMA trap event */
1168         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
1169                               SDMA1_5_0__SRCID__SDMA_TRAP,
1170                               &adev->sdma.trap_irq);
1171         if (r)
1172                 return r;
1173
1174         r = sdma_v5_0_init_microcode(adev);
1175         if (r) {
1176                 DRM_ERROR("Failed to load sdma firmware!\n");
1177                 return r;
1178         }
1179
1180         for (i = 0; i < adev->sdma.num_instances; i++) {
1181                 ring = &adev->sdma.instance[i].ring;
1182                 ring->ring_obj = NULL;
1183                 ring->use_doorbell = true;
1184
1185                 DRM_INFO("use_doorbell being set to: [%s]\n",
1186                                 ring->use_doorbell?"true":"false");
1187
1188                 ring->doorbell_index = (i == 0) ?
1189                         (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
1190                         : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
1191
1192                 sprintf(ring->name, "sdma%d", i);
1193                 r = amdgpu_ring_init(adev, ring, 1024,
1194                                      &adev->sdma.trap_irq,
1195                                      (i == 0) ?
1196                                      AMDGPU_SDMA_IRQ_INSTANCE0 :
1197                                      AMDGPU_SDMA_IRQ_INSTANCE1);
1198                 if (r)
1199                         return r;
1200         }
1201
1202         return r;
1203 }
1204
1205 static int sdma_v5_0_sw_fini(void *handle)
1206 {
1207         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1208         int i;
1209
1210         for (i = 0; i < adev->sdma.num_instances; i++)
1211                 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1212
1213         return 0;
1214 }
1215
1216 static int sdma_v5_0_hw_init(void *handle)
1217 {
1218         int r;
1219         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1220
1221         sdma_v5_0_init_golden_registers(adev);
1222
1223         r = sdma_v5_0_start(adev);
1224
1225         return r;
1226 }
1227
1228 static int sdma_v5_0_hw_fini(void *handle)
1229 {
1230         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1231
1232         if (amdgpu_sriov_vf(adev))
1233                 return 0;
1234
1235         sdma_v5_0_ctx_switch_enable(adev, false);
1236         sdma_v5_0_enable(adev, false);
1237
1238         return 0;
1239 }
1240
1241 static int sdma_v5_0_suspend(void *handle)
1242 {
1243         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1244
1245         return sdma_v5_0_hw_fini(adev);
1246 }
1247
1248 static int sdma_v5_0_resume(void *handle)
1249 {
1250         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1251
1252         return sdma_v5_0_hw_init(adev);
1253 }
1254
1255 static bool sdma_v5_0_is_idle(void *handle)
1256 {
1257         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1258         u32 i;
1259
1260         for (i = 0; i < adev->sdma.num_instances; i++) {
1261                 u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1262
1263                 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1264                         return false;
1265         }
1266
1267         return true;
1268 }
1269
1270 static int sdma_v5_0_wait_for_idle(void *handle)
1271 {
1272         unsigned i;
1273         u32 sdma0, sdma1;
1274         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1275
1276         for (i = 0; i < adev->usec_timeout; i++) {
1277                 sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1278                 sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1279
1280                 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1281                         return 0;
1282                 udelay(1);
1283         }
1284         return -ETIMEDOUT;
1285 }
1286
1287 static int sdma_v5_0_soft_reset(void *handle)
1288 {
1289         /* todo */
1290
1291         return 0;
1292 }
1293
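/* Preempt the gfx queue: disable conditional execution, emit a trailing
 * fence, assert SDMAx_GFX_PREEMPT and poll for the trailing fence to signal
 * before deasserting preemption again.
 */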
1294 static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
1295 {
1296         int i, r = 0;
1297         struct amdgpu_device *adev = ring->adev;
1298         u32 index = 0;
1299         u64 sdma_gfx_preempt;
1300
1301         amdgpu_sdma_get_index_from_ring(ring, &index);
1302         if (index == 0)
1303                 sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
1304         else
1305                 sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;
1306
1307         /* assert preemption condition */
1308         amdgpu_ring_set_preempt_cond_exec(ring, false);
1309
1310         /* emit the trailing fence */
1311         ring->trail_seq += 1;
1312         amdgpu_ring_alloc(ring, 10);
1313         sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1314                                   ring->trail_seq, 0);
1315         amdgpu_ring_commit(ring);
1316
1317         /* assert IB preemption */
1318         WREG32(sdma_gfx_preempt, 1);
1319
1320         /* poll the trailing fence */
1321         for (i = 0; i < adev->usec_timeout; i++) {
1322                 if (ring->trail_seq ==
1323                     le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1324                         break;
1325                 DRM_UDELAY(1);
1326         }
1327
1328         if (i >= adev->usec_timeout) {
1329                 r = -EINVAL;
1330                 DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1331         }
1332
1333         /* deassert IB preemption */
1334         WREG32(sdma_gfx_preempt, 0);
1335
1336         /* deassert the preemption condition */
1337         amdgpu_ring_set_preempt_cond_exec(ring, true);
1338         return r;
1339 }
1340
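/* Enable or disable the trap interrupt for the requested SDMA instance via
 * SDMA0_CNTL.TRAP_ENABLE.
 */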
1341 static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
1342                                         struct amdgpu_irq_src *source,
1343                                         unsigned type,
1344                                         enum amdgpu_interrupt_state state)
1345 {
1346         u32 sdma_cntl;
1347
1348         u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
1349                 sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
1350                 sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
1351
1352         sdma_cntl = RREG32(reg_offset);
1353         sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1354                        state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1355         WREG32(reg_offset, sdma_cntl);
1356
1357         return 0;
1358 }
1359
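/**
 * sdma_v5_0_process_trap_irq - handle an SDMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Process fences on the gfx ring of the SDMA instance that raised the
 * interrupt; the compute and page queues are not handled yet.
 */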
1360 static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
1361                                       struct amdgpu_irq_src *source,
1362                                       struct amdgpu_iv_entry *entry)
1363 {
1364         DRM_DEBUG("IH: SDMA trap\n");
1365         switch (entry->client_id) {
1366         case SOC15_IH_CLIENTID_SDMA0:
1367                 switch (entry->ring_id) {
1368                 case 0:
1369                         amdgpu_fence_process(&adev->sdma.instance[0].ring);
1370                         break;
1371                 case 1:
1372                         /* XXX compute */
1373                         break;
1374                 case 2:
1375                         /* XXX compute */
1376                         break;
1377                 case 3:
1378                         /* XXX page queue */
1379                         break;
1380                 }
1381                 break;
1382         case SOC15_IH_CLIENTID_SDMA1:
1383                 switch (entry->ring_id) {
1384                 case 0:
1385                         amdgpu_fence_process(&adev->sdma.instance[1].ring);
1386                         break;
1387                 case 1:
1388                         /* XXX compute */
1389                         break;
1390                 case 2:
1391                         /* XXX compute */
1392                         break;
1393                 case 3:
1394                         /* XXX page queue */
1395                         break;
1396                 }
1397                 break;
1398         }
1399         return 0;
1400 }
1401
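/**
 * sdma_v5_0_process_illegal_inst_irq - handle an SDMA illegal instruction irq
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Stub handler: currently a no-op.
 */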
1402 static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1403                                               struct amdgpu_irq_src *source,
1404                                               struct amdgpu_iv_entry *entry)
1405 {
1406         return 0;
1407 }
1408
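/**
 * sdma_v5_0_update_medium_grain_clock_gating - toggle SDMA MGCG
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable medium grain clock gating
 *
 * Clear the SOFT_OVERRIDE bits in SDMA0_CLK_CTRL to let the clocks gate
 * (when enabled and AMD_CG_SUPPORT_SDMA_MGCG is set), or set them to
 * force the clocks on.
 */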
1409 static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
1410                                                        bool enable)
1411 {
1412         uint32_t data, def;
1413         int i;
1414
1415         for (i = 0; i < adev->sdma.num_instances; i++) {
1416                 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1417                         /* Enable sdma clock gating */
1418                         def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1419                         data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1420                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1421                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1422                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1423                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1424                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1425                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1426                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1427                         if (def != data)
1428                                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1429                 } else {
1430                         /* Disable sdma clock gating */
1431                         def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1432                         data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1433                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1434                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1435                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1436                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1437                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1438                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1439                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1440                         if (def != data)
1441                                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1442                 }
1443         }
1444 }
1445
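/**
 * sdma_v5_0_update_medium_grain_light_sleep - toggle SDMA memory light sleep
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable memory light sleep
 *
 * Set or clear MEM_POWER_OVERRIDE in SDMA0_POWER_CNTL for each instance,
 * honouring the AMD_CG_SUPPORT_SDMA_LS flag.
 */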
1446 static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
1447                                                       bool enable)
1448 {
1449         uint32_t data, def;
1450         int i;
1451
1452         for (i = 0; i < adev->sdma.num_instances; i++) {
1453                 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1454                         /* Enable sdma mem light sleep */
1455                         def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1456                         data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1457                         if (def != data)
1458                                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1459
1460                 } else {
1461                         /* Disable sdma mem light sleep */
1462                         def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1463                         data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1464                         if (def != data)
1465                                 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1466
1467                 }
1468         }
1469 }
1470
1471 static int sdma_v5_0_set_clockgating_state(void *handle,
1472                                            enum amd_clockgating_state state)
1473 {
1474         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1475
1476         if (amdgpu_sriov_vf(adev))
1477                 return 0;
1478
1479         switch (adev->asic_type) {
1480         case CHIP_NAVI10:
        case CHIP_NAVI14:
1481                 sdma_v5_0_update_medium_grain_clock_gating(adev,
1482                                 state == AMD_CG_STATE_GATE);
1483                 sdma_v5_0_update_medium_grain_light_sleep(adev,
1484                                 state == AMD_CG_STATE_GATE);
1485                 break;
1486         default:
1487                 break;
1488         }
1489
1490         return 0;
1491 }
1492
1493 static int sdma_v5_0_set_powergating_state(void *handle,
1494                                           enum amd_powergating_state state)
1495 {
1496         return 0;
1497 }
1498
1499 static void sdma_v5_0_get_clockgating_state(void *handle, u32 *flags)
1500 {
1501         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1502         int data;
1503
1504         if (amdgpu_sriov_vf(adev))
1505                 *flags = 0;
1506
1507         /* AMD_CG_SUPPORT_SDMA_MGCG */
1508         data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
1509         if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
1510                 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1511
1512         /* AMD_CG_SUPPORT_SDMA_LS */
1513         data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
1514         if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1515                 *flags |= AMD_CG_SUPPORT_SDMA_LS;
1516 }
1517
1518 const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
1519         .name = "sdma_v5_0",
1520         .early_init = sdma_v5_0_early_init,
1521         .late_init = NULL,
1522         .sw_init = sdma_v5_0_sw_init,
1523         .sw_fini = sdma_v5_0_sw_fini,
1524         .hw_init = sdma_v5_0_hw_init,
1525         .hw_fini = sdma_v5_0_hw_fini,
1526         .suspend = sdma_v5_0_suspend,
1527         .resume = sdma_v5_0_resume,
1528         .is_idle = sdma_v5_0_is_idle,
1529         .wait_for_idle = sdma_v5_0_wait_for_idle,
1530         .soft_reset = sdma_v5_0_soft_reset,
1531         .set_clockgating_state = sdma_v5_0_set_clockgating_state,
1532         .set_powergating_state = sdma_v5_0_set_powergating_state,
1533         .get_clockgating_state = sdma_v5_0_get_clockgating_state,
1534 };
1535
1536 static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
1537         .type = AMDGPU_RING_TYPE_SDMA,
1538         .align_mask = 0xf,
1539         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1540         .support_64bit_ptrs = true,
1541         .vmhub = AMDGPU_GFXHUB,
1542         .get_rptr = sdma_v5_0_ring_get_rptr,
1543         .get_wptr = sdma_v5_0_ring_get_wptr,
1544         .set_wptr = sdma_v5_0_ring_set_wptr,
1545         .emit_frame_size =
1546                 5 + /* sdma_v5_0_ring_init_cond_exec */
1547                 6 + /* sdma_v5_0_ring_emit_hdp_flush */
1548                 3 + /* hdp_invalidate */
1549                 6 + /* sdma_v5_0_ring_emit_pipeline_sync */
1550                 /* sdma_v5_0_ring_emit_vm_flush */
1551                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1552                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
1553                 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
1554         .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
1555         .emit_ib = sdma_v5_0_ring_emit_ib,
1556         .emit_fence = sdma_v5_0_ring_emit_fence,
1557         .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
1558         .emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
1559         .emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
1560         .test_ring = sdma_v5_0_ring_test_ring,
1561         .test_ib = sdma_v5_0_ring_test_ib,
1562         .insert_nop = sdma_v5_0_ring_insert_nop,
1563         .pad_ib = sdma_v5_0_ring_pad_ib,
1564         .emit_wreg = sdma_v5_0_ring_emit_wreg,
1565         .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
1566         .init_cond_exec = sdma_v5_0_ring_init_cond_exec,
1567         .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
1568         .preempt_ib = sdma_v5_0_ring_preempt_ib,
1569 };
1570
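/* Hook up the SDMA 5.0 ring callbacks for each instance's gfx ring. */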
1571 static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
1572 {
1573         int i;
1574
1575         for (i = 0; i < adev->sdma.num_instances; i++) {
1576                 adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
1577                 adev->sdma.instance[i].ring.me = i;
1578         }
1579 }
1580
1581 static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
1582         .set = sdma_v5_0_set_trap_irq_state,
1583         .process = sdma_v5_0_process_trap_irq,
1584 };
1585
1586 static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
1587         .process = sdma_v5_0_process_illegal_inst_irq,
1588 };
1589
1590 static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
1591 {
1592         adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
1593         adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
1594         adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
1595 }
1596
1597 /**
1598  * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
1599  *
1600  * @ib: indirect buffer to copy to
1601  * @src_offset: src GPU address
1602  * @dst_offset: dst GPU address
1603  * @byte_count: number of bytes to xfer
1604  *
1605  * Copy GPU buffers using the DMA engine (NAVI10).
1606  * Used by the amdgpu ttm implementation to move pages if
1607  * registered as the asic copy callback.
1608  */
1609 static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
1610                                        uint64_t src_offset,
1611                                        uint64_t dst_offset,
1612                                        uint32_t byte_count)
1613 {
1614         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1615                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1616         ib->ptr[ib->length_dw++] = byte_count - 1;
1617         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1618         ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1619         ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1620         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1621         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1622 }
1623
1624 /**
1625  * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
1626  *
1627  * @ib: indirect buffer to fill
1628  * @src_data: value to write to buffer
1629  * @dst_offset: dst GPU address
1630  * @byte_count: number of bytes to fill
1631  *
1632  * Fill GPU buffers using the DMA engine (NAVI10).
1633  */
1634 static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
1635                                        uint32_t src_data,
1636                                        uint64_t dst_offset,
1637                                        uint32_t byte_count)
1638 {
1639         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1640         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1641         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1642         ib->ptr[ib->length_dw++] = src_data;
1643         ib->ptr[ib->length_dw++] = byte_count - 1;
1644 }
1645
1646 static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
1647         .copy_max_bytes = 0x400000,
1648         .copy_num_dw = 7,
1649         .emit_copy_buffer = sdma_v5_0_emit_copy_buffer,
1650
1651         .fill_max_bytes = 0x400000,
1652         .fill_num_dw = 5,
1653         .emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
1654 };
1655
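/*
 * Register SDMA as the buffer move/fill engine for TTM, using the gfx
 * ring of instance 0, unless another engine has already claimed the job.
 */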
1656 static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
1657 {
1658         if (adev->mman.buffer_funcs == NULL) {
1659                 adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
1660                 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1661         }
1662 }
1663
1664 static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
1665         .copy_pte_num_dw = 7,
1666         .copy_pte = sdma_v5_0_vm_copy_pte,
1667         .write_pte = sdma_v5_0_vm_write_pte,
1668         .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
1669 };
1670
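/*
 * Register SDMA for GPUVM page table updates and expose one
 * kernel-priority run queue per SDMA instance to the VM manager.
 */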
1671 static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1672 {
1673         struct drm_gpu_scheduler *sched;
1674         unsigned i;
1675
1676         if (adev->vm_manager.vm_pte_funcs == NULL) {
1677                 adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
1678                 for (i = 0; i < adev->sdma.num_instances; i++) {
1679                         sched = &adev->sdma.instance[i].ring.sched;
1680                         adev->vm_manager.vm_pte_rqs[i] =
1681                                 &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
1682                 }
1683                 adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
1684         }
1685 }
1686
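/*
 * SDMA 5.0 IP block descriptor; the SoC setup code registers it with the
 * IP framework (presumably via amdgpu_device_ip_block_add() in nv.c).
 */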
1687 const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
1688         .type = AMD_IP_BLOCK_TYPE_SDMA,
1689         .major = 5,
1690         .minor = 0,
1691         .rev = 0,
1692         .funcs = &sdma_v5_0_ip_funcs,
1693 };