// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */


#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

#include <linux/devfreq.h>

#define GPU_PAS_ID 13

static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Check that the GMU is idle */
	if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
		return false;

	/* Check that the CX master is idle */
	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
			~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
		return false;

	return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}

bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu, ring))
		return false;

	if (spin_until(_a6xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A6XX_RBBM_STATUS),
			gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
			gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
			gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
		return false;
	}

	return true;
}

static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	uint32_t wptr;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/* Make sure to wrap wptr if we need to */
	wptr = get_wptr(ring);

	spin_unlock_irqrestore(&ring->lock, flags);

	/* Make sure everything is posted before making a decision */
	mb();

	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}
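
/*
 * get_stats_counter() emits a CP_REG_TO_MEM packet so that the CP snapshots
 * a counter register pair into the per-ring stats area at submit time.  In
 * the packet header below, bit 30 requests a 64-bit copy and the value
 * shifted into bit 18 is the register count (2 dwords here), per the adreno
 * PM4 packet encoding.
 */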
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
		u64 iova)
{
	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
	OUT_RING(ring, counter | (1 << 30) | (2 << 18));
	OUT_RING(ring, lower_32_bits(iova));
	OUT_RING(ring, upper_32_bits(iova));
}

static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
	struct msm_file_private *ctx)
{
	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		rbmemptr_stats(ring, index, cpcycles_start));

	/*
	 * For PM4 the GMU register offsets are calculated from the base of the
	 * GPU registers so we need to add 0x1a800 to the register value on A630
	 * to get the right value from PM4.
	 */
	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
		rbmemptr_stats(ring, index, alwayson_start));

	/* Invalidate CCU depth and color */
	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);

	/* Submit the commands */
	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == ctx)
				break;
			/* fall through */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			break;
		}
	}

	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		rbmemptr_stats(ring, index, cpcycles_end));
	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
		rbmemptr_stats(ring, index, alwayson_end));

	/* Write the fence to the scratch register */
	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->seqno);

	/*
	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
	 * timestamp is written to memory and then trigger the interrupt.
	 */
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
	OUT_RING(ring, submit->seqno);

	trace_msm_gpu_submit_flush(submit,
		gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
			REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));

	a6xx_flush(gpu, ring);
}

static const struct {
	u32 offset;
	u32 value;
} a6xx_hwcg[] = {
	{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
	{REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
	{REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
	{REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
};
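
/*
 * a6xx_set_hwcg() programs the per-block hardware clock gating controls.
 * When enabling, each register in the a6xx_hwcg table above is written with
 * its tuned value; when disabling, every register is cleared to 0.  The SP
 * clock is parked via the GMU SPTPRAC control while the table is written,
 * and the top-level RBBM_CLOCK_CNTL switch is flipped last.
 */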
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	unsigned int i;
	u32 val;

	val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);

	/* Don't re-program the registers if they are already correct */
	if ((!state && !val) || (state && (val == 0x8aa8aa02)))
		return;

	/* Disable SP clock before programming HWCG registers */
	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
		gpu_write(gpu, a6xx_hwcg[i].offset,
			state ? a6xx_hwcg[i].value : 0);

	/* Enable SP clock */
	gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
}
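
/*
 * a6xx_cp_init() sends the CP_ME_INIT packet that initializes the CP
 * microcode state (the individual ordinals are annotated inline below),
 * then flushes the ring and waits for the GPU to go idle so that a failure
 * is reported before any real work is submitted.
 */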
static int a6xx_cp_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002f);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* No workarounds enabled */
	OUT_RING(ring, 0x00000000);

	/* Pad rest of the cmds with 0's */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	a6xx_flush(gpu, ring);
	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
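
/*
 * a6xx_ucode_init() lazily pins the SQE (CP sequencer) firmware into a GEM
 * buffer the first time the GPU is brought up and points the CP instruction
 * base at its GPU address; subsequent resumes reuse the same buffer.
 */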
static int a6xx_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (!a6xx_gpu->sqe_bo) {
		a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
			adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);

		if (IS_ERR(a6xx_gpu->sqe_bo)) {
			int ret = PTR_ERR(a6xx_gpu->sqe_bo);

			a6xx_gpu->sqe_bo = NULL;
			DRM_DEV_ERROR(&gpu->pdev->dev,
				"Could not allocate SQE ucode: %d\n", ret);

			return ret;
		}

		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
	}

	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
		REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);

	return 0;
}

static int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	int ret;

	/* The zap shader only needs to be loaded into the secure world once */
	if (loaded)
		return 0;

	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);

	loaded = !ret;

	return ret;
}

#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	  A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
	  A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	  A6XX_RBBM_INT_0_MASK_CP_IB2 | \
	  A6XX_RBBM_INT_0_MASK_CP_IB1 | \
	  A6XX_RBBM_INT_0_MASK_CP_RB | \
	  A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	  A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
	  A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
	  A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	  A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
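
/*
 * a6xx_hw_init() runs every time the GPU comes out of a power collapse: it
 * holds a GMU "GPU set" OOB vote while it reprograms the static GPU state,
 * loads the SQE ucode, starts the CP and, when available, uses the zap
 * shader to drop out of secure mode.
 */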
static int a6xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int ret;

	/* Make sure the GMU keeps the GPU on while we set it up */
	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);

	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

	/* Turn on 64 bit addressing for all blocks */
	gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);

	/* enable hardware clockgating */
	a6xx_set_hwcg(gpu, true);

	gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
	gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
	gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
		REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);

	gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
		REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
		0x00100000 + adreno_gpu->gmem - 1);

	gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
	gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);

	gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
	gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);

	/* Setting the mem pool size */
	gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);

	/* Setting the primFifo thresholds default values */
	gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

	/* Set the AHB default slave response to "ERROR" */
	gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);

	/* Select CP0 to always count cycles */
	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);

	/* Enable fault detection */
	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
		(1 << 30) | 0x1fffff);

	gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);

	/* Protect registers from the CP */
	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
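
	/*
	 * The CP_PROTECT entries below carve the register space into ranges
	 * the CP is not allowed to touch: A6XX_PROTECT_RW(base, len) blocks
	 * both reads and writes from the CP, while A6XX_PROTECT_RDONLY(base,
	 * len) still allows reads.  Violations raise the protection-error
	 * interrupt handled in a6xx_cp_hw_err_irq().
	 */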
	gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
		A6XX_PROTECT_RDONLY(0x600, 0x51));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
		A6XX_PROTECT_RDONLY(0xfc00, 0x3));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
		A6XX_PROTECT_RDONLY(0x0, 0x4f9));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
		A6XX_PROTECT_RDONLY(0x501, 0xa));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
		A6XX_PROTECT_RDONLY(0x511, 0x44));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
		A6XX_PROTECT_RW(0xbe20, 0x11f3));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
		A6XX_PROTECT_RDONLY(0x980, 0x4));
	gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));

	/* Enable interrupts */
	gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		goto out;

	ret = a6xx_ucode_init(gpu);
	if (ret)
		goto out;

	/* Always come up on rb 0 */
	a6xx_gpu->cur_ring = gpu->rb[0];

	/* Enable the SQE to start the CP engine */
	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);

	ret = a6xx_cp_init(gpu);
	if (ret)
		goto out;

	/*
	 * Try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
	ret = a6xx_zap_shader_init(gpu);
	if (!ret) {
		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
		OUT_RING(gpu->rb[0], 0x00000000);

		a6xx_flush(gpu, gpu->rb[0]);
		if (!a6xx_idle(gpu, gpu->rb[0]))
			return -EINVAL;
	} else {
		/* Print a warning so if we die, we know why */
		dev_warn_once(gpu->dev->dev,
			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
		ret = 0;
	}

out:
	/*
	 * Tell the GMU that we are done touching the GPU and it can start power
	 * management
	 */
	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	/* Take the GMU out of its special boot mode */
	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

static void a6xx_dump(struct msm_gpu *gpu)
{
	DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
			gpu_read(gpu, REG_A6XX_RBBM_STATUS));
	adreno_dump(gpu);
}

#define VBIF_RESET_ACK_TIMEOUT	100
#define VBIF_RESET_ACK_MASK	0x00f0

static void a6xx_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++)
		DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));

	if (hang_debug)
		a6xx_dump(gpu);

	/*
	 * Turn off keep alive that might have been enabled by the hang
	 * detection
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	msm_gpu_hw_init(gpu);
}
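
/*
 * Handler registered with the GPU address space's IOMMU: on a pagefault it
 * logs the faulting iova, the fault flags and CP scratch registers 4-7 to
 * aid debugging, and reports the fault back to the IOMMU layer.
 */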
static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
{
	struct msm_gpu *gpu = arg;

	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
			iova, flags,
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));

	return -EFAULT;
}

static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);

	if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
		u32 val;

		gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
		val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
		dev_err_ratelimited(&gpu->pdev->dev,
			"CP | opcode error | possible opcode=0x%8.8X\n",
			val);
	}

	if (status & A6XX_CP_INT_CP_UCODE_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev,
			"CP ucode error interrupt\n");

	if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
			gpu_read(gpu, REG_A6XX_CP_HW_FAULT));

	if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
		u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);

		dev_err_ratelimited(&gpu->pdev->dev,
			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
			val & (1 << 20) ? "READ" : "WRITE",
			(val & 0x3ffff), val);
	}

	if (status & A6XX_CP_INT_CP_AHB_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");

	if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");

	if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
}

static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

	/*
	 * Force the GPU to stay on until after we finish
	 * collecting information
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);

	DRM_DEV_ERROR(&gpu->pdev->dev,
		"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		ring ? ring->id : -1, ring ? ring->seqno : 0,
		gpu_read(gpu, REG_A6XX_RBBM_STATUS),
		gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
		gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
		gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
		gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	queue_work(priv->wq, &gpu->recover_work);
}
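
/*
 * Top-level GPU interrupt handler: ack everything pending in
 * RBBM_INT_0_STATUS first, then fan the individual status bits out to the
 * hang-detect, CP error and retire paths below.
 */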
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
	u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);

	gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
		a6xx_fault_detect_irq(gpu);

	if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
		dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");

	if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a6xx_cp_hw_err_irq(gpu);

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");

	if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");

	if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
		msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
		REG_A6XX_CP_RB_RPTR_ADDR_LO),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
		REG_A6XX_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};

static int a6xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	int ret;

	gpu->needs_hw_init = true;

	ret = a6xx_gmu_resume(a6xx_gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	return 0;
}

static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	return a6xx_gmu_stop(a6xx_gpu);
}

static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Force the GPU power on so we can read this register */
	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
		REG_A6XX_RBBM_PERFCTR_CP_0_HI);

	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);

	return 0;
}

static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	return a6xx_gpu->cur_ring;
}

static void a6xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	if (a6xx_gpu->sqe_bo) {
		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
		drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
	}

	a6xx_gmu_remove(a6xx_gpu);

	adreno_gpu_cleanup(adreno_gpu);

	kfree(a6xx_gpu);
}

static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	u64 busy_cycles, busy_time;

	busy_cycles = gmu_read64(&a6xx_gpu->gmu,
			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
			REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
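
	/*
	 * The XOCLK power counter ticks at the 19.2 MHz always-on clock, so
	 * multiplying the cycle delta by 10 and dividing by 192 converts it
	 * into microseconds of busy time (cycles / 19.2).
	 */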
	busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
	do_div(busy_time, 192);

	gpu->devfreq.busy_cycles = busy_cycles;

	if (WARN_ON(busy_time > ~0LU))
		return ~0LU;

	return (unsigned long)busy_time;
}

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a6xx_hw_init,
		.pm_suspend = a6xx_pm_suspend,
		.pm_resume = a6xx_pm_resume,
		.recover = a6xx_recover,
		.submit = a6xx_submit,
		.flush = a6xx_flush,
		.active_ring = a6xx_active_ring,
		.irq = a6xx_irq,
		.destroy = a6xx_destroy,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
		.show = a6xx_show,
#endif
		.gpu_busy = a6xx_gpu_busy,
		.gpu_get_freq = a6xx_gmu_get_freq,
		.gpu_set_freq = a6xx_gmu_set_freq,
#if defined(CONFIG_DRM_MSM_GPU_STATE)
		.gpu_state_get = a6xx_gpu_state_get,
		.gpu_state_put = a6xx_gpu_state_put,
#endif
	},
	.get_timestamp = a6xx_get_timestamp,
};
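
/*
 * a6xx_gpu_init() is the entry point called from the adreno device probe:
 * it allocates the a6xx_gpu wrapper, registers the funcs table above, looks
 * up the GMU via the "qcom,gmu" phandle and hooks up the IOMMU fault
 * handler.
 */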
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct device_node *node;
	struct a6xx_gpu *a6xx_gpu;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
	if (!a6xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a6xx_gpu->base;
	gpu = &adreno_gpu->base;

	adreno_gpu->registers = NULL;
	adreno_gpu->reg_offsets = a6xx_register_offsets;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	/* Check if there is a GMU phandle and set it up */
	node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);

	/* FIXME: How do we gracefully handle this? */
	if (!node)
		return ERR_PTR(-ENXIO);

	ret = a6xx_gmu_init(a6xx_gpu, node);
	if (ret) {
		a6xx_destroy(&(a6xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	if (gpu->aspace)
		msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
				a6xx_fault_handler);

	return gpu;
}