/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>

#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"

extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);

/* Peripheral Authentication Service id used for the GPU zap shader */
#define GPU_PAS_ID 13
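/*
 * The zap shader is a small firmware blob that, once authenticated by the
 * secure world, lets the CP switch the GPU out of secure mode.
 * zap_shader_load_mdt() below copies it into the reserved memory region
 * named by the "zap-shader"/"memory-region" device tree nodes and then asks
 * the secure world to authenticate and start it through the SCM PAS
 * (qcom_scm_pas_auth_and_reset) interface.
 */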
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
	const struct firmware *fw;
	struct device_node *np;
	void *mem_region = NULL;

	if (!IS_ENABLED(CONFIG_ARCH_QCOM))

	np = of_get_child_by_name(dev->of_node, "zap-shader");

	np = of_parse_phandle(np, "memory-region", 0);

	ret = of_address_to_resource(np, 0, &r);

	mem_size = resource_size(&r);

	/* Request the MDT file for the firmware */
	ret = request_firmware(&fw, fwname, dev);
		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);

	/* Figure out how much memory we need */
	mem_size = qcom_mdt_get_size(fw);

	/* Allocate memory for the firmware image */
	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);

	/* Load the rest of the MDT */
	ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, mem_region, mem_phys,

	/* Send the image to the secure world */
	ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned int i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			if (priv->lastctx == ctx)
				break;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);

	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
	OUT_RING(ring, submit->fence->seqno);
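	/*
	 * Have the CP write the fence seqno back to the fence memptr and
	 * raise a CACHE_FLUSH_TS interrupt (the 1 << 31 bit) once everything
	 * up to this point has retired, so the CPU side can retire the submit.
	 */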
	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
	OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
	OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
	OUT_RING(ring, submit->fence->seqno);

	gpu->funcs->flush(gpu);
static const struct {
	u32 offset;
	u32 value;
} a5xx_hwcg[] = {
	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};
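/*
 * a5xx_set_hwcg() walks the table above and either programs each block's
 * recommended clock gating value (state == true) or writes zero to disable
 * gating entirely, then toggles the global RBBM_CLOCK_CNTL/ISDB_CNT knobs to
 * match. Disabling is used around register dumps, where gated clocks would
 * make reads unreliable.
 */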
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)

	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
		gpu_write(gpu, a5xx_hwcg[i].offset,
			state ? a5xx_hwcg[i].value : 0);

	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
static int a5xx_me_init(struct msm_gpu *gpu)
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_ringbuffer *ring = gpu->rb;

	OUT_PKT7(ring, CP_ME_INIT, 8);

	OUT_RING(ring, 0x0000002F);

	/* Enable multiple hardware contexts */
	OUT_RING(ring, 0x00000003);

	/* Enable error detection */
	OUT_RING(ring, 0x20000000);

	/* Don't enable header dump */
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	/* Specify workarounds for various microcode issues */
	if (adreno_is_a530(adreno_gpu)) {
		/* Workaround for token end syncs
		 * Force a WFI after every direct-render 3D mode draw and every
		 * 2D mode 3 draw
		 */
		OUT_RING(ring, 0x0000000B);
	} else {
		/* No workarounds enabled */
		OUT_RING(ring, 0x00000000);
	}

	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu);

	return a5xx_idle(gpu) ? 0 : -EINVAL;
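/*
 * Copy a PM4/PFP firmware image into a GPU-readable buffer object and return
 * its GPU address. The leading dword of the firmware file is not uploaded
 * (it looks like a version/header word rather than instructions), which is
 * why the copy starts at fw->data[4] and the buffer is fw->size - 4 bytes.
 */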
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
	struct drm_gem_object *bo;

	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);
static int a5xx_ucode_init(struct msm_gpu *gpu)
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	if (!a5xx_gpu->pm4_bo) {
		a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
			&a5xx_gpu->pm4_iova);

		if (IS_ERR(a5xx_gpu->pm4_bo)) {
			ret = PTR_ERR(a5xx_gpu->pm4_bo);
			a5xx_gpu->pm4_bo = NULL;
			dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",

	if (!a5xx_gpu->pfp_bo) {
		a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
			&a5xx_gpu->pfp_iova);

		if (IS_ERR(a5xx_gpu->pfp_bo)) {
			ret = PTR_ERR(a5xx_gpu->pfp_bo);
			a5xx_gpu->pfp_bo = NULL;
			dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",

	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);

	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
		REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
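/*
 * Once the zap shader has been loaded and authenticated once, later power
 * cycles only need an SCM "set remote state" call to bring it back up:
 * a5xx_zap_shader_resume() handles that fast path, while
 * a5xx_zap_shader_init() falls back to the full MDT load the first time.
 */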
#define SCM_GPU_ZAP_SHADER_RESUME 0

static int a5xx_zap_shader_resume(struct msm_gpu *gpu)

	ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
		DRM_ERROR("%s: zap-shader resume failed: %d\n",

static int a5xx_zap_shader_init(struct msm_gpu *gpu)
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct platform_device *pdev = gpu->pdev;

	/*
	 * If the zap shader is already loaded into memory we just need to kick
	 * the remote processor to reinitialize it
	 */
		return a5xx_zap_shader_resume(gpu);

	/* We need SCM to be able to load the firmware */
	if (!qcom_scm_is_available()) {
		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
		return -EPROBE_DEFER;
	}

	/* Each GPU has a target specific zap shader firmware name to use */
	if (!adreno_gpu->info->zapfw) {
		DRM_DEV_ERROR(&pdev->dev,
			"Zap shader firmware file not specified for this target\n");

	ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
	A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
	A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
	A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
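/*
 * One-time hardware setup for the A5XX family: program the VBIF arbitration
 * and fault detection masks, size the UCHE/GMEM apertures, fence off
 * CP-protected register ranges, load the PM4/PFP microcode and the GPMU
 * firmware, and finally try to drop out of secure mode (via the zap shader
 * if one is available, otherwise by writing SECVID_TRUST_CNTL directly).
 */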
static int a5xx_hw_init(struct msm_gpu *gpu)
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/* Enable RBBM error reporting bits */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
	if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
		/*
		 * Mask out the activity signals from RB1-3 to avoid false
		 * positives
		 */
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
	/* Enable fault detection */
	gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,

	/* Turn on performance counters */
	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);

	/* Increase VFD cache access so LRZ and other data gets evicted less */
	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);

	/* Disable L2 bypass in the UCHE */
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);

	/* Set the GMEM VA range (0 to gpu->gmem) */
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
		0x00100000 + adreno_gpu->gmem - 1);
	gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
	gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));

	if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);

	/* Enable USE_RETENTION_FLOPS */
	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);

	/* Enable ME/PFP split notification */
	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);

	a5xx_set_hwcg(gpu, true);

	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

	/* Set the highest bank bit */
	gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
	gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);

	/* Protect registers from the CP */
	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);

	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));

	/* Content protect */
	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));

	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));

	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));

	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));

	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));

	if (adreno_is_a530(adreno_gpu))
		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
			ADRENO_PROTECT_RW(0x10000, 0x8000));

	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block
	 * off part of the virtual memory space.
	 */
	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
	ret = adreno_hw_init(gpu);

	a5xx_gpmu_ucode_init(gpu);

	ret = a5xx_ucode_init(gpu);

	/* Disable the interrupts through the initial bringup stage */
	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);

	/* Clear ME_HALT to start the micro engine */
	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
	ret = a5xx_me_init(gpu);

	ret = a5xx_power_init(gpu);
	/*
	 * Send a pipeline event stat to get misbehaving counters to start
	 * ticking correctly
	 */
	if (adreno_is_a530(adreno_gpu)) {
		OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
		OUT_RING(gpu->rb, 0x0F);

		gpu->funcs->flush(gpu);
	}
	/*
	 * Try to load a zap shader into the secure world. If successful
	 * we can use the CP to switch out of secure mode. If not then we
	 * have no recourse but to try to switch ourselves out manually. If we
	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
	 * be blocked and a permissions violation will soon follow.
	 */
	ret = a5xx_zap_shader_init(gpu);
	if (!ret) {
		OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1);
		OUT_RING(gpu->rb, 0x00000000);

		gpu->funcs->flush(gpu);
	} else {
		/* Print a warning so if we die, we know why */
		dev_warn_once(gpu->dev->dev,
			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
	}
static void a5xx_recover(struct msm_gpu *gpu)
	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
	}

	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);

static void a5xx_destroy(struct msm_gpu *gpu)
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	if (a5xx_gpu->pm4_bo) {
		if (a5xx_gpu->pm4_iova)
			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
	}

	if (a5xx_gpu->pfp_bo) {
		if (a5xx_gpu->pfp_iova)
			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
	}

	if (a5xx_gpu->gpmu_bo) {
		if (a5xx_gpu->gpmu_iova)
			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
	}

	adreno_gpu_cleanup(adreno_gpu);
static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
	if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
		return false;

	/*
	 * Nearly every abnormality ends up pausing the GPU and triggering a
	 * fault so we can safely just watch for this one interrupt to fire
	 */
	return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
		A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
bool a5xx_idle(struct msm_gpu *gpu)
	/* wait for CP to drain ringbuffer: */
	if (!adreno_idle(gpu))

	if (spin_until(_a5xx_check_idle(gpu))) {
		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
			gpu->name, __builtin_return_address(0),
			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
	struct msm_gpu *gpu = arg;

	pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
		gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
		gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
		gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
		gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
static void a5xx_cp_err_irq(struct msm_gpu *gpu)
	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);

	if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);

		/*
		 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
		 * read it twice
		 */
		gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
		val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);

		dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",

	if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
		dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
			gpu_read(gpu, REG_A5XX_CP_HW_FAULT));

	if (status & A5XX_CP_INT_CP_DMA_ERROR)
		dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
	if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
			val & (1 << 24) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, val);
	}

	if (status & A5XX_CP_INT_CP_AHB_ERROR) {
		u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
		const char *access[16] = { "reserved", "reserved",
			"timestamp lo", "timestamp hi", "pfp read", "pfp write",
			"", "", "me read", "me write", "", "", "crashdump read",

		dev_err_ratelimited(gpu->dev->dev,
			"CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
			status & 0xFFFFF, access[(status >> 24) & 0xF],
			(status & (1 << 31)), status);
static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

		dev_err_ratelimited(gpu->dev->dev,
			"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
			val & (1 << 28) ? "WRITE" : "READ",
			(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,

		/* Clear the error */
		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));

		/* Clear the interrupt */
		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
	}

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
static void a5xx_uche_err_irq(struct msm_gpu *gpu)
	uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);

	addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);

	dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",

static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");

static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;

	dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
		gpu->funcs->last_fence(gpu),
		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));

	/* Turn off the hangcheck timer to keep it from bothering us */
	del_timer(&gpu->hangcheck_timer);

	queue_work(priv->wq, &gpu->recover_work);
#define RBBM_ERROR_MASK \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)

static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

	/*
	 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
	 * before the source is cleared the interrupt will storm.
	 */
	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
		status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

	/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
	if (status & RBBM_ERROR_MASK)
		a5xx_rbbm_err_irq(gpu, status);

	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
		a5xx_cp_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
		a5xx_fault_detect_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
		a5xx_uche_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
		a5xx_gpmu_err_irq(gpu);

	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
		REG_A5XX_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
static const u32 a5xx_registers[] = {
	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
	0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
static void a5xx_dump(struct msm_gpu *gpu)
	dev_info(gpu->dev->dev, "status: %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
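/*
 * Runtime resume: power up the core clocks and regulators via
 * msm_gpu_pm_resume(), then switch on the RBCCU and SP GDSCs through the
 * GPMU power control registers, polling the corresponding PWR_CLK_STATUS
 * bit (bit 20) until each domain reports that it is up.
 */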
static int a5xx_pm_resume(struct msm_gpu *gpu)

	/* Turn on the core power */
	ret = msm_gpu_pm_resume(gpu);

	/* Turn on the RBCCU domain first to limit the chances of voltage droop */
	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

	/* Wait 3 usecs before polling */

	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
	/* Turn on the SP domain */
	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
		(1 << 20), (1 << 20));
		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
static int a5xx_pm_suspend(struct msm_gpu *gpu)
	/* Clear the VBIF pipe before shutting down */
	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);

	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

	/*
	 * Reset the VBIF before power collapse to avoid issues with FIFO
	 * entries
	 */
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
	gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

	return msm_gpu_pm_suspend(gpu);
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
	*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
		REG_A5XX_RBBM_PERFCTR_CP_0_HI);
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
	seq_printf(m, "status: %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));

	/*
	 * Temporarily disable hardware clock gating before going into
	 * adreno_show to avoid issues while reading the registers
	 */
	a5xx_set_hwcg(gpu, false);
	adreno_show(gpu, m);
	a5xx_set_hwcg(gpu, true);
#endif
static const struct adreno_gpu_funcs funcs = {
	.get_param = adreno_get_param,
	.hw_init = a5xx_hw_init,
	.pm_suspend = a5xx_pm_suspend,
	.pm_resume = a5xx_pm_resume,
	.recover = a5xx_recover,
	.last_fence = adreno_last_fence,
	.submit = a5xx_submit,
	.flush = adreno_flush,
	.irq = a5xx_irq,
	.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
	.show = a5xx_show,
#endif
	.get_timestamp = a5xx_get_timestamp,
};
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct a5xx_gpu *a5xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;

		dev_err(dev->dev, "No A5XX device is defined\n");
		return ERR_PTR(-ENXIO);

	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a5xx_gpu->base;
	gpu = &adreno_gpu->base;

	adreno_gpu->registers = a5xx_registers;
	adreno_gpu->reg_offsets = a5xx_register_offsets;

	a5xx_gpu->lm_leakage = 0x4E001A;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
		a5xx_destroy(&(a5xx_gpu->base.base));
		return ERR_PTR(ret);

	msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);