// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 */
#include <linux/ascii85.h>
#include <linux/interconnect.h>
#include <linux/qcom_scm.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static bool zap_available = true;
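
/*
 * The zap shader is a small signed firmware blob that the secure world
 * runs to take the GPU out of secure mode at boot. zap_shader_load_mdt()
 * below implements the whole dance: look up the zap-shader memory-region
 * in the device tree, pull in the MDT image via adreno_request_fw(),
 * memremap() the reserved region, qcom_mdt_load() the segments into it,
 * and finally ask the secure world to authenticate and run the image.
 */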
static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
		u32 pasid)
{
	struct device *dev = &gpu->pdev->dev;
	const struct firmware *fw;
	struct device_node *np, *mem_np;
	struct resource r;
	phys_addr_t mem_phys;
	ssize_t mem_size;
	void *mem_region = NULL;
	int ret;

	if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
		zap_available = false;
		return -EINVAL;
	}

	np = of_get_child_by_name(dev->of_node, "zap-shader");
	if (!np) {
		zap_available = false;
		return -ENODEV;
	}

	mem_np = of_parse_phandle(np, "memory-region", 0);
	of_node_put(np);
	if (!mem_np) {
		zap_available = false;
		return -EINVAL;
	}

	ret = of_address_to_resource(mem_np, 0, &r);
	of_node_put(mem_np);
	if (ret)
		return ret;

	mem_phys = r.start;

	/* Request the MDT file for the firmware */
	fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
	if (IS_ERR(fw)) {
		DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
		return PTR_ERR(fw);
	}

	/* Figure out how much memory we need */
	mem_size = qcom_mdt_get_size(fw);
	if (mem_size < 0) {
		ret = mem_size;
		goto out;
	}

	if (mem_size > resource_size(&r)) {
		DRM_DEV_ERROR(dev,
			"memory region is too small to load the MDT\n");
		ret = -E2BIG;
		goto out;
	}

	/* Allocate memory for the firmware image */
	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
	if (!mem_region) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Load the rest of the MDT
	 *
	 * Note that we could be dealing with two different paths, since
	 * with upstream linux-firmware it would be in a qcom/ subdir..
	 * adreno_request_fw() handles this, but qcom_mdt_load() does
	 * not. But since we've already gotten through adreno_request_fw()
	 * we know which of the two cases it is:
	 */
	if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
		ret = qcom_mdt_load(dev, fw, fwname, pasid,
				mem_region, mem_phys, mem_size, NULL);
	} else {
		char *newname;

		newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
		ret = qcom_mdt_load(dev, fw, newname, pasid,
				mem_region, mem_phys, mem_size, NULL);
		kfree(newname);
	}
	if (ret)
		goto out;

	/* Send the image to the secure world */
	ret = qcom_scm_pas_auth_and_reset(pasid);

	/*
	 * If the scm call returns -EOPNOTSUPP we assume that this target
	 * doesn't need/support the zap shader so quietly fail
	 */
	if (ret == -EOPNOTSUPP)
		zap_available = false;
	else if (ret)
		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
	if (mem_region)
		memunmap(mem_region);

	release_firmware(fw);

	return ret;
}
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct platform_device *pdev = gpu->pdev;

	/* Short cut if we determine the zap shader isn't available/needed */
	if (!zap_available)
		return -ENODEV;

	/* We need SCM to be able to load the firmware */
	if (!qcom_scm_is_available()) {
		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
		return -EPROBE_DEFER;
	}

	/* Each GPU has a target specific zap shader firmware name to use */
	if (!adreno_gpu->info->zapfw) {
		zap_available = false;
		DRM_DEV_ERROR(&pdev->dev,
			"Zap shader firmware file not specified for this target\n");
		return -ENODEV;
	}

	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
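
/*
 * Userspace-visible GPU parameters. Note how MSM_PARAM_CHIP_ID packs the
 * four revision fields into one dword, one byte each: for example, a
 * hypothetical 3.3.0.2 part (core 3, major 3, minor 0, patchid 2) would
 * read back as 0x03030002.
 */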
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		*value = 0x100000;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
			(adreno_gpu->rev.minor << 8) |
			(adreno_gpu->rev.major << 16) |
			(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp) {
			int ret;

			pm_runtime_get_sync(&gpu->pdev->dev);
			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
			pm_runtime_put_autosuspend(&gpu->pdev->dev);

			return ret;
		}
		return -EINVAL;
	case MSM_PARAM_NR_RINGS:
		*value = gpu->nr_rings;
		return 0;
	case MSM_PARAM_PP_PGTABLE:
		*value = 0;
		return 0;
	case MSM_PARAM_FAULTS:
		*value = gpu->global_faults;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
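
/*
 * Firmware for a given target can live in one of three places, tried in
 * the order below:
 *
 *   1. qcom/$fwfile, via direct load (upstream linux-firmware layout)
 *   2. $fwfile, via direct load (the legacy location)
 *   3. qcom/$fwfile, via the usermode helper (mainly android)
 *
 * Whichever location succeeds first is latched in adreno_gpu->fwloc, so
 * every later request for this GPU is pinned to the same location and
 * blobs from different locations can't get mixed and matched.
 */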
const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
	struct drm_device *drm = adreno_gpu->base.dev;
	const struct firmware *fw = NULL;
	char *newname;
	int ret;

	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
	if (!newname)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try first to load from qcom/$fwfile using a direct load (to avoid
	 * a potential timeout waiting for usermode helper)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_NEW)) {

		ret = request_firmware_direct(&fw, newname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_NEW;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Then try the legacy location without qcom/ prefix
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {

		ret = request_firmware_direct(&fw, fwname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
				fwname);
			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				fwname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Finally fall back to request_firmware() for cases where the
	 * usermode helper is needed (I think mainly android)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {

		ret = request_firmware(&fw, newname, drm->dev);
		if (!ret) {
			DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_HELPER;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
	fw = ERR_PTR(-ENOENT);
out:
	kfree(newname);
	return fw;
}
int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
		const struct firmware *fw;

		if (!adreno_gpu->info->fw[i])
			continue;

		/* Skip if the firmware has already been loaded */
		if (adreno_gpu->fw[i])
			continue;

		fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
		if (IS_ERR(fw))
			return PTR_ERR(fw);

		adreno_gpu->fw[i] = fw;
	}

	return 0;
}
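
/*
 * Upload a firmware image into a GPU-readable BO. The first dword of the
 * file (presumably a version/format marker, not part of the payload) is
 * skipped, hence the fw->size - 4 and &fw->data[4] below.
 */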
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_gem_object *bo;
	void *ptr;

	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);

	return bo;
}
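
/*
 * Generation-independent hardware init: make sure firmware is loaded,
 * reset the software state of every ring, then program the ringbuffer
 * control register and base address (and, on everything but a430, the
 * address the CP should write its rptr shadow to).
 */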
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret, i;

	DBG("%s", gpu->name);

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return ret;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (!ring)
			continue;

		ring->cur = ring->start;
		ring->next = ring->start;

		/* reset completed fence seqno: */
		ring->memptrs->fence = ring->seqno;
		ring->memptrs->rptr = 0;
	}

	/*
	 * Setup REG_CP_RB_CNTL. The same value is used across targets (with
	 * the exception of A430 that disables the RPTR shadow) - the calculation
	 * for the ringbuffer size and block size is moved to msm_gpu.h for the
	 * pre-processor to deal with and the A430 variant is ORed in here
	 */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT |
		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);

	if (!adreno_is_a430(adreno_gpu)) {
		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
			rbmemptr(gpu->rb[0], rptr));
	}

	return 0;
}
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
		struct msm_ringbuffer *ring)
{
	if (adreno_is_a430(adreno_gpu))
		return ring->memptrs->rptr = adreno_gpu_read(
			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
	else
		return ring->memptrs->rptr;
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
	return gpu->rb[0];
}
void adreno_recover(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	// XXX pm-runtime?? we *need* the device to be off after this
	// so maybe continuing to call ->pm_suspend/resume() is better?

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	ret = msm_gpu_hw_init(gpu);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}
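
/*
 * Build the ringbuffer commands for a submit: one indirect-buffer packet
 * per user command buffer, a scratch-register write of the seqno (handy
 * for matching a hang up to a cmdstream dump), a wait-for-idle, and
 * finally a CACHE_FLUSH_TS event that writes the fence value to memory
 * and raises an IRQ so the CPU can retire the submit.
 */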
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);
	}

	/* wait for idle before cache flush/interrupt */
	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
	OUT_RING(ring, 0x00000000);

	if (!adreno_is_a2xx(adreno_gpu)) {
		/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
		OUT_PKT3(ring, CP_EVENT_WRITE, 3);
		OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
		OUT_RING(ring, rbmemptr(ring, fence));
		OUT_RING(ring, submit->seqno);
	} else {
		/* BIT(31) means something else on a2xx */
		OUT_PKT3(ring, CP_EVENT_WRITE, 3);
		OUT_RING(ring, CACHE_FLUSH_TS);
		OUT_RING(ring, rbmemptr(ring, fence));
		OUT_RING(ring, submit->seqno);
		OUT_PKT3(ring, CP_INTERRUPT, 1);
		OUT_RING(ring, 0x80000000);
	}

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu, ring);
}
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr;

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/*
	 * Mask wptr value that we calculate to fit in the HW range. This is
	 * to account for the possibility that the last command fit exactly into
	 * the ringbuffer and rb->next hasn't wrapped to zero yet
	 */
	wptr = get_wptr(ring);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(ring);

	/* wait for CP to drain ringbuffer: */
	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

	return false;
}
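
/*
 * Capture a GPU crash state: timestamp, per-ring bookkeeping
 * (fence/rptr/wptr), ringbuffer contents trimmed down to the last
 * non-zero dword, and a flat (offset, value) array of every register in
 * the target's adreno_gpu->registers ranges.
 */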
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i, count = 0;

	kref_init(&state->ref);

	ktime_get_real_ts64(&state->time);

	for (i = 0; i < gpu->nr_rings; i++) {
		int size = 0, j;

		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
		state->ring[i].iova = gpu->rb[i]->iova;
		state->ring[i].seqno = gpu->rb[i]->seqno;
		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
		state->ring[i].wptr = get_wptr(gpu->rb[i]);

		/* Copy at least 'wptr' dwords of the data */
		size = state->ring[i].wptr;

		/* After wptr find the last non zero dword to save space */
		for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
			if (gpu->rb[i]->start[j])
				size = j + 1;

		if (size) {
			state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
			if (state->ring[i].data) {
				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
				state->ring[i].data_size = size << 2;
			}
		}
	}

	/* Some targets prefer to collect their own registers */
	if (!adreno_gpu->registers)
		return 0;

	/* Count the number of registers */
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
		count += adreno_gpu->registers[i + 1] -
			adreno_gpu->registers[i] + 1;

	state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
	if (state->registers) {
		int pos = 0;

		for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
			u32 start = adreno_gpu->registers[i];
			u32 end = adreno_gpu->registers[i + 1];
			u32 addr;

			for (addr = start; addr <= end; addr++) {
				state->registers[pos++] = addr;
				state->registers[pos++] = gpu_read(gpu, addr);
			}
		}

		state->nr_registers = count;
	}

	return 0;
}
void adreno_gpu_state_destroy(struct msm_gpu_state *state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
		kvfree(state->ring[i].data);

	for (i = 0; state->bos && i < state->nr_bos; i++)
		kvfree(state->bos[i].data);

	kfree(state->bos);
	kfree(state->comm);
	kfree(state->cmd);
	kfree(state->registers);
}
static void adreno_gpu_state_kref_destroy(struct kref *kref)
{
	struct msm_gpu_state *state = container_of(kref,
		struct msm_gpu_state, ref);

	adreno_gpu_state_destroy(state);
	kfree(state);
}

int adreno_gpu_state_put(struct msm_gpu_state *state)
{
	if (IS_ERR_OR_NULL(state))
		return 1;

	return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
}
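
/*
 * Crash state printing, shared by debugfs and devcoredump. Captured
 * buffers are ascii85 encoded (4 binary bytes become 5 printable chars,
 * or a single 'z' for an all-zero word, as in PostScript/PDF) so the
 * dump stays plain text; the "!!ascii85" tag tells readers of the
 * roughly-YAML output how to decode the data back into binary.
 */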
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)

static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
	void *buf;
	size_t buf_itr = 0, buffer_size;
	char out[ASCII85_BUFSZ];
	long l;
	int i;

	if (!src || !len)
		return NULL;

	l = ascii85_encode_len(len);

	/*
	 * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
	 * account for the worst case of 5 bytes per dword plus the 1 for '\0'
	 */
	buffer_size = (l * 5) + 1;

	buf = kvmalloc(buffer_size, GFP_KERNEL);
	if (!buf)
		return NULL;

	for (i = 0; i < l; i++)
		buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
				ascii85_encode(src[i], out));

	return buf;
}
/* len is expected to be in bytes */
static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
		bool *encoded)
{
	if (!*ptr || !len)
		return;

	if (!*encoded) {
		long datalen, i;
		u32 *buf = *ptr;

		/*
		 * Only dump the non-zero part of the buffer - rarely will
		 * any data completely fill the entire allocated size of
		 * the buffer
		 */
		for (datalen = 0, i = 0; i < len >> 2; i++)
			if (buf[i])
				datalen = ((i + 1) << 2);

		/*
		 * If we reach here, then the originally captured binary buffer
		 * will be replaced with the ascii85 encoded string
		 */
		*ptr = adreno_gpu_ascii85_encode(buf, datalen);

		kvfree(buf);

		*encoded = true;
	}

	if (!*ptr)
		return;

	drm_puts(p, "    data: !!ascii85 |\n");
	drm_puts(p, "     ");

	drm_puts(p, *ptr);

	drm_puts(p, "\n");
}
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	if (IS_ERR_OR_NULL(state))
		return;

	drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);

	drm_puts(p, "ringbuffer:\n");

	for (i = 0; i < gpu->nr_rings; i++) {
		drm_printf(p, "  - id: %d\n", i);
		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
		drm_printf(p, "    last-fence: %d\n", state->ring[i].seqno);
		drm_printf(p, "    retired-fence: %d\n", state->ring[i].fence);
		drm_printf(p, "    rptr: %d\n", state->ring[i].rptr);
		drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
		drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);

		adreno_show_object(p, &state->ring[i].data,
			state->ring[i].data_size, &state->ring[i].encoded);
	}

	if (state->bos) {
		drm_puts(p, "bos:\n");

		for (i = 0; i < state->nr_bos; i++) {
			drm_printf(p, "  - iova: 0x%016llx\n",
				state->bos[i].iova);
			drm_printf(p, "    size: %zd\n", state->bos[i].size);

			adreno_show_object(p, &state->bos[i].data,
				state->bos[i].size, &state->bos[i].encoded);
		}
	}

	if (state->nr_registers) {
		drm_puts(p, "registers:\n");

		for (i = 0; i < state->nr_registers; i++) {
			drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
				state->registers[i * 2] << 2,
				state->registers[(i * 2) + 1]);
		}
	}
}
#endif
/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful. The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		printk("rb %d: fence: %d/%d\n", i,
			ring->memptrs->fence,
			ring->seqno);

		printk("rptr:     %d\n", get_rptr(adreno_gpu, ring));
		printk("rb wptr:  %d\n", get_wptr(ring));
	}
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	if (!adreno_gpu->registers)
		return;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end   = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
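
/*
 * Free space must leave one slot unused so an empty ring (rptr == wptr,
 * size - 1 free words) is never confused with a full one. Worked
 * example with size = 8192 dwords: rptr = 100, wptr = 8191 gives
 * (100 + 8191 - 8191) % 8192 = 100 free dwords.
 */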
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
	/* Use ring->next to calculate free size */
	uint32_t wptr = ring->next - ring->start;
	uint32_t rptr = get_rptr(adreno_gpu, ring);

	return (rptr + (size - 1) - wptr) % size;
}

void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
	if (spin_until(ring_freewords(ring) >= ndwords))
		DRM_DEV_ERROR(ring->gpu->dev->dev,
			"timeout waiting for space in ringbuffer %d\n",
			ring->id);
}
/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
static int adreno_get_legacy_pwrlevels(struct device *dev)
{
	struct device_node *child, *node;
	int ret;

	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
	if (!node) {
		DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
		return -ENXIO;
	}

	for_each_child_of_node(node, child) {
		unsigned int val;

		ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
		if (ret)
			continue;

		/*
		 * Skip the intentionally bogus clock value found at the bottom
		 * of most legacy frequency tables
		 */
		if (val != 27000000)
			dev_pm_opp_add(dev, val, 0);
	}

	of_node_put(node);

	return 0;
}
static int adreno_get_pwrlevels(struct device *dev,
		struct msm_gpu *gpu)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;
	int ret;

	gpu->fast_rate = 0;

	/* You down with OPP? */
	if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
		ret = adreno_get_legacy_pwrlevels(dev);
	else {
		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
	}

	if (!ret) {
		/* Find the fastest defined rate */
		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (!IS_ERR(opp)) {
			gpu->fast_rate = freq;
			dev_pm_opp_put(opp);
		}
	}

	if (!gpu->fast_rate) {
		dev_warn(dev,
			"Could not find a clock rate. Using a reasonable default\n");
		/* Pick a suitably safe clock speed for any target */
		gpu->fast_rate = 200000000;
	}

	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);

	/* Check for an interconnect path for the bus */
	gpu->icc_path = of_icc_get(dev, NULL);
	if (IS_ERR(gpu->icc_path))
		gpu->icc_path = NULL;

	return 0;
}
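
/*
 * On some older targets GMEM is backed by OCMEM (on-chip memory), which
 * has to be claimed from its own driver; the allocation below also
 * replaces the GMEM size from the target info with the length the OCMEM
 * driver actually handed back.
 */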
int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *adreno_ocmem)
{
	struct ocmem_buf *ocmem_hdl;
	struct ocmem *ocmem;

	ocmem = of_get_ocmem(dev);
	if (IS_ERR(ocmem)) {
		if (PTR_ERR(ocmem) == -ENODEV) {
			/*
			 * Return success since either the ocmem property was
			 * not specified in device tree, or ocmem support is
			 * not compiled into the kernel.
			 */
			return 0;
		}

		return PTR_ERR(ocmem);
	}

	ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
	if (IS_ERR(ocmem_hdl))
		return PTR_ERR(ocmem_hdl);

	adreno_ocmem->ocmem = ocmem;
	adreno_ocmem->base = ocmem_hdl->addr;
	adreno_ocmem->hdl = ocmem_hdl;
	adreno_gpu->gmem = ocmem_hdl->len;

	return 0;
}
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
{
	if (adreno_ocmem && adreno_ocmem->base)
		ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
			   adreno_ocmem->hdl);
}
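
/*
 * Common constructor for every adreno generation: snapshot target info
 * from the platform config, size the GPU virtual address space, pull
 * clock rates from the OPP table, and enable runtime-PM autosuspend
 * before handing off to msm_gpu_init().
 */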
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu,
		const struct adreno_gpu_funcs *funcs, int nr_rings)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu_config adreno_gpu_config = { 0 };
	struct msm_gpu *gpu = &adreno_gpu->base;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";

	adreno_gpu_config.va_start = SZ_16M;
	adreno_gpu_config.va_end = 0xffffffff;
	/* maximum range of a2xx mmu */
	if (adreno_is_a2xx(adreno_gpu))
		adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;

	adreno_gpu_config.nr_rings = nr_rings;

	adreno_get_pwrlevels(&pdev->dev, gpu);

	pm_runtime_set_autosuspend_delay(&pdev->dev,
		adreno_gpu->info->inactive_period);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, &adreno_gpu_config);
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
		release_firmware(adreno_gpu->fw[i]);

	icc_put(gpu->icc_path);

	msm_gpu_cleanup(&adreno_gpu->base);
}