// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	queue_work(priv->wq, &gpu->recover_work);
}
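/*
 * The GMU exposes two interrupt lines to the host: the "gmu" IRQ is raised
 * by the always-on (AO) block for fatal conditions (watchdog bite, AHB bus
 * error, fence error) and the "hfi" IRQ reports a fault in the CM3
 * firmware. Both handlers route fatal errors into a6xx_gmu_fault() to kick
 * off recovery.
 */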
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
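/*
 * Frequency scaling (DCVS) is done by the GMU firmware: the CPU writes the
 * requested performance index and a bus vote into the DCVS scratch
 * registers, rings the DCVS out-of-band "doorbell" and then reads back a
 * return code to see whether the GMU accepted the vote.
 */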
static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];

	/*
	 * Eventually we will want to scale the path vote with the frequency but
	 * for now leave it at max so that the performance is nominal.
	 */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	__a6xx_gmu_set_freq(gmu, perf_index);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}
/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	/* The GMU firmware writes this magic value once it boots successfully */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
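/*
 * Out of band (OOB) requests are a simple doorbell protocol between the CPU
 * and the GMU firmware: the CPU sets a per-request bit in
 * HOST2GMU_INTR_SET, the GMU acknowledges by setting the matching ack bit
 * in GMU2HOST_INTR_INFO, and the CPU then clears the ack. Each request type
 * has dedicated request/ack/clear bits.
 */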
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
				name,
				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}
/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}
/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
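/*
 * The ARC votes built by a6xx_gmu_rpmh_arc_votes_init() pack the primary
 * (GX) level index in the low byte and the secondary (MX) dependency index
 * in the next byte, which is why a6xx_gmu_gfx_rail_on() below splits the
 * vote across the GX and MX vote index registers.
 */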
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
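/*
 * The PDC registers are not part of the GMU register space, so they are
 * mapped separately and get their own accessor. PDC offsets are in 32-bit
 * words, hence the "offset << 2" conversion to a byte offset.
 */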
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	if (!pdcptr || !seqptr)
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
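	/*
	 * Each TCS command below is described by three registers (MSGID,
	 * ADDR, DATA); the "+ 4" and "+ 8" offsets appear to step to the
	 * second and third command slots of the TCS bank.
	 */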
	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
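	/*
	 * TCS1 above appears to hold the sleep votes and TCS3 below the
	 * matching wakeup votes: the same three RPMh addresses are programmed
	 * with "off" data values in TCS1 and "on" values in TCS3.
	 */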
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}
/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}
#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
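/*
 * Only the CM3 fault bit is left unmasked on the HFI line; HFI message
 * completion is detected by polling the queues rather than by interrupt.
 */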
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 10000);
}
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get(gmu->gxpd);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		/* Clear the VBIF pipe before shutting down */
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
			== 0xf);
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	icc_set_bw(gpu->icc_path, 0, 0);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
		unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}
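/*
 * RPMh ARC resources such as "gfx.lvl", "cx.lvl" and "mx.lvl" publish the
 * voltage levels they support through cmd-db. A vote is built by finding
 * the index of the requested level on the primary rail plus the index of
 * the matching level on the secondary (dependency) rail.
 */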
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);
	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
					level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote: level value, then MX and GX indexes */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}
static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		return -EINVAL;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			      name, ret);
		return ret;
	}

	disable_irq(irq);

	return irq;
}
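/*
 * Note that the IRQ is requested and then immediately disabled; each line
 * is only enabled from a6xx_gmu_resume() once the corresponding hardware
 * is ready to use it.
 */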
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (!gmu->initialized)
		return;

	pm_runtime_force_suspend(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	gmu->mmio = NULL;

	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err_memory;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err_memory;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);
err_memory:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}
	ret = -ENODEV;

err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}