/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"

#include <linux/string_helpers.h>
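
/*
 * Power Management:
 */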

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct msm_gpu *gpu)
{
	if (gpu->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif
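
/*
 * Power rail control: both the core "vdd" rail and the "vddcx" rail (acquired
 * in msm_gpu_init() below) are enabled before the clocks come up.
 */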
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	int i;

	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2 MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_prepare(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
	int i;

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_disable(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}
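
/*
 * Runtime PM: power rails come up first, then core clocks, then the AXI/bus
 * path; suspend tears things down in the reverse order.
 */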
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}
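
/*
 * (Re)initialize the hardware lazily on first use after a resume or recovery;
 * msm_gpu_pm_resume() sets needs_hw_init to request this.
 */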
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

/*
 * Hangcheck detection for locked gpu:
 */
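
/*
 * Signal the fences of all submits on this ring that the hardware has
 * completed, i.e. everything with a seqno at or below 'fence'.
 */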
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
}

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));

	list_for_each_entry(submit, &ring->submits, node)
		if (submit->seqno == fence)
			return submit;

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);
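
/*
 * Recovery worker, scheduled when the hangcheck timer decides the GPU is
 * stuck: fast-forward fences past the hung submit, log the offending task,
 * reset the GPU via funcs->recover() and replay the remaining submits.
 */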
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	uint32_t fence;
	int i;

	/* Update all the rings with the latest and greatest fence */
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence = fence + 1;

		update_fences(gpu, ring, fence);
	}

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
	fence = cur_ring->memptrs->fence + 1;

	submit = find_submit(cur_ring, fence);
	if (submit) {
		struct task_struct *task;

		task = pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			char *cmd;

			/*
			 * So slightly annoying, in other paths like
			 * mmap'ing gem buffers, mmap_sem is acquired
			 * before struct_mutex, which means we can't
			 * hold struct_mutex across the call to
			 * get_cmdline().  But submits are retired
			 * from the same in-order workqueue, so we can
			 * safely drop the lock here without worrying
			 * about the submit going away.
			 */
			mutex_unlock(&dev->struct_mutex);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			mutex_lock(&dev->struct_mutex);

			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, task->comm, cmd);

			kfree(cmd);
		}
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
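
/*
 * Periodic hangcheck: if the completed fence has not advanced since the last
 * check but submits are still outstanding, assume a lockup and schedule the
 * recover worker.
 */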
static void hangcheck_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		dev_err(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->seqno);

		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}
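
/*
 * Software counters: accumulate elapsed wall time and GPU-active time between
 * samples, so activetime/totaltime gives a rough utilization figure.
 */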
static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */
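
/*
 * Drop the references a submit holds on its buffers (and the runtime PM
 * reference taken at submit time), then free the submit itself.
 */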
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_unreference(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit, *tmp;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
			if (dma_fence_is_signaled(submit->fence))
				retire_submit(gpu, submit);
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	list_add_tail(&submit->node, &ring->submits);

	msm_rd_dump_submit(submit);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_reference(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

static struct clk *get_clock(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	return IS_ERR(clk) ? NULL : clk;
}
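
/*
 * Look up every clock named in the "clock-names" DT property; the "core" and
 * "rbbmtimer" clocks are remembered separately since their rates are managed
 * in enable_clk()/disable_clk() above.
 */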
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	struct device *dev = &pdev->dev;
	struct property *prop;
	const char *name;
	int i = 0;

	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
	if (gpu->nr_clocks < 1) {
		gpu->nr_clocks = 0;
		return 0;
	}

	gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
		GFP_KERNEL);
	if (!gpu->grp_clks)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		gpu->grp_clks[i] = get_clock(dev, name);

		/* Remember the key clocks that we need to control later */
		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
			gpu->core_clk = gpu->grp_clks[i];
		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
			gpu->rbbmtimer_clk = gpu->grp_clks[i];

		++i;
	}

	return 0;
}

static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	iommu->geometry.aperture_start = va_start;
	iommu->geometry.aperture_end = va_end;

	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace)) {
		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
			PTR_ERR(aspace));
		iommu_domain_free(iommu);
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}
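
/*
 * One-time per-GPU setup: map registers, hook up the IRQ, look up clocks and
 * regulators, create the GPU address space, and allocate the shared memptrs
 * buffer plus one ringbuffer per priority level.
 */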
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	bs_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
		MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %lu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			dev_err(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}

	platform_set_drvdata(pdev, NULL);
	return ret;
}
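
/*
 * Undo msm_gpu_init(); the GPU is expected to be idle (empty active_list) by
 * the time this is called.
 */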
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	if (gpu->memptrs_bo) {
		msm_gem_put_vaddr(gpu->memptrs_bo);
		msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
				NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}