/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	return "unknown";
}
static inline void
fw_domain_reset(struct drm_i915_private *i915,
		const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
			 const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}

static inline void
fw_domain_wait_ack(const struct drm_i915_private *i915,
		   const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}
static void
fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_put(i915, d);

	i915->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct drm_i915_private *i915,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_reset(i915, d);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read the FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		n = fifo_free_entries(dev_priv);
	else
		n = dev_priv->uncore.fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	dev_priv->uncore.fifo_count = n - 1;
}
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv =
		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}
static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}
static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
{
	u32 fifodbg;

	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
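
/*
 * Illustrative sketch, not part of the original file: a caller brackets a
 * raw register sequence with a symmetric get/put pair so the GT cannot
 * power down mid-sequence. The helper below is hypothetical and assumes a
 * runtime pm reference is already held.
 */
static u32 __maybe_unused
example_forcewake_read(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	u32 val;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	val = I915_READ_FW(reg); /* raw access; forcewake held explicitly */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return val;
}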
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915.mmio_debug;

		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(dev_priv))
			dev_info(dev_priv->drm.dev,
				 "Invalid mmio detected during user access\n");

		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}
#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}
#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}
#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
	(i915)->uncore.funcs.mmio_writew = x##_write16; \
	(i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_readb = x##_read8; \
	(i915)->uncore.funcs.mmio_readw = x##_read16; \
	(i915)->uncore.funcs.mmio_readl = x##_read32; \
	(i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= BIT(domain_id);

	fw_domain_reset(dev_priv, d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
		return;

	if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.fw_reset = 0;
		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
		dev_priv->uncore.fw_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put(dev_priv, FORCEWAKE_RENDER);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}
#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
			struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;
	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
	} else if (IS_GEN5(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN8(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else {
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
	}

	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);

	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}
static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 10),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	intel_runtime_pm_get(dev_priv);
	if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
		reg->val = I915_READ64_2x32(entry->offset_ldw,
					    entry->offset_udw);
	else if (entry->size == 8 && flags == 0)
		reg->val = I915_READ64(entry->offset_ldw);
	else if (entry->size == 4 && flags == 0)
		reg->val = I915_READ(entry->offset_ldw);
	else if (entry->size == 2 && flags == 0)
		reg->val = I915_READ16(entry->offset_ldw);
	else if (entry->size == 1 && flags == 0)
		reg->val = I915_READ8(entry->offset_ldw);
	else
		ret = -EINVAL;
	intel_runtime_pm_put(dev_priv);

	return ret;
}
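
/*
 * Illustrative userspace sketch (not part of the original file) of the
 * whitelist above, using the uapi DRM_IOCTL_I915_REG_READ:
 *
 *	struct drm_i915_reg_read reg = {
 *		.offset = 0x2358,	// RING_TIMESTAMP(RENDER_RING_BASE)
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
 *		printf("timestamp: %llu\n", (unsigned long long)reg.val);
 *
 * Offsets not on the whitelist, or with stray low bits that do not match a
 * supported flag, fail with -EINVAL.
 */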
static void gen3_stop_rings(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		const u32 base = engine->mmio_base;
		const i915_reg_t mode = RING_MI_MODE(base);

		I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register_fw(dev_priv,
					       mode,
					       MODE_IDLE,
					       MODE_IDLE,
					       500))
			DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
					 engine->name);

		I915_WRITE_FW(RING_CTL(base), 0);
		I915_WRITE_FW(RING_HEAD(base), 0);
		I915_WRITE_FW(RING_TAIL(base), 0);

		/* Check acts as a post */
		if (I915_READ_FW(RING_HEAD(base)) != 0)
			DRM_DEBUG_DRIVER("%s: ring head not parked\n",
					 engine->name);
	}
}
static bool i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	usleep_range(50, 200);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}
static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* Stop engines before we reset; see g4x_do_reset() below for why. */
	gen3_stop_rings(dev_priv);

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	/* We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk).
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 */
	gen3_stop_rings(dev_priv);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	I915_WRITE(VDECCLK_GATE_D,
		   I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	return ret;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	I915_WRITE(ILK_GDSR, 0);
	POSTING_READ(ILK_GDSR);
	return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	int err;

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = intel_wait_for_register_fw(dev_priv,
					 GEN6_GDRST, hw_domain_mask, 0,
					 500);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}
/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, a full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	return gen6_hw_domain_reset(dev_priv, hw_mask);
}
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
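
/*
 * Illustrative sketch, not part of the original file: with forcewake (and
 * runtime pm) already held, poll a masked status bit for up to 100us in the
 * fast atomic phase, then fall back to a 5ms sleeping wait. The helper and
 * its parameters are hypothetical.
 */
static int __maybe_unused
example_wait_for_bit_set(struct drm_i915_private *dev_priv,
			 i915_reg_t reg, u32 bit)
{
	return __intel_wait_for_register_fw(dev_priv, reg,
					    bit, bit, /* wait until set */
					    100, /* fast timeout, us */
					    5,   /* slow timeout, ms */
					    NULL);
}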
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    u32 mask,
			    u32 value,
			    unsigned int timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	might_sleep();

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);

	ret = __intel_wait_for_register_fw(dev_priv,
					   reg, mask, value,
					   2, 0, NULL);

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
static int gen8_reset_engine_start(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_reset_engine_start(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_reset_engine_cancel(engine);

	return -EIO;
}
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int retry;
	int ret;

	might_sleep();

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	for (retry = 0; retry < 3; retry++) {
		ret = reset(dev_priv, engine_mask);
		if (ret != -ETIMEDOUT)
			break;

		cond_resched();
	}
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
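
/*
 * Illustrative sketch, not part of the original file: a caller can first try
 * to reset a single engine and escalate to a full reset on failure. The
 * helper is hypothetical; per-engine resets are additionally gated by
 * intel_has_reset_engine() below.
 */
static int __maybe_unused
example_reset_render_engine(struct drm_i915_private *dev_priv)
{
	/* Try to reset just the render engine first... */
	if (intel_gpu_reset(dev_priv, BIT(RCS)) == 0)
		return 0;

	/* ...and fall back to the full-domain reset path. */
	return intel_gpu_reset(dev_priv, ALL_ENGINES);
}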
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}

/*
 * When GuC submission is enabled, GuC manages ELSP and can initiate the
 * engine reset too. For now, fall back to full GPU reset if it is enabled.
 */
bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
	return (dev_priv->info.has_reset_engine &&
		!dev_priv->guc.execbuf_client &&
		i915.reset >= 2);
}

int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
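
/*
 * Illustrative sketch, not part of the original file: the query above lets a
 * caller grab exactly the domains a raw access needs rather than
 * FORCEWAKE_ALL, mirroring the pattern used by intel_wait_for_register().
 * The helper is hypothetical.
 */
static void __maybe_unused
example_write_with_min_domains(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, u32 val)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);
	I915_WRITE_FW(reg, val);
	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);
}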
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif