/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/cpuidle.h>
#include <linux/circ_buf.h>
#include <drm/drm_irq.h>
#include <drm/drm_drv.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)
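
/*
 * Editor's illustrative sketch (not upstream code): the reset macros above
 * are used at preinstall/uninstall time to quiesce an interrupt block. For
 * instance, the PCH south display engine (SDE) registers
 * SDEIMR/SDEIER/SDEIIR would be quiesced with:
 *
 *	GEN3_IRQ_RESET(SDE);
 *
 * which masks everything in IMR, disables IER, and clears IIR twice to
 * flush the second event the hardware may have queued behind the first.
 */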
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)
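
/*
 * Editor's illustrative sketch (not upstream code): at postinstall time the
 * matching init macro first asserts that IIR is clean (via the
 * gen*_assert_iir_is_zero() helpers above) and then programs IER/IMR. For a
 * hypothetical 'mask' of south display interrupts one might write:
 *
 *	GEN3_IRQ_INIT(SDE, ~mask, mask);
 *
 * IMR bits are disable bits, hence the inverted mask in the IMR argument.
 */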
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles from
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * already held, it acquires the lock itself. A non-locking version is
 * also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
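
/*
 * Editor's illustrative usage note (not upstream code): display code
 * typically enables a single detect bit by passing the same value for
 * @mask and @bits, e.g. for the CRT hotplug detect logic something like:
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * and disables it again by passing 0 as @bits with the same @mask.
 */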
static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->uncore.regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
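
/*
 * Editor's note on the mask arithmetic above (illustrative): IMR bits are
 * *disable* bits. With interrupt_mask = BIT(5), the expression
 * (~enabled_irq_mask & interrupt_mask) evaluates to 0 when
 * enabled_irq_mask = BIT(5) (bit cleared in DEIMR -> interrupt unmasked),
 * and to BIT(5) when enabled_irq_mask = 0 (bit set -> interrupt masked);
 * all bits outside interrupt_mask are left untouched.
 */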
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
	u32 new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* a barrier is missing here, but we don't really need one */
}
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}
/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
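
/*
 * Editor's illustrative sketch (not upstream code): callers usually wrap
 * ibx_display_interrupt_update() in small enable/disable helpers along
 * these lines.
 */
static inline void
example_ibx_enable_display_interrupt(struct drm_i915_private *dev_priv,
				     u32 bits)
{
	/* unmask @bits in SDEIMR */
	ibx_display_interrupt_update(dev_priv, bits, bits);
}

static inline void
example_ibx_disable_display_interrupt(struct drm_i915_private *dev_priv,
				      u32 bits)
{
	/* mask @bits in SDEIMR */
	ibx_display_interrupt_update(dev_priv, bits, 0);
}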
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
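
/*
 * Editor's illustrative usage (not upstream code): vblank enable paths on
 * these platforms take the irq_lock and flip the relevant PIPESTAT status
 * bit, roughly as sketched below.
 */
static inline void
example_enable_vblank_pipestat(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}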
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index.
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	const struct drm_display_mode *mode = &vblank->hwmode;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
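
/*
 * Editor's worked example for the "cooked" counter above (illustrative):
 * with htotal = 100, hsync_start = 90 and vblank_start = 480, vbl_start
 * becomes 480 * 100 - (100 - 90) = 47990 pixels. If the hardware frame
 * counter reads N while the pixel counter reads >= 47990, vblank has
 * already begun for this frame, so the cooked value reports N + 1.
 */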
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, since the timings are driven from
 * the PORT, or there are issues with scanline register updates.
 * This function will use the Framestamp and current timestamp registers
 * to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/*
	 * Limit deboosting and boosting to keep ourselves at the extremes
	 * when in the respective power modes (i.e. slowly decrease frequencies
	 * while in the HIGH_POWER zone and slowly increase frequencies while
	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
	 * to the next level quickly, and conversely if busy we expect to
	 * hit a waitboost and rapidly switch into max power.
	 */
	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
	    (adj > 0 && rps->power.mode == LOW_POWER))
		rps->last_adj = 0;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		intel_engine_breadcrumbs_irq(engine);
		tasklet |= USES_GUC_SUBMISSION(engine->i915);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
			    u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = i915->uncore.regs;

#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
		      GEN8_GT_VCS0_IRQ | \
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2]))
			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}
static void gen8_gt_irq_handler(struct drm_i915_private *i915,
				u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gen8_cs_irq_handler(i915->engine[RCS0],
				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[BCS0],
				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS0],
				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS1],
				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gen8_cs_irq_handler(i915->engine[VECS0],
				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(i915, gt_iir[2]);
		gen9_guc_irq_handler(i915, gt_iir[2]);
	}
}
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & ICP_DDIA_HPD_LONG_DETECT;
	case HPD_PORT_B:
		return val & ICP_DDIB_HPD_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
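
/*
 * Editor's illustrative sketch (not upstream code): a PCH hotplug handler
 * accumulates pins from a trigger/latch register pair and then hands the
 * result to the hotplug core, roughly:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *	u32 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
 *
 *	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg,
 *			   hpd_ibx, pch_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */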
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue.
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
		intel_guc_to_host_event_handler(&dev_priv->guc);
}
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}
static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}
static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}
static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}
2063 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2065 struct drm_device *dev = arg;
2066 struct drm_i915_private *dev_priv = to_i915(dev);
2067 irqreturn_t ret = IRQ_NONE;
2069 if (!intel_irqs_enabled(dev_priv))
2072 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2073 disable_rpm_wakeref_asserts(dev_priv);
2076 u32 iir, gt_iir, pm_iir;
2077 u32 pipe_stats[I915_MAX_PIPES] = {};
2078 u32 hotplug_status = 0;
2081 gt_iir = I915_READ(GTIIR);
2082 pm_iir = I915_READ(GEN6_PMIIR);
2083 iir = I915_READ(VLV_IIR);
2085 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2091 * Theory on interrupt generation, based on empirical evidence:
2093 * x = ((VLV_IIR & VLV_IER) ||
2094 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2095 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2097 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2098 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2099 * guarantee the CPU interrupt will be raised again even if we
2100 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2101 * bits this time around.
2103 I915_WRITE(VLV_MASTER_IER, 0);
2104 ier = I915_READ(VLV_IER);
2105 I915_WRITE(VLV_IER, 0);
2107 if (gt_iir)
2108 I915_WRITE(GTIIR, gt_iir);
2109 if (pm_iir)
2110 I915_WRITE(GEN6_PMIIR, pm_iir);
2112 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2113 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2115 /* Call regardless, as some status bits might not be
2116 * signalled in iir */
2117 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2119 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2120 I915_LPE_PIPE_B_INTERRUPT))
2121 intel_lpe_audio_irq_handler(dev_priv);
2124 * VLV_IIR is single buffered, and reflects the level
2125 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2127 if (iir)
2128 I915_WRITE(VLV_IIR, iir);
2130 I915_WRITE(VLV_IER, ier);
2131 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2133 if (gt_iir)
2134 snb_gt_irq_handler(dev_priv, gt_iir);
2135 if (pm_iir)
2136 gen6_rps_irq_handler(dev_priv, pm_iir);
2138 if (hotplug_status)
2139 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2141 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2142 } while (0);
2144 enable_rpm_wakeref_asserts(dev_priv);
2146 return ret;
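/*
 * A compiler-checkable sketch (hypothetical helper, never called by the
 * driver) of the edge-guarantee dance from the "Theory on interrupt
 * generation" comment above: zero the master enable and VLV_IER before
 * acking, then restore them, so that any IIR bit left set produces a
 * fresh 0->1 edge and hence a new CPU interrupt.
 */
static void vlv_edge_guarantee_sketch(struct drm_i915_private *dev_priv)
{
	u32 ier;

	/* Force 'x' in the formula above to 0. */
	I915_WRITE(VLV_MASTER_IER, 0);
	ier = I915_READ(VLV_IER);
	I915_WRITE(VLV_IER, 0);

	/* ... ack GTIIR/GEN6_PMIIR and then VLV_IIR here ... */

	/* Re-arm: leftover IIR bits now generate a new edge. */
	I915_WRITE(VLV_IER, ier);
	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
}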
2149 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2151 struct drm_device *dev = arg;
2152 struct drm_i915_private *dev_priv = to_i915(dev);
2153 irqreturn_t ret = IRQ_NONE;
2155 if (!intel_irqs_enabled(dev_priv))
2156 return IRQ_NONE;
2158 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2159 disable_rpm_wakeref_asserts(dev_priv);
2161 do {
2162 u32 master_ctl, iir;
2163 u32 pipe_stats[I915_MAX_PIPES] = {};
2164 u32 hotplug_status = 0;
2168 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2169 iir = I915_READ(VLV_IIR);
2171 if (master_ctl == 0 && iir == 0)
2172 break;
2174 ret = IRQ_HANDLED;
2177 * Theory on interrupt generation, based on empirical evidence:
2179 * x = ((VLV_IIR & VLV_IER) ||
2180 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2181 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2183 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2184 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2185 * guarantee the CPU interrupt will be raised again even if we
2186 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2187 * bits this time around.
2189 I915_WRITE(GEN8_MASTER_IRQ, 0);
2190 ier = I915_READ(VLV_IER);
2191 I915_WRITE(VLV_IER, 0);
2193 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2195 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2196 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2198 /* Call regardless, as some status bits might not be
2199 * signalled in iir */
2200 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2202 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2203 I915_LPE_PIPE_B_INTERRUPT |
2204 I915_LPE_PIPE_C_INTERRUPT))
2205 intel_lpe_audio_irq_handler(dev_priv);
2208 * VLV_IIR is single buffered, and reflects the level
2209 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2211 if (iir)
2212 I915_WRITE(VLV_IIR, iir);
2214 I915_WRITE(VLV_IER, ier);
2215 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2217 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2219 if (hotplug_status)
2220 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2222 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2223 } while (0);
2225 enable_rpm_wakeref_asserts(dev_priv);
2227 return ret;
2230 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2231 u32 hotplug_trigger,
2232 const u32 hpd[HPD_NUM_PINS])
2234 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2237 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2238 * unless we touch the hotplug register, even if hotplug_trigger is
2239 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2240 * errors.
2242 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2243 if (!hotplug_trigger) {
2244 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2245 PORTD_HOTPLUG_STATUS_MASK |
2246 PORTC_HOTPLUG_STATUS_MASK |
2247 PORTB_HOTPLUG_STATUS_MASK;
2248 dig_hotplug_reg &= ~mask;
2251 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2252 if (!hotplug_trigger)
2255 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2256 dig_hotplug_reg, hpd,
2257 pch_port_hotplug_long_detect);
2259 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
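/*
 * Sketch of the selective ack above, as a hypothetical pure helper: when
 * there is no trigger we still write PCH_PORT_HOTPLUG to ack the PCH, but
 * first drop the latched status bits from the write-back value (the bits
 * are write-1-to-clear) so they stay pending for a later pass.
 */
static u32 ibx_ack_value_sketch(u32 dig_hotplug_reg, bool has_trigger)
{
	u32 status_mask = PORTA_HOTPLUG_STATUS_MASK |
			  PORTB_HOTPLUG_STATUS_MASK |
			  PORTC_HOTPLUG_STATUS_MASK |
			  PORTD_HOTPLUG_STATUS_MASK;

	return has_trigger ? dig_hotplug_reg : dig_hotplug_reg & ~status_mask;
}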
2262 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2265 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2267 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2269 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2270 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2271 SDE_AUDIO_POWER_SHIFT);
2272 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2276 if (pch_iir & SDE_AUX_MASK)
2277 dp_aux_irq_handler(dev_priv);
2279 if (pch_iir & SDE_GMBUS)
2280 gmbus_irq_handler(dev_priv);
2282 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2283 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2285 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2286 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2288 if (pch_iir & SDE_POISON)
2289 DRM_ERROR("PCH poison interrupt\n");
2291 if (pch_iir & SDE_FDI_MASK)
2292 for_each_pipe(dev_priv, pipe)
2293 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2294 pipe_name(pipe),
2295 I915_READ(FDI_RX_IIR(pipe)));
2297 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2298 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2300 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2301 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2303 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2304 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2306 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2307 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2310 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2312 u32 err_int = I915_READ(GEN7_ERR_INT);
2315 if (err_int & ERR_INT_POISON)
2316 DRM_ERROR("Poison interrupt\n");
2318 for_each_pipe(dev_priv, pipe) {
2319 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2320 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2322 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2323 if (IS_IVYBRIDGE(dev_priv))
2324 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2325 else
2326 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2330 I915_WRITE(GEN7_ERR_INT, err_int);
2333 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2335 u32 serr_int = I915_READ(SERR_INT);
2338 if (serr_int & SERR_INT_POISON)
2339 DRM_ERROR("PCH poison interrupt\n");
2341 for_each_pipe(dev_priv, pipe)
2342 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2343 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2345 I915_WRITE(SERR_INT, serr_int);
2348 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2351 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2353 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2355 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2356 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2357 SDE_AUDIO_POWER_SHIFT_CPT);
2358 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2362 if (pch_iir & SDE_AUX_MASK_CPT)
2363 dp_aux_irq_handler(dev_priv);
2365 if (pch_iir & SDE_GMBUS_CPT)
2366 gmbus_irq_handler(dev_priv);
2368 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2369 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2371 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2372 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2374 if (pch_iir & SDE_FDI_MASK_CPT)
2375 for_each_pipe(dev_priv, pipe)
2376 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2377 pipe_name(pipe),
2378 I915_READ(FDI_RX_IIR(pipe)));
2380 if (pch_iir & SDE_ERROR_CPT)
2381 cpt_serr_int_handler(dev_priv);
2384 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2386 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2387 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
2388 u32 pin_mask = 0, long_mask = 0;
2390 if (ddi_hotplug_trigger) {
2391 u32 dig_hotplug_reg;
2393 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2394 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2396 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2397 ddi_hotplug_trigger,
2398 dig_hotplug_reg, hpd_icp,
2399 icp_ddi_port_hotplug_long_detect);
2402 if (tc_hotplug_trigger) {
2403 u32 dig_hotplug_reg;
2405 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2406 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2408 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2409 tc_hotplug_trigger,
2410 dig_hotplug_reg, hpd_icp,
2411 icp_tc_port_hotplug_long_detect);
2414 if (pin_mask)
2415 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2417 if (pch_iir & SDE_GMBUS_ICP)
2418 gmbus_irq_handler(dev_priv);
2421 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2423 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2424 ~SDE_PORTE_HOTPLUG_SPT;
2425 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2426 u32 pin_mask = 0, long_mask = 0;
2428 if (hotplug_trigger) {
2429 u32 dig_hotplug_reg;
2431 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2432 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2434 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2435 hotplug_trigger, dig_hotplug_reg, hpd_spt,
2436 spt_port_hotplug_long_detect);
2439 if (hotplug2_trigger) {
2440 u32 dig_hotplug_reg;
2442 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2443 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2445 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2446 hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2447 spt_port_hotplug2_long_detect);
2450 if (pin_mask)
2451 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2453 if (pch_iir & SDE_GMBUS_CPT)
2454 gmbus_irq_handler(dev_priv);
2457 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2458 u32 hotplug_trigger,
2459 const u32 hpd[HPD_NUM_PINS])
2461 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2463 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2464 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2466 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2467 dig_hotplug_reg, hpd,
2468 ilk_port_hotplug_long_detect);
2470 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2473 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2477 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2479 if (hotplug_trigger)
2480 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2482 if (de_iir & DE_AUX_CHANNEL_A)
2483 dp_aux_irq_handler(dev_priv);
2485 if (de_iir & DE_GSE)
2486 intel_opregion_asle_intr(dev_priv);
2488 if (de_iir & DE_POISON)
2489 DRM_ERROR("Poison interrupt\n");
2491 for_each_pipe(dev_priv, pipe) {
2492 if (de_iir & DE_PIPE_VBLANK(pipe))
2493 drm_handle_vblank(&dev_priv->drm, pipe);
2495 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2496 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2498 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2499 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2502 /* check event from PCH */
2503 if (de_iir & DE_PCH_EVENT) {
2504 u32 pch_iir = I915_READ(SDEIIR);
2506 if (HAS_PCH_CPT(dev_priv))
2507 cpt_irq_handler(dev_priv, pch_iir);
2508 else
2509 ibx_irq_handler(dev_priv, pch_iir);
2511 /* the PCH hotplug event should be cleared before clearing the CPU irq */
2512 I915_WRITE(SDEIIR, pch_iir);
2515 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2516 ironlake_rps_change_irq_handler(dev_priv);
2519 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2523 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2525 if (hotplug_trigger)
2526 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2528 if (de_iir & DE_ERR_INT_IVB)
2529 ivb_err_int_handler(dev_priv);
2531 if (de_iir & DE_EDP_PSR_INT_HSW) {
2532 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2534 intel_psr_irq_handler(dev_priv, psr_iir);
2535 I915_WRITE(EDP_PSR_IIR, psr_iir);
2538 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2539 dp_aux_irq_handler(dev_priv);
2541 if (de_iir & DE_GSE_IVB)
2542 intel_opregion_asle_intr(dev_priv);
2544 for_each_pipe(dev_priv, pipe) {
2545 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2546 drm_handle_vblank(&dev_priv->drm, pipe);
2549 /* check event from PCH */
2550 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2551 u32 pch_iir = I915_READ(SDEIIR);
2553 cpt_irq_handler(dev_priv, pch_iir);
2555 /* clear the PCH hotplug event before clearing the CPU irq */
2556 I915_WRITE(SDEIIR, pch_iir);
2561 * To handle irqs with the minimum potential races with fresh interrupts, we:
2562 * 1 - Disable Master Interrupt Control.
2563 * 2 - Find the source(s) of the interrupt.
2564 * 3 - Clear the Interrupt Identity bits (IIR).
2565 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2566 * 5 - Re-enable Master Interrupt Control.
2568 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2570 struct drm_device *dev = arg;
2571 struct drm_i915_private *dev_priv = to_i915(dev);
2572 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2573 irqreturn_t ret = IRQ_NONE;
2575 if (!intel_irqs_enabled(dev_priv))
2576 return IRQ_NONE;
2578 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2579 disable_rpm_wakeref_asserts(dev_priv);
2581 /* disable master interrupt before clearing iir */
2582 de_ier = I915_READ(DEIER);
2583 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2585 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2586 * interrupts will be stored on its back queue, and then we'll be
2587 * able to process them after we restore SDEIER (as soon as we restore
2588 * it, we'll get an interrupt if SDEIIR still has something to process
2589 * due to its back queue). */
2590 if (!HAS_PCH_NOP(dev_priv)) {
2591 sde_ier = I915_READ(SDEIER);
2592 I915_WRITE(SDEIER, 0);
2595 /* Find, clear, then process each source of interrupt */
2597 gt_iir = I915_READ(GTIIR);
2598 if (gt_iir) {
2599 I915_WRITE(GTIIR, gt_iir);
2600 ret = IRQ_HANDLED;
2601 if (INTEL_GEN(dev_priv) >= 6)
2602 snb_gt_irq_handler(dev_priv, gt_iir);
2603 else
2604 ilk_gt_irq_handler(dev_priv, gt_iir);
2607 de_iir = I915_READ(DEIIR);
2608 if (de_iir) {
2609 I915_WRITE(DEIIR, de_iir);
2610 ret = IRQ_HANDLED;
2611 if (INTEL_GEN(dev_priv) >= 7)
2612 ivb_display_irq_handler(dev_priv, de_iir);
2613 else
2614 ilk_display_irq_handler(dev_priv, de_iir);
2617 if (INTEL_GEN(dev_priv) >= 6) {
2618 u32 pm_iir = I915_READ(GEN6_PMIIR);
2619 if (pm_iir) {
2620 I915_WRITE(GEN6_PMIIR, pm_iir);
2621 ret = IRQ_HANDLED;
2622 gen6_rps_irq_handler(dev_priv, pm_iir);
2626 I915_WRITE(DEIER, de_ier);
2627 if (!HAS_PCH_NOP(dev_priv))
2628 I915_WRITE(SDEIER, sde_ier);
2630 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2631 enable_rpm_wakeref_asserts(dev_priv);
2633 return ret;
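/*
 * Sketch of the SDEIER trick described above, as a hypothetical helper
 * that is never called: with SDEIER zeroed, further south interrupts
 * queue up behind SDEIIR; restoring SDEIER re-raises the interrupt if
 * SDEIIR still has something to process.
 */
static void sde_backqueue_sketch(struct drm_i915_private *dev_priv)
{
	u32 sde_ier = I915_READ(SDEIER);

	I915_WRITE(SDEIER, 0);		/* park south interrupts on the back queue */

	/* ... read, ack and handle SDEIIR exactly once here ... */

	I915_WRITE(SDEIER, sde_ier);	/* re-assert: queued work re-raises the irq */
}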
2636 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2637 u32 hotplug_trigger,
2638 const u32 hpd[HPD_NUM_PINS])
2640 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2642 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2643 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2645 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2646 dig_hotplug_reg, hpd,
2647 bxt_port_hotplug_long_detect);
2649 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2652 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2654 u32 pin_mask = 0, long_mask = 0;
2655 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2656 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2658 if (trigger_tc) {
2659 u32 dig_hotplug_reg;
2661 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2662 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2664 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2665 dig_hotplug_reg, hpd_gen11,
2666 gen11_port_hotplug_long_detect);
2669 if (trigger_tbt) {
2670 u32 dig_hotplug_reg;
2672 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2673 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2675 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2676 dig_hotplug_reg, hpd_gen11,
2677 gen11_port_hotplug_long_detect);
2680 if (pin_mask)
2681 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2682 else
2683 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2686 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2688 u32 mask = GEN8_AUX_CHANNEL_A;
2690 if (INTEL_GEN(dev_priv) >= 9)
2691 mask |= GEN9_AUX_CHANNEL_B |
2692 GEN9_AUX_CHANNEL_C |
2693 GEN9_AUX_CHANNEL_D;
2695 if (IS_CNL_WITH_PORT_F(dev_priv))
2696 mask |= CNL_AUX_CHANNEL_F;
2698 if (INTEL_GEN(dev_priv) >= 11)
2699 mask |= ICL_AUX_CHANNEL_E |
2700 CNL_AUX_CHANNEL_F;
2702 return mask;
2706 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2708 irqreturn_t ret = IRQ_NONE;
2712 if (master_ctl & GEN8_DE_MISC_IRQ) {
2713 iir = I915_READ(GEN8_DE_MISC_IIR);
2717 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2720 if (iir & GEN8_DE_MISC_GSE) {
2721 intel_opregion_asle_intr(dev_priv);
2725 if (iir & GEN8_DE_EDP_PSR) {
2726 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2728 intel_psr_irq_handler(dev_priv, psr_iir);
2729 I915_WRITE(EDP_PSR_IIR, psr_iir);
2734 DRM_ERROR("Unexpected DE Misc interrupt\n");
2737 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2740 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2741 iir = I915_READ(GEN11_DE_HPD_IIR);
2742 if (iir) {
2743 I915_WRITE(GEN11_DE_HPD_IIR, iir);
2744 ret = IRQ_HANDLED;
2745 gen11_hpd_irq_handler(dev_priv, iir);
2746 } else {
2747 DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2751 if (master_ctl & GEN8_DE_PORT_IRQ) {
2752 iir = I915_READ(GEN8_DE_PORT_IIR);
2757 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2760 if (iir & gen8_de_port_aux_mask(dev_priv)) {
2761 dp_aux_irq_handler(dev_priv);
2765 if (IS_GEN9_LP(dev_priv)) {
2766 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2768 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2772 } else if (IS_BROADWELL(dev_priv)) {
2773 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2775 ilk_hpd_irq_handler(dev_priv,
2781 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2782 gmbus_irq_handler(dev_priv);
2787 DRM_ERROR("Unexpected DE Port interrupt\n");
2790 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2793 for_each_pipe(dev_priv, pipe) {
2794 u32 fault_errors;
2796 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2797 continue;
2799 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2800 if (!iir) {
2801 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2802 continue;
2803 }
2805 ret = IRQ_HANDLED;
2806 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2808 if (iir & GEN8_PIPE_VBLANK)
2809 drm_handle_vblank(&dev_priv->drm, pipe);
2811 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2812 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2814 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2815 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2817 fault_errors = iir;
2818 if (INTEL_GEN(dev_priv) >= 9)
2819 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2820 else
2821 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2824 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2829 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2830 master_ctl & GEN8_DE_PCH_IRQ) {
2832 * FIXME(BDW): Assume for now that the new interrupt handling
2833 * scheme also closed the SDE interrupt handling race we've seen
2834 * on older pch-split platforms. But this needs testing.
2836 iir = I915_READ(SDEIIR);
2837 if (iir) {
2838 I915_WRITE(SDEIIR, iir);
2839 ret = IRQ_HANDLED;
2841 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2842 icp_irq_handler(dev_priv, iir);
2843 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2844 spt_irq_handler(dev_priv, iir);
2845 else
2846 cpt_irq_handler(dev_priv, iir);
2849 * Like on previous PCHs, there seems to be something
2850 * fishy going on with forwarding PCH interrupts.
2852 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2859 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2861 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2864 * Now with master disabled, get a sample of level indications
2865 * for this interrupt. Indications will be cleared on related acks.
2866 * New indications can and will light up during processing,
2867 * and will generate a new interrupt after the master is re-enabled.
2869 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2872 static inline void gen8_master_intr_enable(void __iomem * const regs)
2874 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2877 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2879 struct drm_i915_private *dev_priv = to_i915(arg);
2880 void __iomem * const regs = dev_priv->uncore.regs;
2884 if (!intel_irqs_enabled(dev_priv))
2885 return IRQ_NONE;
2887 master_ctl = gen8_master_intr_disable(regs);
2888 if (!master_ctl) {
2889 gen8_master_intr_enable(regs);
2890 return IRQ_NONE;
2891 }
2893 /* Find, clear, then process each source of interrupt */
2894 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2896 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2897 if (master_ctl & ~GEN8_GT_IRQS) {
2898 disable_rpm_wakeref_asserts(dev_priv);
2899 gen8_de_irq_handler(dev_priv, master_ctl);
2900 enable_rpm_wakeref_asserts(dev_priv);
2903 gen8_master_intr_enable(regs);
2905 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2907 return IRQ_HANDLED;
2911 gen11_gt_engine_identity(struct drm_i915_private * const i915,
2912 const unsigned int bank, const unsigned int bit)
2914 void __iomem * const regs = i915->uncore.regs;
2918 lockdep_assert_held(&i915->irq_lock);
2920 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2923 * NB: Specs do not specify how long to spin wait,
2924 * so we do ~100us as an educated guess.
2926 timeout_ts = (local_clock() >> 10) + 100;
2927 do {
2928 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2929 } while (!(ident & GEN11_INTR_DATA_VALID) &&
2930 !time_after32(local_clock() >> 10, timeout_ts));
2932 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
2933 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
2938 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
2939 GEN11_INTR_DATA_VALID);
2941 return ident;
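/*
 * A self-contained sketch (hypothetical helper, hypothetical done/data
 * callback) of the bounded busy-wait used above: local_clock() returns
 * nanoseconds, so ">> 10" divides by 1024 as a cheap approximation of
 * microseconds, and time_after32() keeps the comparison safe across
 * wraparound.
 */
static bool spin_wait_us_sketch(bool (*done)(void *), void *data,
				u32 timeout_us)
{
	u32 timeout_ts = (local_clock() >> 10) + timeout_us;

	do {
		if (done(data))
			return true;
	} while (!time_after32(local_clock() >> 10, timeout_ts));

	/* One final sample after the deadline, mirroring the code above. */
	return done(data);
}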
2945 gen11_other_irq_handler(struct drm_i915_private * const i915,
2946 const u8 instance, const u16 iir)
2948 if (instance == OTHER_GTPM_INSTANCE)
2949 return gen6_rps_irq_handler(i915, iir);
2951 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
2956 gen11_engine_irq_handler(struct drm_i915_private * const i915,
2957 const u8 class, const u8 instance, const u16 iir)
2959 struct intel_engine_cs *engine;
2961 if (instance <= MAX_ENGINE_INSTANCE)
2962 engine = i915->engine_class[class][instance];
2967 return gen8_cs_irq_handler(engine, iir);
2969 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
2974 gen11_gt_identity_handler(struct drm_i915_private * const i915,
2977 const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
2978 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
2979 const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
2981 if (unlikely(!intr))
2984 if (class <= COPY_ENGINE_CLASS)
2985 return gen11_engine_irq_handler(i915, class, instance, intr);
2987 if (class == OTHER_CLASS)
2988 return gen11_other_irq_handler(i915, instance, intr);
2990 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
2991 class, instance, intr);
2995 gen11_gt_bank_handler(struct drm_i915_private * const i915,
2996 const unsigned int bank)
2998 void __iomem * const regs = i915->uncore.regs;
2999 unsigned long intr_dw;
3002 lockdep_assert_held(&i915->irq_lock);
3004 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3006 if (unlikely(!intr_dw)) {
3007 DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
3011 for_each_set_bit(bit, &intr_dw, 32) {
3012 const u32 ident = gen11_gt_engine_identity(i915,
3015 gen11_gt_identity_handler(i915, ident);
3018 /* The clear must happen after the shared IIR has been serviced for the engine */
3019 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3023 gen11_gt_irq_handler(struct drm_i915_private * const i915,
3024 const u32 master_ctl)
3028 spin_lock(&i915->irq_lock);
3030 for (bank = 0; bank < 2; bank++) {
3031 if (master_ctl & GEN11_GT_DW_IRQ(bank))
3032 gen11_gt_bank_handler(i915, bank);
3035 spin_unlock(&i915->irq_lock);
3039 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3041 void __iomem * const regs = dev_priv->uncore.regs;
3044 if (!(master_ctl & GEN11_GU_MISC_IRQ))
3047 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3049 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3055 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3057 if (iir & GEN11_GU_MISC_GSE)
3058 intel_opregion_asle_intr(dev_priv);
3061 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
3063 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3066 * Now with master disabled, get a sample of level indications
3067 * for this interrupt. Indications will be cleared on related acks.
3068 * New indications can and will light up during processing,
3069 * and will generate a new interrupt after the master is re-enabled.
3071 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3074 static inline void gen11_master_intr_enable(void __iomem * const regs)
3076 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
3079 static irqreturn_t gen11_irq_handler(int irq, void *arg)
3081 struct drm_i915_private * const i915 = to_i915(arg);
3082 void __iomem * const regs = i915->uncore.regs;
3086 if (!intel_irqs_enabled(i915))
3087 return IRQ_NONE;
3089 master_ctl = gen11_master_intr_disable(regs);
3090 if (!master_ctl) {
3091 gen11_master_intr_enable(regs);
3092 return IRQ_NONE;
3093 }
3095 /* Find, clear, then process each source of interrupt. */
3096 gen11_gt_irq_handler(i915, master_ctl);
3098 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3099 if (master_ctl & GEN11_DISPLAY_IRQ) {
3100 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3102 disable_rpm_wakeref_asserts(i915);
3104 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3105 * for the display related bits.
3107 gen8_de_irq_handler(i915, disp_ctl);
3108 enable_rpm_wakeref_asserts(i915);
3111 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3113 gen11_master_intr_enable(regs);
3115 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3117 return IRQ_HANDLED;
3120 /* Called from drm generic code, passed 'crtc' which
3121 * we use as a pipe index
3123 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
3125 struct drm_i915_private *dev_priv = to_i915(dev);
3126 unsigned long irqflags;
3128 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3129 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3130 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3135 static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
3137 struct drm_i915_private *dev_priv = to_i915(dev);
3139 if (dev_priv->i945gm_vblank.enabled++ == 0)
3140 schedule_work(&dev_priv->i945gm_vblank.work);
3142 return i8xx_enable_vblank(dev, pipe);
3145 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3147 struct drm_i915_private *dev_priv = to_i915(dev);
3148 unsigned long irqflags;
3150 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3151 i915_enable_pipestat(dev_priv, pipe,
3152 PIPE_START_VBLANK_INTERRUPT_STATUS);
3153 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3158 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3160 struct drm_i915_private *dev_priv = to_i915(dev);
3161 unsigned long irqflags;
3162 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3163 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3165 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3166 ilk_enable_display_irq(dev_priv, bit);
3167 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3169 /* Even though there is no DMC, frame counter can get stuck when
3170 * PSR is active as no frames are generated.
3172 if (HAS_PSR(dev_priv))
3173 drm_vblank_restore(dev, pipe);
3178 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3180 struct drm_i915_private *dev_priv = to_i915(dev);
3181 unsigned long irqflags;
3183 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3184 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3185 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3187 /* Even if there is no DMC, frame counter can get stuck when
3188 * PSR is active as no frames are generated, so check only for PSR.
3190 if (HAS_PSR(dev_priv))
3191 drm_vblank_restore(dev, pipe);
3196 /* Called from drm generic code, passed 'crtc' which
3197 * we use as a pipe index
3199 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3201 struct drm_i915_private *dev_priv = to_i915(dev);
3202 unsigned long irqflags;
3204 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3205 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3206 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3209 static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
3211 struct drm_i915_private *dev_priv = to_i915(dev);
3213 i8xx_disable_vblank(dev, pipe);
3215 if (--dev_priv->i945gm_vblank.enabled == 0)
3216 schedule_work(&dev_priv->i945gm_vblank.work);
3219 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
3221 struct drm_i915_private *dev_priv = to_i915(dev);
3222 unsigned long irqflags;
3224 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3225 i915_disable_pipestat(dev_priv, pipe,
3226 PIPE_START_VBLANK_INTERRUPT_STATUS);
3227 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3230 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3232 struct drm_i915_private *dev_priv = to_i915(dev);
3233 unsigned long irqflags;
3234 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3235 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3237 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3238 ilk_disable_display_irq(dev_priv, bit);
3239 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3242 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3244 struct drm_i915_private *dev_priv = to_i915(dev);
3245 unsigned long irqflags;
3247 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3248 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3249 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3252 static void i945gm_vblank_work_func(struct work_struct *work)
3254 struct drm_i915_private *dev_priv =
3255 container_of(work, struct drm_i915_private, i945gm_vblank.work);
3258 * Vblank interrupts fail to wake up the device from C3,
3259 * hence we want to prevent C3 usage while vblank interrupts
3260 * are enabled.
3262 pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3263 READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3264 dev_priv->i945gm_vblank.c3_disable_latency :
3265 PM_QOS_DEFAULT_VALUE);
3268 static int cstate_disable_latency(const char *name)
3270 const struct cpuidle_driver *drv;
3273 drv = cpuidle_get_driver();
3274 if (!drv)
3275 return 0;
3277 for (i = 0; i < drv->state_count; i++) {
3278 const struct cpuidle_state *state = &drv->states[i];
3280 if (!strcmp(state->name, name))
3281 return state->exit_latency ?
3282 state->exit_latency - 1 : 0;
3285 return 0;
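/*
 * Hypothetical usage sketch of the helper above (not called by the
 * driver): requesting a CPU wakeup latency just below C3's exit latency
 * (hence the "exit_latency - 1") keeps cpuidle from selecting C3 while
 * still allowing shallower states.
 */
static void i945gm_vblank_qos_raise_sketch(struct drm_i915_private *dev_priv)
{
	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
			      cstate_disable_latency("C3"));
}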
3288 static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3290 INIT_WORK(&dev_priv->i945gm_vblank.work,
3291 i945gm_vblank_work_func);
3293 dev_priv->i945gm_vblank.c3_disable_latency =
3294 cstate_disable_latency("C3");
3295 pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3296 PM_QOS_CPU_DMA_LATENCY,
3297 PM_QOS_DEFAULT_VALUE);
3300 static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3302 cancel_work_sync(&dev_priv->i945gm_vblank.work);
3303 pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
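/*
 * Hypothetical end-to-end use of the i945gm vblank QoS machinery above:
 * only the 0<->1 transitions of the enable count schedule the work, so
 * the QoS request flips exactly when the first vblank user arrives and
 * when the last one leaves.
 */
static void i945gm_vblank_qos_demo(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i945gm_vblank_work_init(dev_priv);
	i945gm_enable_vblank(dev, PIPE_A);	/* first user: work raises the request */
	i945gm_disable_vblank(dev, PIPE_A);	/* last user: work drops it to default */
	i945gm_vblank_work_fini(dev_priv);
}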
3306 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3308 if (HAS_PCH_NOP(dev_priv))
3309 return;
3311 GEN3_IRQ_RESET(SDE);
3313 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3314 I915_WRITE(SERR_INT, 0xffffffff);
3318 * SDEIER is also touched by the interrupt handler to work around missed PCH
3319 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3320 * instead we unconditionally enable all PCH interrupt sources here, but then
3321 * only unmask them as needed with SDEIMR.
3323 * This function needs to be called before interrupts are enabled.
3325 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3327 struct drm_i915_private *dev_priv = to_i915(dev);
3329 if (HAS_PCH_NOP(dev_priv))
3330 return;
3332 WARN_ON(I915_READ(SDEIER) != 0);
3333 I915_WRITE(SDEIER, 0xffffffff);
3334 POSTING_READ(SDEIER);
3337 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3340 if (INTEL_GEN(dev_priv) >= 6)
3341 GEN3_IRQ_RESET(GEN6_PM);
3344 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3346 if (IS_CHERRYVIEW(dev_priv))
3347 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3348 else
3349 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3351 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3352 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3354 i9xx_pipestat_irq_reset(dev_priv);
3356 GEN3_IRQ_RESET(VLV_);
3357 dev_priv->irq_mask = ~0u;
3360 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3366 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3368 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3369 for_each_pipe(dev_priv, pipe)
3370 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3372 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3373 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3374 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3375 I915_LPE_PIPE_A_INTERRUPT |
3376 I915_LPE_PIPE_B_INTERRUPT;
3378 if (IS_CHERRYVIEW(dev_priv))
3379 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3380 I915_LPE_PIPE_C_INTERRUPT;
3382 WARN_ON(dev_priv->irq_mask != ~0u);
3384 dev_priv->irq_mask = ~enable_mask;
3386 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3391 static void ironlake_irq_reset(struct drm_device *dev)
3393 struct drm_i915_private *dev_priv = to_i915(dev);
3396 if (IS_GEN(dev_priv, 7))
3397 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3399 if (IS_HASWELL(dev_priv)) {
3400 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3401 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3404 gen5_gt_irq_reset(dev_priv);
3406 ibx_irq_reset(dev_priv);
3409 static void valleyview_irq_reset(struct drm_device *dev)
3411 struct drm_i915_private *dev_priv = to_i915(dev);
3413 I915_WRITE(VLV_MASTER_IER, 0);
3414 POSTING_READ(VLV_MASTER_IER);
3416 gen5_gt_irq_reset(dev_priv);
3418 spin_lock_irq(&dev_priv->irq_lock);
3419 if (dev_priv->display_irqs_enabled)
3420 vlv_display_irq_reset(dev_priv);
3421 spin_unlock_irq(&dev_priv->irq_lock);
3424 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3426 GEN8_IRQ_RESET_NDX(GT, 0);
3427 GEN8_IRQ_RESET_NDX(GT, 1);
3428 GEN8_IRQ_RESET_NDX(GT, 2);
3429 GEN8_IRQ_RESET_NDX(GT, 3);
3432 static void gen8_irq_reset(struct drm_device *dev)
3434 struct drm_i915_private *dev_priv = to_i915(dev);
3437 gen8_master_intr_disable(dev_priv->uncore.regs);
3439 gen8_gt_irq_reset(dev_priv);
3441 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3442 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3444 for_each_pipe(dev_priv, pipe)
3445 if (intel_display_power_is_enabled(dev_priv,
3446 POWER_DOMAIN_PIPE(pipe)))
3447 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3449 GEN3_IRQ_RESET(GEN8_DE_PORT_);
3450 GEN3_IRQ_RESET(GEN8_DE_MISC_);
3451 GEN3_IRQ_RESET(GEN8_PCU_);
3453 if (HAS_PCH_SPLIT(dev_priv))
3454 ibx_irq_reset(dev_priv);
3457 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
3459 /* Disable RCS, BCS, VCS and VECS class engines. */
3460 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
3461 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);
3463 /* Mask all irqs on the RCS, BCS, VCS and VECS engines. */
3464 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
3465 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
3466 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
3467 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
3468 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
3470 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3471 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
3474 static void gen11_irq_reset(struct drm_device *dev)
3476 struct drm_i915_private *dev_priv = dev->dev_private;
3479 gen11_master_intr_disable(dev_priv->uncore.regs);
3481 gen11_gt_irq_reset(dev_priv);
3483 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3485 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3486 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3488 for_each_pipe(dev_priv, pipe)
3489 if (intel_display_power_is_enabled(dev_priv,
3490 POWER_DOMAIN_PIPE(pipe)))
3491 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3493 GEN3_IRQ_RESET(GEN8_DE_PORT_);
3494 GEN3_IRQ_RESET(GEN8_DE_MISC_);
3495 GEN3_IRQ_RESET(GEN11_DE_HPD_);
3496 GEN3_IRQ_RESET(GEN11_GU_MISC_);
3497 GEN3_IRQ_RESET(GEN8_PCU_);
3499 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3500 GEN3_IRQ_RESET(SDE);
3503 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3506 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3509 spin_lock_irq(&dev_priv->irq_lock);
3511 if (!intel_irqs_enabled(dev_priv)) {
3512 spin_unlock_irq(&dev_priv->irq_lock);
3516 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3517 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3518 dev_priv->de_irq_mask[pipe],
3519 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3521 spin_unlock_irq(&dev_priv->irq_lock);
3524 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3529 spin_lock_irq(&dev_priv->irq_lock);
3531 if (!intel_irqs_enabled(dev_priv)) {
3532 spin_unlock_irq(&dev_priv->irq_lock);
3536 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3537 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3539 spin_unlock_irq(&dev_priv->irq_lock);
3541 /* make sure we're done processing display irqs */
3542 synchronize_irq(dev_priv->drm.irq);
3545 static void cherryview_irq_reset(struct drm_device *dev)
3547 struct drm_i915_private *dev_priv = to_i915(dev);
3549 I915_WRITE(GEN8_MASTER_IRQ, 0);
3550 POSTING_READ(GEN8_MASTER_IRQ);
3552 gen8_gt_irq_reset(dev_priv);
3554 GEN3_IRQ_RESET(GEN8_PCU_);
3556 spin_lock_irq(&dev_priv->irq_lock);
3557 if (dev_priv->display_irqs_enabled)
3558 vlv_display_irq_reset(dev_priv);
3559 spin_unlock_irq(&dev_priv->irq_lock);
3562 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3563 const u32 hpd[HPD_NUM_PINS])
3565 struct intel_encoder *encoder;
3566 u32 enabled_irqs = 0;
3568 for_each_intel_encoder(&dev_priv->drm, encoder)
3569 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3570 enabled_irqs |= hpd[encoder->hpd_pin];
3572 return enabled_irqs;
3575 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3580 * Enable digital hotplug on the PCH, and configure the DP short pulse
3581 * duration to 2ms (which is the minimum in the Display Port spec).
3582 * The pulse duration bits are reserved on LPT+.
3584 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3585 hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3586 PORTC_PULSE_DURATION_MASK |
3587 PORTD_PULSE_DURATION_MASK);
3588 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3589 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3590 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3592 * When CPU and PCH are on the same package, port A
3593 * HPD must be enabled in both north and south.
3595 if (HAS_PCH_LPT_LP(dev_priv))
3596 hotplug |= PORTA_HOTPLUG_ENABLE;
3597 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
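/*
 * The pulse-duration update above is a plain read-modify-write of a
 * register field; as a generic sketch (hypothetical helper, never used):
 */
static u32 rmw_field_sketch(u32 reg_val, u32 field_mask, u32 field_val)
{
	/* Clear the whole field first so stale bits cannot survive. */
	return (reg_val & ~field_mask) | field_val;
}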
3600 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3602 u32 hotplug_irqs, enabled_irqs;
3604 if (HAS_PCH_IBX(dev_priv)) {
3605 hotplug_irqs = SDE_HOTPLUG_MASK;
3606 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3607 } else {
3608 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3609 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3612 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3614 ibx_hpd_detection_setup(dev_priv);
3617 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
3621 hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3622 hotplug |= ICP_DDIA_HPD_ENABLE |
3623 ICP_DDIB_HPD_ENABLE;
3624 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3626 hotplug = I915_READ(SHOTPLUG_CTL_TC);
3627 hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
3628 ICP_TC_HPD_ENABLE(PORT_TC2) |
3629 ICP_TC_HPD_ENABLE(PORT_TC3) |
3630 ICP_TC_HPD_ENABLE(PORT_TC4);
3631 I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3634 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3636 u32 hotplug_irqs, enabled_irqs;
3638 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3639 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3641 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3643 icp_hpd_detection_setup(dev_priv);
3646 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3650 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3651 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3652 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3653 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3654 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3655 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3657 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3658 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3659 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3660 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3661 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3662 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3665 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3667 u32 hotplug_irqs, enabled_irqs;
3670 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3671 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3673 val = I915_READ(GEN11_DE_HPD_IMR);
3674 val &= ~hotplug_irqs;
3675 I915_WRITE(GEN11_DE_HPD_IMR, val);
3676 POSTING_READ(GEN11_DE_HPD_IMR);
3678 gen11_hpd_detection_setup(dev_priv);
3680 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3681 icp_hpd_irq_setup(dev_priv);
3684 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3688 /* Display WA #1179 WaHardHangonHotPlug: cnp */
3689 if (HAS_PCH_CNP(dev_priv)) {
3690 val = I915_READ(SOUTH_CHICKEN1);
3691 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3692 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3693 I915_WRITE(SOUTH_CHICKEN1, val);
3696 /* Enable digital hotplug on the PCH */
3697 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3698 hotplug |= PORTA_HOTPLUG_ENABLE |
3699 PORTB_HOTPLUG_ENABLE |
3700 PORTC_HOTPLUG_ENABLE |
3701 PORTD_HOTPLUG_ENABLE;
3702 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3704 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3705 hotplug |= PORTE_HOTPLUG_ENABLE;
3706 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3709 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3711 u32 hotplug_irqs, enabled_irqs;
3713 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3714 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3716 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3718 spt_hpd_detection_setup(dev_priv);
3721 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3726 * Enable digital hotplug on the CPU, and configure the DP short pulse
3727 * duration to 2ms (which is the minimum in the Display Port spec).
3728 * The pulse duration bits are reserved on HSW+.
3730 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3731 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3732 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3733 DIGITAL_PORTA_PULSE_DURATION_2ms;
3734 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3737 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3739 u32 hotplug_irqs, enabled_irqs;
3741 if (INTEL_GEN(dev_priv) >= 8) {
3742 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3743 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3745 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3746 } else if (INTEL_GEN(dev_priv) >= 7) {
3747 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3748 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3750 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3751 } else {
3752 hotplug_irqs = DE_DP_A_HOTPLUG;
3753 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3755 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3758 ilk_hpd_detection_setup(dev_priv);
3760 ibx_hpd_irq_setup(dev_priv);
3763 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3768 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3769 hotplug |= PORTA_HOTPLUG_ENABLE |
3770 PORTB_HOTPLUG_ENABLE |
3771 PORTC_HOTPLUG_ENABLE;
3773 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3774 hotplug, enabled_irqs);
3775 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3778 * On BXT the invert bit has to be set based on the AOB design
3779 * for the HPD detection logic, so update it from the VBT fields.
3781 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3782 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3783 hotplug |= BXT_DDIA_HPD_INVERT;
3784 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3785 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3786 hotplug |= BXT_DDIB_HPD_INVERT;
3787 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3788 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3789 hotplug |= BXT_DDIC_HPD_INVERT;
3791 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3794 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3796 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3799 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3801 u32 hotplug_irqs, enabled_irqs;
3803 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3804 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3806 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3808 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3811 static void ibx_irq_postinstall(struct drm_device *dev)
3813 struct drm_i915_private *dev_priv = to_i915(dev);
3816 if (HAS_PCH_NOP(dev_priv))
3817 return;
3819 if (HAS_PCH_IBX(dev_priv))
3820 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3821 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3822 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3823 else
3824 mask = SDE_GMBUS_CPT;
3826 gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3827 I915_WRITE(SDEIMR, ~mask);
3829 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3830 HAS_PCH_LPT(dev_priv))
3831 ibx_hpd_detection_setup(dev_priv);
3832 else
3833 spt_hpd_detection_setup(dev_priv);
3836 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3838 struct drm_i915_private *dev_priv = to_i915(dev);
3839 u32 pm_irqs, gt_irqs;
3841 pm_irqs = gt_irqs = 0;
3843 dev_priv->gt_irq_mask = ~0;
3844 if (HAS_L3_DPF(dev_priv)) {
3845 /* L3 parity interrupt is always unmasked. */
3846 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3847 gt_irqs |= GT_PARITY_ERROR(dev_priv);
3850 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3851 if (IS_GEN(dev_priv, 5)) {
3852 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3853 } else {
3854 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3857 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3859 if (INTEL_GEN(dev_priv) >= 6) {
3861 * RPS interrupts will get enabled/disabled on demand when RPS
3862 * itself is enabled/disabled.
3864 if (HAS_ENGINE(dev_priv, VECS0)) {
3865 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3866 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3869 dev_priv->pm_imr = 0xffffffff;
3870 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
3874 static int ironlake_irq_postinstall(struct drm_device *dev)
3876 struct drm_i915_private *dev_priv = to_i915(dev);
3877 u32 display_mask, extra_mask;
3879 if (INTEL_GEN(dev_priv) >= 7) {
3880 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3881 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3882 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3883 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3884 DE_DP_A_HOTPLUG_IVB);
3886 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3887 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3888 DE_PIPEA_CRC_DONE | DE_POISON);
3889 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3890 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3894 if (IS_HASWELL(dev_priv)) {
3895 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
3896 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
3897 display_mask |= DE_EDP_PSR_INT_HSW;
3900 dev_priv->irq_mask = ~display_mask;
3902 ibx_irq_pre_postinstall(dev);
3904 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3906 gen5_gt_irq_postinstall(dev);
3908 ilk_hpd_detection_setup(dev_priv);
3910 ibx_irq_postinstall(dev);
3912 if (IS_IRONLAKE_M(dev_priv)) {
3913 /* Enable PCU event interrupts
3915 * spinlocking not required here for correctness since interrupt
3916 * setup is guaranteed to run in single-threaded context. But we
3917 * need it to make the assert_spin_locked happy. */
3918 spin_lock_irq(&dev_priv->irq_lock);
3919 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3920 spin_unlock_irq(&dev_priv->irq_lock);
3926 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3928 lockdep_assert_held(&dev_priv->irq_lock);
3930 if (dev_priv->display_irqs_enabled)
3931 return;
3933 dev_priv->display_irqs_enabled = true;
3935 if (intel_irqs_enabled(dev_priv)) {
3936 vlv_display_irq_reset(dev_priv);
3937 vlv_display_irq_postinstall(dev_priv);
3941 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3943 lockdep_assert_held(&dev_priv->irq_lock);
3945 if (!dev_priv->display_irqs_enabled)
3946 return;
3948 dev_priv->display_irqs_enabled = false;
3950 if (intel_irqs_enabled(dev_priv))
3951 vlv_display_irq_reset(dev_priv);
3955 static int valleyview_irq_postinstall(struct drm_device *dev)
3957 struct drm_i915_private *dev_priv = to_i915(dev);
3959 gen5_gt_irq_postinstall(dev);
3961 spin_lock_irq(&dev_priv->irq_lock);
3962 if (dev_priv->display_irqs_enabled)
3963 vlv_display_irq_postinstall(dev_priv);
3964 spin_unlock_irq(&dev_priv->irq_lock);
3966 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3967 POSTING_READ(VLV_MASTER_IER);
3972 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3974 /* These are interrupts we'll toggle with the ring mask register */
3975 u32 gt_interrupts[] = {
3976 (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3977 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3978 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3979 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
3981 (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
3982 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
3983 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3984 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
3986 0,
3988 (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3989 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
3992 dev_priv->pm_ier = 0x0;
3993 dev_priv->pm_imr = ~dev_priv->pm_ier;
3994 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3995 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3997 * RPS interrupts will get enabled/disabled on demand when RPS itself
3998 * is enabled/disabled. The same will be the case for GuC interrupts.
4000 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4001 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4004 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4006 u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4007 u32 de_pipe_enables;
4008 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
4009 u32 de_port_enables;
4010 u32 de_misc_masked = GEN8_DE_EDP_PSR;
4013 if (INTEL_GEN(dev_priv) <= 10)
4014 de_misc_masked |= GEN8_DE_MISC_GSE;
4016 if (INTEL_GEN(dev_priv) >= 9) {
4017 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4018 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4019 GEN9_AUX_CHANNEL_D;
4020 if (IS_GEN9_LP(dev_priv))
4021 de_port_masked |= BXT_DE_PORT_GMBUS;
4022 } else {
4023 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4026 if (INTEL_GEN(dev_priv) >= 11)
4027 de_port_masked |= ICL_AUX_CHANNEL_E;
4029 if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4030 de_port_masked |= CNL_AUX_CHANNEL_F;
4032 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4033 GEN8_PIPE_FIFO_UNDERRUN;
4035 de_port_enables = de_port_masked;
4036 if (IS_GEN9_LP(dev_priv))
4037 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4038 else if (IS_BROADWELL(dev_priv))
4039 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
4041 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4042 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4044 for_each_pipe(dev_priv, pipe) {
4045 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4047 if (intel_display_power_is_enabled(dev_priv,
4048 POWER_DOMAIN_PIPE(pipe)))
4049 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
4050 dev_priv->de_irq_mask[pipe],
4054 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4055 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4057 if (INTEL_GEN(dev_priv) >= 11) {
4058 u32 de_hpd_masked = 0;
4059 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4060 GEN11_DE_TBT_HOTPLUG_MASK;
4062 GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
4063 gen11_hpd_detection_setup(dev_priv);
4064 } else if (IS_GEN9_LP(dev_priv)) {
4065 bxt_hpd_detection_setup(dev_priv);
4066 } else if (IS_BROADWELL(dev_priv)) {
4067 ilk_hpd_detection_setup(dev_priv);
4071 static int gen8_irq_postinstall(struct drm_device *dev)
4073 struct drm_i915_private *dev_priv = to_i915(dev);
4075 if (HAS_PCH_SPLIT(dev_priv))
4076 ibx_irq_pre_postinstall(dev);
4078 gen8_gt_irq_postinstall(dev_priv);
4079 gen8_de_irq_postinstall(dev_priv);
4081 if (HAS_PCH_SPLIT(dev_priv))
4082 ibx_irq_postinstall(dev);
4084 gen8_master_intr_enable(dev_priv->uncore.regs);
4089 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4091 const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4093 BUILD_BUG_ON(irqs & 0xffff0000);
4095 /* Enable RCS, BCS, VCS and VECS class interrupts. */
4096 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
4097 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);
4099 /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4100 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
4101 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
4102 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
4103 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
4104 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
4107 * RPS interrupts will get enabled/disabled on demand when RPS itself
4108 * is enabled/disabled.
4110 dev_priv->pm_ier = 0x0;
4111 dev_priv->pm_imr = ~dev_priv->pm_ier;
4112 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4113 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
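/*
 * Sketch of the register layout relied on above: each gen11 INTR_ENABLE
 * register carries two engines, one per 16-bit half, which is why the
 * irqs value must fit in 16 bits (the BUILD_BUG_ON) and is written as
 * "irqs << 16 | irqs". Hypothetical helper, never called:
 */
static u32 gen11_pack_intr_enable_sketch(u16 upper_engine, u16 lower_engine)
{
	return (u32)upper_engine << 16 | lower_engine;
}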
4116 static void icp_irq_postinstall(struct drm_device *dev)
4118 struct drm_i915_private *dev_priv = to_i915(dev);
4119 u32 mask = SDE_GMBUS_ICP;
4121 WARN_ON(I915_READ(SDEIER) != 0);
4122 I915_WRITE(SDEIER, 0xffffffff);
4123 POSTING_READ(SDEIER);
4125 gen3_assert_iir_is_zero(dev_priv, SDEIIR);
4126 I915_WRITE(SDEIMR, ~mask);
4128 icp_hpd_detection_setup(dev_priv);
4131 static int gen11_irq_postinstall(struct drm_device *dev)
4133 struct drm_i915_private *dev_priv = dev->dev_private;
4134 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4136 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4137 icp_irq_postinstall(dev);
4139 gen11_gt_irq_postinstall(dev_priv);
4140 gen8_de_irq_postinstall(dev_priv);
4142 GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4144 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4146 gen11_master_intr_enable(dev_priv->uncore.regs);
4147 POSTING_READ(GEN11_GFX_MSTR_IRQ);
4152 static int cherryview_irq_postinstall(struct drm_device *dev)
4154 struct drm_i915_private *dev_priv = to_i915(dev);
4156 gen8_gt_irq_postinstall(dev_priv);
4158 spin_lock_irq(&dev_priv->irq_lock);
4159 if (dev_priv->display_irqs_enabled)
4160 vlv_display_irq_postinstall(dev_priv);
4161 spin_unlock_irq(&dev_priv->irq_lock);
4163 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4164 POSTING_READ(GEN8_MASTER_IRQ);
4169 static void i8xx_irq_reset(struct drm_device *dev)
4171 struct drm_i915_private *dev_priv = to_i915(dev);
4173 i9xx_pipestat_irq_reset(dev_priv);
4178 static int i8xx_irq_postinstall(struct drm_device *dev)
4180 struct drm_i915_private *dev_priv = to_i915(dev);
4183 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4184 I915_ERROR_MEMORY_REFRESH));
4186 /* Unmask the interrupts that we always want on. */
4187 dev_priv->irq_mask =
4188 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4189 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4190 I915_MASTER_ERROR_INTERRUPT);
4192 enable_mask =
4193 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4194 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4195 I915_MASTER_ERROR_INTERRUPT |
4196 I915_USER_INTERRUPT;
4198 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4200 /* Interrupt setup is already guaranteed to be single-threaded; this is
4201 * just to make the assert_spin_locked check happy. */
4202 spin_lock_irq(&dev_priv->irq_lock);
4203 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4204 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4205 spin_unlock_irq(&dev_priv->irq_lock);
4210 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
4211 u16 *eir, u16 *eir_stuck)
4215 *eir = I915_READ16(EIR);
4218 I915_WRITE16(EIR, *eir);
4220 *eir_stuck = I915_READ16(EIR);
4221 if (*eir_stuck == 0)
4222 return;
4225 * Toggle all EMR bits to make sure we get an edge
4226 * in the ISR master error bit if we don't clear
4227 * all the EIR bits. Otherwise the edge triggered
4228 * IIR on i965/g4x wouldn't notice that an interrupt
4229 * is still pending. Also some EIR bits can't be
4230 * cleared except by handling the underlying error
4231 * (or by a GPU reset) so we mask any bit that
4232 * remains set.
4234 emr = I915_READ16(EMR);
4235 I915_WRITE16(EMR, 0xffff);
4236 I915_WRITE16(EMR, emr | *eir_stuck);
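/*
 * The EMR toggle above, isolated as a hypothetical helper (never called):
 * masking everything momentarily drops the ISR master error bit to 0, and
 * the second write (old mask plus stuck EIR bits) re-raises it, giving the
 * edge-triggered IIR the 0->1 transition it needs.
 */
static void emr_edge_toggle_sketch(struct drm_i915_private *dev_priv,
				   u16 eir_stuck)
{
	u16 emr = I915_READ16(EMR);

	I915_WRITE16(EMR, 0xffff);
	I915_WRITE16(EMR, emr | eir_stuck);
}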
4239 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
4240 u16 eir, u16 eir_stuck)
4242 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4245 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	if (*eir)
		I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = I915_READ16(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE16(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	if (IS_I945GM(dev_priv))
		i945gm_vblank_work_init(dev_priv);

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
					   GEN6_PM_RP_DOWN_THRESHOLD |
					   GEN6_PM_RP_DOWN_TIMEOUT);

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang (and VLV, CHV may) on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV, CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	else if (INTEL_GEN(dev_priv) >= 3)
		dev->driver->get_vblank_counter = i915_get_vblank_counter;

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN(dev_priv, 2))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		dev->driver->irq_handler = gen11_irq_handler;
		dev->driver->irq_preinstall = gen11_irq_reset;
		dev->driver->irq_postinstall = gen11_irq_postinstall;
		dev->driver->irq_uninstall = gen11_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN(dev_priv, 2)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_I945GM(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i945gm_enable_vblank;
			dev->driver->disable_vblank = i945gm_disable_vblank;
		} else if (IS_GEN(dev_priv, 3)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}
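
/*
 * Orientation sketch (illustration only): a simplified view of how the DRM
 * core drives the hooks installed above once intel_irq_install() is called.
 * This paraphrases the legacy drm_irq_install() helper; it is not i915 code.
 *
 *	dev->driver->irq_preinstall(dev);	(quiesce the hardware)
 *	request_irq(irq, dev->driver->irq_handler, ...);
 *	dev->driver->irq_postinstall(dev);	(unmask/enable interrupt sources)
 */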

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	if (IS_I945GM(i915))
		i945gm_vblank_work_fini(i915);

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
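
/*
 * Usage sketch (illustration only, not part of the driver): the expected
 * pairing of the two-stage setup on the load path. The caller name below
 * is hypothetical.
 *
 *	static int example_load(struct drm_i915_private *i915)
 *	{
 *		intel_irq_init(i915);		(vtables, work items; no IRQ yet)
 *		return intel_irq_install(i915);	(actually request the IRQ)
 *	}
 *
 * Teardown reverses this: intel_irq_uninstall() before intel_irq_fini().
 */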

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
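
/*
 * Usage sketch (illustration only): how the two runtime-pm helpers above are
 * expected to bracket a suspended period. The caller names are hypothetical.
 *
 *	static int example_runtime_suspend(struct drm_i915_private *i915)
 *	{
 *		intel_runtime_pm_disable_interrupts(i915);
 *		... power down the device ...
 *		return 0;
 *	}
 *
 *	static int example_runtime_resume(struct drm_i915_private *i915)
 *	{
 *		... power up the device ...
 *		intel_runtime_pm_enable_interrupts(i915);
 *		return 0;
 *	}
 */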