/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/cpuidle.h>
#include <linux/circ_buf.h>
#include <drm/drm_irq.h>
#include <drm/drm_drv.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
        [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
        [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
        [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
        [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
        [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
        [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
        [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
        [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
        [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
        I915_WRITE16(type##IMR, 0xffff); \
        POSTING_READ16(type##IMR); \
        I915_WRITE16(type##IER, 0); \
        I915_WRITE16(type##IIR, 0xffff); \
        POSTING_READ16(type##IIR); \
        I915_WRITE16(type##IIR, 0xffff); \
        POSTING_READ16(type##IIR); \
} while (0)
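
/*
 * Usage sketch (illustrative, exact call sites vary by platform): the reset
 * macros take the register name prefix as a token-pasted argument, e.g.
 *
 *	GEN3_IRQ_RESET(GT);	expands to writes of GTIMR/GTIER/GTIIR
 *	GEN3_IRQ_RESET(SDE);	expands to writes of SDEIMR/SDEIER/SDEIIR
 *
 * IIR is written twice because the hardware can latch a second event
 * behind the one being cleared (see the "Be paranoid" note above).
 */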

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg)
{
        u32 val = I915_READ(reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg)
{
        u16 val = I915_READ16(reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        I915_WRITE16(reg, 0xffff);
        POSTING_READ16(reg);
        I915_WRITE16(reg, 0xffff);
        POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
        gen3_assert_iir_is_zero(dev_priv, type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
        gen2_assert_iir_is_zero(dev_priv, type##IIR); \
        I915_WRITE16(type##IER, (ier_val)); \
        I915_WRITE16(type##IMR, (imr_val)); \
        POSTING_READ16(type##IMR); \
} while (0)
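
/*
 * Example (illustrative, the actual masks are platform specific): a
 * postinstall hook can enable a set of GT interrupts by unmasking them in
 * IMR and enabling them in IER in one go:
 *
 *	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * Note that the IIR-is-zero assert runs first, catching events that were
 * left pending across a reset.
 */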

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     u32 mask,
                                     u32 bits)
{
        u32 val;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles
 * from interfering, these bits are protected by a spinlock. Since
 * this function is usually not called from a context where the lock
 * is held already, this function acquires the lock itself. A
 * non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   u32 mask,
                                   u32 bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}
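
/*
 * For example (illustrative, the mask/bits values depend on the caller),
 * turning off all hotplug detection is a full mask with no bits enabled:
 *
 *	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
 *
 * which is the pattern gmch-style irq reset paths typically use.
 */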

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
                         const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
                                const unsigned int bank,
                                const unsigned int bit)
{
        void __iomem * const regs = i915->uncore.regs;
        u32 dw;

        lockdep_assert_held(&i915->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(i915, bank, bit);

                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody.
                 */
                raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

                return true;
        }

        return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                            u32 interrupt_mask,
                            u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}
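
/*
 * Callers typically wrap this in enable/disable helpers rather than using
 * it directly; a sketch of the usual convention:
 *
 *	ilk_update_display_irq(dev_priv, bits, bits);	unmask (enable)
 *	ilk_update_display_irq(dev_priv, bits, 0);	mask (disable)
 *
 * Since DEIMR is a mask register, "enabling" an interrupt means clearing
 * its bit in the mask.
 */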

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              u32 interrupt_mask,
                              u32 enabled_irq_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
        POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

        return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 11)
                return GEN11_GPM_WGBOXPERF_INTR_MASK;
        else if (INTEL_GEN(dev_priv) >= 8)
                return GEN8_GT_IMR(2);
        else
                return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 11)
                return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
        else if (INTEL_GEN(dev_priv) >= 8)
                return GEN8_GT_IER(2);
        else
                return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              u32 interrupt_mask,
                              u32 enabled_irq_mask)
{
        u32 new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        new_val = dev_priv->pm_imr;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_imr) {
                dev_priv->pm_imr = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        lockdep_assert_held(&dev_priv->irq_lock);

        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
        POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier |= enable_mask;
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        gen6_unmask_pm_irq(dev_priv, enable_mask);
        /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(dev_priv, disable_mask);
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        /* a barrier is missing here, but we don't really need one */
}
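
/*
 * The ordering above appears deliberate (a reading, not stated in the
 * code): on enable, IER is written before the IMR unmask so the event
 * can propagate as soon as it is unmasked; on disable, IMR is masked
 * before IER is cleared, the mirror image. Roughly, when enabling:
 *
 *	IER |= mask;  IMR &= ~mask;  POSTING_READ(IMR);
 */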

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
                ;

        dev_priv->gt_pm.rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
        dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON_ONCE(rps->pm_iir);

        if (INTEL_GEN(dev_priv) >= 11)
                WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
        else
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

        rps->interrupts_enabled = true;
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (!READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        rps->interrupts_enabled = false;

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

        gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        /* Now that we will not be generating any more work, flush any
         * outstanding tasks. As we are called on the RPS idle path,
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
        cancel_work_sync(&rps->work);
        if (INTEL_GEN(dev_priv) >= 11)
                gen11_reset_rps_interrupts(dev_priv);
        else
                gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        if (!dev_priv->guc.interrupts_enabled) {
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
                                       dev_priv->pm_guc_events);
                dev_priv->guc.interrupts_enabled = true;
                gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
        }
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->guc.interrupts_enabled = false;

        gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;
        u32 old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        old_val = I915_READ(GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
                POSTING_READ(GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
                         u32 interrupt_mask,
                         u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  u32 interrupt_mask,
                                  u32 enabled_irq_mask)
{
        u32 sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
                              enum pipe pipe)
{
        u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
        u32 enable_mask = status_mask << 16;

        lockdep_assert_held(&dev_priv->irq_lock);

        if (INTEL_GEN(dev_priv) < 5)
                goto out;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
        WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                  status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                  pipe_name(pipe), enable_mask, status_mask);

        return enable_mask;
}
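
/*
 * PIPESTAT packs enable bits in the high 16 bits and the corresponding
 * status bits in the low 16 bits, hence the status_mask << 16 above.
 * For example, for the vblank interrupt:
 *
 *	status_mask = PIPE_VBLANK_INTERRUPT_STATUS;	bit 1
 *	enable_mask = status_mask << 16;		bit 17, the enable
 *
 * Writing enable_mask | status_mask, as the helpers below do, both arms
 * the interrupt and clears any stale status in a single register write.
 */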

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
                          enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
                           enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->opregion.asle)
                return false;

        return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!i915_has_asle(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_GEN(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
        const struct drm_display_mode *mode = &vblank->hwmode;
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        unsigned long irqflags;

        /*
         * On i965gm TV output the frame counter only works up to
         * the point when we enable the TV encoder. After that the
         * frame counter ceases to work and reads zero. We need a
         * vblank wait before enabling the TV encoder and so we
         * have to enable vblank interrupts while the frame counter
         * is still in a working state. However the core vblank code
         * does not like us returning non-zero frame counter values
         * when we've told it that we don't have a working frame
         * counter. Thus we must stop non-zero values leaking out.
         */
        if (!vblank->max_vblank_count)
                return 0;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ_FW(low_frame);
                high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
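
/*
 * Worked example of the cooking above (illustrative values): with
 * vbl_start already converted to a pixel count, a hardware frame counter
 * reading of high1 = 0x12 and low = 0x34, plus a pixel counter that has
 * passed vbl_start, yields ((0x12 << 8) | 0x34) + 1 = 0x1235; i.e. the
 * vblank counter ticks over as soon as vblank starts instead of waiting
 * for the next start of active.
 */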

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * either because the timings are driven from the PORT or
 * because of issues with scanline register updates.
 * This function uses the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_vblank_crtc *vblank =
                &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        const struct drm_display_mode *mode = &vblank->hwmode;
        u32 vblank_start = mode->crtc_vblank_start;
        u32 vtotal = mode->crtc_vtotal;
        u32 htotal = mode->crtc_htotal;
        u32 clock = mode->crtc_clock;
        u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

        /*
         * To avoid the race condition where we might cross into the
         * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * during the same frame.
         */
        do {
                /*
                 * This field provides read back of the display
                 * pipe frame time stamp. The time stamp value
                 * is sampled at every start of vertical blank.
                 */
                scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

                /*
                 * The TIMESTAMP_CTR register has the current
                 * time stamp value.
                 */
                scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

                scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
        } while (scan_post_time != scan_prev_time);

        scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
                                        clock), 1000 * htotal);
        scanline = min(scanline, vtotal - 1);
        scanline = (scanline + vblank_start) % vtotal;

        return scanline;
}
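
/*
 * Worked example (illustrative numbers): crtc_clock = 148500 kHz and
 * htotal = 2200 means one line takes roughly 14.8 usec. A delta of
 * 7400 usec between the frame timestamp and now works out to
 * 7400 * 148500 / (1000 * 2200) = 499 lines (truncated) past
 * vblank_start, which the modulo above then folds back into 0..vtotal-1.
 */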

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode;
        struct drm_vblank_crtc *vblank;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        if (!crtc->active)
                return -1;

        vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        mode = &vblank->hwmode;

        if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
                return __intel_get_crtc_scanline_from_timestamp(crtc);

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN(dev_priv, 2))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank.  So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev_priv) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                     bool in_vblank_irq, int *vpos, int *hpos,
                                     ktime_t *stime, ktime_t *etime,
                                     const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                pipe);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        unsigned long irqflags;
        bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
                IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
                mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return false;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (use_scanline_counter) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to keep
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (use_scanline_counter) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        return true;
}
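
/*
 * Example of the sign convention returned in *vpos (illustrative mode:
 * vtotal = 1125, vbl_start = 1084, vbl_end = 1125): a raw scanline of
 * 1100 lies inside vblank and becomes 1100 - 1125 = -25, counting up
 * towards 0 at vbl_end; a raw scanline of 10 lies in active video and
 * stays 10 + 1125 - 1125 = 10.
 */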

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev_priv, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->ktime = ktime_get_raw();
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        const struct intel_rps_ei *prev = &rps->ei;
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);

        if (prev->ktime) {
                u64 time, c0;
                u32 render, media;

                time = ktime_us_delta(now.ktime, prev->ktime);

                time *= dev_priv->czclk_freq;

                /* Workload can be split between render + media,
                 * e.g. SwapBuffers being blitted in X after being rendered in
                 * mesa. To account for this we need to combine both engines
                 * into our activity counter.
                 */
                render = now.render_c0 - prev->render_c0;
                media = now.media_c0 - prev->media_c0;
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

                if (c0 > time * rps->power.up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
                else if (c0 < time * rps->power.down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }

        rps->ei = now;
        return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, gt_pm.rps.work);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        bool client_boost = false;
        int new_delay, adj, min, max;
        u32 pm_iir = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        if (rps->interrupts_enabled) {
                pm_iir = fetch_and_zero(&rps->pm_iir);
                client_boost = atomic_read(&rps->num_waiters);
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                goto out;

        mutex_lock(&dev_priv->pcu_lock);

        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

        adj = rps->last_adj;
        new_delay = rps->cur_freq;
        min = rps->min_freq_softlimit;
        max = rps->max_freq_softlimit;
        if (client_boost)
                max = rps->max_freq;
        if (client_boost && new_delay < rps->boost_freq) {
                new_delay = rps->boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

                if (new_delay >= rps->max_freq_softlimit)
                        adj = 0;
        } else if (client_boost) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (rps->cur_freq > rps->efficient_freq)
                        new_delay = rps->efficient_freq;
                else if (rps->cur_freq > rps->min_freq_softlimit)
                        new_delay = rps->min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

                if (new_delay <= rps->min_freq_softlimit)
                        adj = 0;
        } else { /* unknown event */
                adj = 0;
        }

        rps->last_adj = adj;

        /*
         * Limit deboosting and boosting to keep ourselves at the extremes
         * when in the respective power modes (i.e. slowly decrease frequencies
         * while in the HIGH_POWER zone and slowly increase frequencies while
         * in the LOW_POWER zone). On idle, we will hit the timeout and drop
         * to the next level quickly, and conversely if busy we expect to
         * hit a waitboost and rapidly switch into max power.
         */
        if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
            (adj > 0 && rps->power.mode == LOW_POWER))
                rps->last_adj = 0;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);

        if (intel_set_rps(dev_priv, new_delay)) {
                DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
                rps->last_adj = 0;
        }

        mutex_unlock(&dev_priv->pcu_lock);

out:
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        spin_lock_irq(&dev_priv->irq_lock);
        if (rps->interrupts_enabled)
                gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}
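
/*
 * Example of the adj ramp above (illustrative): a run of consecutive
 * UP_THRESHOLD events steps the frequency by +1, +2, +4, ... (or +2, +4,
 * +8 on CHV, which needs even encodings), with adj reset to 0 whenever
 * the softlimit is hit, a client boost intervenes, the direction changes
 * or an unknown event arrives.
 */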
1332
1333
1334 /**
1335  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1336  * occurred.
1337  * @work: workqueue struct
1338  *
1339  * Doesn't actually do anything except notify userspace. As a consequence of
1340  * this event, userspace should try to remap the bad rows since statistically
1341  * it is likely the same row is more likely to go bad again.
1342  */
1343 static void ivybridge_parity_work(struct work_struct *work)
1344 {
1345         struct drm_i915_private *dev_priv =
1346                 container_of(work, typeof(*dev_priv), l3_parity.error_work);
1347         u32 error_status, row, bank, subbank;
1348         char *parity_event[6];
1349         u32 misccpctl;
1350         u8 slice = 0;
1351
1352         /* We must turn off DOP level clock gating to access the L3 registers.
1353          * In order to prevent a get/put style interface, acquire struct mutex
1354          * any time we access those registers.
1355          */
1356         mutex_lock(&dev_priv->drm.struct_mutex);
1357
1358         /* If we've screwed up tracking, just let the interrupt fire again */
1359         if (WARN_ON(!dev_priv->l3_parity.which_slice))
1360                 goto out;
1361
1362         misccpctl = I915_READ(GEN7_MISCCPCTL);
1363         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1364         POSTING_READ(GEN7_MISCCPCTL);
1365
1366         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1367                 i915_reg_t reg;
1368
1369                 slice--;
1370                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1371                         break;
1372
1373                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1374
1375                 reg = GEN7_L3CDERRST1(slice);
1376
1377                 error_status = I915_READ(reg);
1378                 row = GEN7_PARITY_ERROR_ROW(error_status);
1379                 bank = GEN7_PARITY_ERROR_BANK(error_status);
1380                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1381
1382                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1383                 POSTING_READ(reg);
1384
1385                 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1386                 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1387                 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1388                 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1389                 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1390                 parity_event[5] = NULL;
1391
1392                 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1393                                    KOBJ_CHANGE, parity_event);
1394
1395                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1396                           slice, row, bank, subbank);
1397
1398                 kfree(parity_event[4]);
1399                 kfree(parity_event[3]);
1400                 kfree(parity_event[2]);
1401                 kfree(parity_event[1]);
1402         }
1403
1404         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1405
1406 out:
1407         WARN_ON(dev_priv->l3_parity.which_slice);
1408         spin_lock_irq(&dev_priv->irq_lock);
1409         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1410         spin_unlock_irq(&dev_priv->irq_lock);
1411
1412         mutex_unlock(&dev_priv->drm.struct_mutex);
1413 }
1414
1415 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1416                                                u32 iir)
1417 {
1418         if (!HAS_L3_DPF(dev_priv))
1419                 return;
1420
1421         spin_lock(&dev_priv->irq_lock);
1422         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1423         spin_unlock(&dev_priv->irq_lock);
1424
1425         iir &= GT_PARITY_ERROR(dev_priv);
1426         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1427                 dev_priv->l3_parity.which_slice |= 1 << 1;
1428
1429         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1430                 dev_priv->l3_parity.which_slice |= 1 << 0;
1431
1432         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1433 }
1434
1435 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1436                                u32 gt_iir)
1437 {
1438         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1439                 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1440         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1441                 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1442 }
1443
1444 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1445                                u32 gt_iir)
1446 {
1447         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1448                 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1449         if (gt_iir & GT_BSD_USER_INTERRUPT)
1450                 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1451         if (gt_iir & GT_BLT_USER_INTERRUPT)
1452                 intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1453
1454         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1455                       GT_BSD_CS_ERROR_INTERRUPT |
1456                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1457                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1458
1459         if (gt_iir & GT_PARITY_ERROR(dev_priv))
1460                 ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1461 }
1462
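/*
 * Note: iir is the raw GT_IIR value shifted so that this engine's bits
 * occupy the low bits; two engines share each 32-bit GT_IIR register,
 * one per 16-bit half (see gen8_gt_irq_handler() below).
 */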
1463 static void
1464 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1465 {
1466         bool tasklet = false;
1467
1468         if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
1469                 tasklet = true;
1470
1471         if (iir & GT_RENDER_USER_INTERRUPT) {
1472                 intel_engine_breadcrumbs_irq(engine);
1473                 tasklet |= USES_GUC_SUBMISSION(engine->i915);
1474         }
1475
1476         if (tasklet)
1477                 tasklet_hi_schedule(&engine->execlists.tasklet);
1478 }
1479
1480 static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1481                             u32 master_ctl, u32 gt_iir[4])
1482 {
1483         void __iomem * const regs = i915->uncore.regs;
1484
1485 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1486                       GEN8_GT_BCS_IRQ | \
1487                       GEN8_GT_VCS0_IRQ | \
1488                       GEN8_GT_VCS1_IRQ | \
1489                       GEN8_GT_VECS_IRQ | \
1490                       GEN8_GT_PM_IRQ | \
1491                       GEN8_GT_GUC_IRQ)
1492
1493         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1494                 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
1495                 if (likely(gt_iir[0]))
1496                         raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1497         }
1498
1499         if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
1500                 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
1501                 if (likely(gt_iir[1]))
1502                         raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1503         }
1504
1505         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1506                 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1507                 if (likely(gt_iir[2]))
1508                         raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
1509         }
1510
1511         if (master_ctl & GEN8_GT_VECS_IRQ) {
1512                 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
1513                 if (likely(gt_iir[3]))
1514                         raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1515         }
1516 }
1517
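/*
 * The ack above runs with the master interrupt disabled and uses raw
 * register accessors to keep that window short; the handler below then
 * processes the saved gt_iir[] snapshot after the master interrupt has
 * been re-enabled (see gen8_irq_handler()).
 */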
1518 static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1519                                 u32 master_ctl, u32 gt_iir[4])
1520 {
1521         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1522                 gen8_cs_irq_handler(i915->engine[RCS0],
1523                                     gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
1524                 gen8_cs_irq_handler(i915->engine[BCS0],
1525                                     gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1526         }
1527
1528         if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
1529                 gen8_cs_irq_handler(i915->engine[VCS0],
1530                                     gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
1531                 gen8_cs_irq_handler(i915->engine[VCS1],
1532                                     gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1533         }
1534
1535         if (master_ctl & GEN8_GT_VECS_IRQ) {
1536                 gen8_cs_irq_handler(i915->engine[VECS0],
1537                                     gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1538         }
1539
1540         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1541                 gen6_rps_irq_handler(i915, gt_iir[2]);
1542                 gen9_guc_irq_handler(i915, gt_iir[2]);
1543         }
1544 }
1545
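/*
 * The *_long_detect() helpers below map an hpd_pin to the platform
 * specific "long pulse" status bit. intel_get_hpd_pins() uses them to
 * classify each triggered pin as a long pulse (hot plug/unplug) or a
 * short pulse (sink IRQ).
 */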
1546 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1547 {
1548         switch (pin) {
1549         case HPD_PORT_C:
1550                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1551         case HPD_PORT_D:
1552                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1553         case HPD_PORT_E:
1554                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1555         case HPD_PORT_F:
1556                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1557         default:
1558                 return false;
1559         }
1560 }
1561
1562 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1563 {
1564         switch (pin) {
1565         case HPD_PORT_A:
1566                 return val & PORTA_HOTPLUG_LONG_DETECT;
1567         case HPD_PORT_B:
1568                 return val & PORTB_HOTPLUG_LONG_DETECT;
1569         case HPD_PORT_C:
1570                 return val & PORTC_HOTPLUG_LONG_DETECT;
1571         default:
1572                 return false;
1573         }
1574 }
1575
1576 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1577 {
1578         switch (pin) {
1579         case HPD_PORT_A:
1580                 return val & ICP_DDIA_HPD_LONG_DETECT;
1581         case HPD_PORT_B:
1582                 return val & ICP_DDIB_HPD_LONG_DETECT;
1583         default:
1584                 return false;
1585         }
1586 }
1587
1588 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1589 {
1590         switch (pin) {
1591         case HPD_PORT_C:
1592                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1593         case HPD_PORT_D:
1594                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1595         case HPD_PORT_E:
1596                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1597         case HPD_PORT_F:
1598                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1599         default:
1600                 return false;
1601         }
1602 }
1603
1604 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1605 {
1606         switch (pin) {
1607         case HPD_PORT_E:
1608                 return val & PORTE_HOTPLUG_LONG_DETECT;
1609         default:
1610                 return false;
1611         }
1612 }
1613
1614 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1615 {
1616         switch (pin) {
1617         case HPD_PORT_A:
1618                 return val & PORTA_HOTPLUG_LONG_DETECT;
1619         case HPD_PORT_B:
1620                 return val & PORTB_HOTPLUG_LONG_DETECT;
1621         case HPD_PORT_C:
1622                 return val & PORTC_HOTPLUG_LONG_DETECT;
1623         case HPD_PORT_D:
1624                 return val & PORTD_HOTPLUG_LONG_DETECT;
1625         default:
1626                 return false;
1627         }
1628 }
1629
1630 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1631 {
1632         switch (pin) {
1633         case HPD_PORT_A:
1634                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1635         default:
1636                 return false;
1637         }
1638 }
1639
1640 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1641 {
1642         switch (pin) {
1643         case HPD_PORT_B:
1644                 return val & PORTB_HOTPLUG_LONG_DETECT;
1645         case HPD_PORT_C:
1646                 return val & PORTC_HOTPLUG_LONG_DETECT;
1647         case HPD_PORT_D:
1648                 return val & PORTD_HOTPLUG_LONG_DETECT;
1649         default:
1650                 return false;
1651         }
1652 }
1653
1654 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1655 {
1656         switch (pin) {
1657         case HPD_PORT_B:
1658                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1659         case HPD_PORT_C:
1660                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1661         case HPD_PORT_D:
1662                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1663         default:
1664                 return false;
1665         }
1666 }
1667
1668 /*
1669  * Get a bit mask of pins that have triggered, and which ones may be long.
1670  * This can be called multiple times with the same masks to accumulate
1671  * hotplug detection results from several registers.
1672  *
1673  * Note that the caller is expected to zero out the masks initially.
1674  */
1675 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1676                                u32 *pin_mask, u32 *long_mask,
1677                                u32 hotplug_trigger, u32 dig_hotplug_reg,
1678                                const u32 hpd[HPD_NUM_PINS],
1679                                bool long_pulse_detect(enum hpd_pin pin, u32 val))
1680 {
1681         enum hpd_pin pin;
1682
1683         for_each_hpd_pin(pin) {
1684                 if ((hpd[pin] & hotplug_trigger) == 0)
1685                         continue;
1686
1687                 *pin_mask |= BIT(pin);
1688
1689                 if (long_pulse_detect(pin, dig_hotplug_reg))
1690                         *long_mask |= BIT(pin);
1691         }
1692
1693         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1694                          hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1695
1696 }
1697
1698 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1699 {
1700         wake_up_all(&dev_priv->gmbus_wait_queue);
1701 }
1702
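/* DP AUX waiters sleep on the same queue as GMBUS, hence the shared wakeup. */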
1703 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1704 {
1705         wake_up_all(&dev_priv->gmbus_wait_queue);
1706 }
1707
1708 #if defined(CONFIG_DEBUG_FS)
1709 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1710                                          enum pipe pipe,
1711                                          u32 crc0, u32 crc1,
1712                                          u32 crc2, u32 crc3,
1713                                          u32 crc4)
1714 {
1715         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1716         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1717         u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1718
1719         trace_intel_pipe_crc(crtc, crcs);
1720
1721         spin_lock(&pipe_crc->lock);
1722         /*
1723          * For some not yet identified reason, the first CRC is
1724          * bonkers, so skip it and wait for the next vblank to
1725          * deliver a trustworthy result.
1726          *
1727          * On GEN8+ sometimes the second CRC is bonkers as well, so
1728          * don't trust that one either.
1729          */
1730         if (pipe_crc->skipped <= 0 ||
1731             (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1732                 pipe_crc->skipped++;
1733                 spin_unlock(&pipe_crc->lock);
1734                 return;
1735         }
1736         spin_unlock(&pipe_crc->lock);
1737
1738         drm_crtc_add_crc_entry(&crtc->base, true,
1739                                 drm_crtc_accurate_vblank_count(&crtc->base),
1740                                 crcs);
1741 }
1742 #else
1743 static inline void
1744 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1745                              enum pipe pipe,
1746                              u32 crc0, u32 crc1,
1747                              u32 crc2, u32 crc3,
1748                              u32 crc4) {}
1749 #endif
1750
1751
1752 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1753                                      enum pipe pipe)
1754 {
1755         display_pipe_crc_irq_handler(dev_priv, pipe,
1756                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1757                                      0, 0, 0, 0);
1758 }
1759
1760 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1761                                      enum pipe pipe)
1762 {
1763         display_pipe_crc_irq_handler(dev_priv, pipe,
1764                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1765                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1766                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1767                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1768                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1769 }
1770
1771 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1772                                       enum pipe pipe)
1773 {
1774         u32 res1, res2;
1775
1776         if (INTEL_GEN(dev_priv) >= 3)
1777                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1778         else
1779                 res1 = 0;
1780
1781         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1782                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1783         else
1784                 res2 = 0;
1785
1786         display_pipe_crc_irq_handler(dev_priv, pipe,
1787                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1788                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1789                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1790                                      res1, res2);
1791 }
1792
1793 /* The RPS events need forcewake, so we add them to a work queue and mask their
1794  * IMR bits until the work is done. Other interrupts can be processed without
1795  * the work queue. */
1796 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1797 {
1798         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1799
1800         if (pm_iir & dev_priv->pm_rps_events) {
1801                 spin_lock(&dev_priv->irq_lock);
1802                 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1803                 if (rps->interrupts_enabled) {
1804                         rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1805                         schedule_work(&rps->work);
1806                 }
1807                 spin_unlock(&dev_priv->irq_lock);
1808         }
1809
1810         if (INTEL_GEN(dev_priv) >= 8)
1811                 return;
1812
1813         if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1814                 intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
1815
1816         if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1817                 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1818 }
1819
1820 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1821 {
1822         if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
1823                 intel_guc_to_host_event_handler(&dev_priv->guc);
1824 }
1825
1826 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1827 {
1828         enum pipe pipe;
1829
1830         for_each_pipe(dev_priv, pipe) {
1831                 I915_WRITE(PIPESTAT(pipe),
1832                            PIPESTAT_INT_STATUS_MASK |
1833                            PIPE_FIFO_UNDERRUN_STATUS);
1834
1835                 dev_priv->pipestat_irq_mask[pipe] = 0;
1836         }
1837 }
1838
1839 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1840                                   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1841 {
1842         enum pipe pipe;
1843
1844         spin_lock(&dev_priv->irq_lock);
1845
1846         if (!dev_priv->display_irqs_enabled) {
1847                 spin_unlock(&dev_priv->irq_lock);
1848                 return;
1849         }
1850
1851         for_each_pipe(dev_priv, pipe) {
1852                 i915_reg_t reg;
1853                 u32 status_mask, enable_mask, iir_bit = 0;
1854
1855                 /*
1856                  * PIPESTAT bits get signalled even when the interrupt is
1857                  * disabled with the mask bits, and some of the status bits do
1858                  * not generate interrupts at all (like the underrun bit). Hence
1859                  * we need to be careful that we only handle what we want to
1860                  * handle.
1861                  */
1862
1863                 /* fifo underruns are filtered in the underrun handler. */
1864                 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1865
1866                 switch (pipe) {
1867                 case PIPE_A:
1868                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1869                         break;
1870                 case PIPE_B:
1871                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1872                         break;
1873                 case PIPE_C:
1874                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1875                         break;
1876                 }
1877                 if (iir & iir_bit)
1878                         status_mask |= dev_priv->pipestat_irq_mask[pipe];
1879
1880                 if (!status_mask)
1881                         continue;
1882
1883                 reg = PIPESTAT(pipe);
1884                 pipe_stats[pipe] = I915_READ(reg) & status_mask;
1885                 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1886
1887                 /*
1888                  * Clear the PIPE*STAT regs before the IIR
1889                  *
1890                  * Toggle the enable bits to make sure we get an
1891                  * edge in the ISR pipe event bit if we don't clear
1892                  * all the enabled status bits. Otherwise the edge
1893                  * triggered IIR on i965/g4x wouldn't notice that
1894                  * an interrupt is still pending.
1895                  */
1896                 if (pipe_stats[pipe]) {
1897                         I915_WRITE(reg, pipe_stats[pipe]);
1898                         I915_WRITE(reg, enable_mask);
1899                 }
1900         }
1901         spin_unlock(&dev_priv->irq_lock);
1902 }
1903
1904 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1905                                       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1906 {
1907         enum pipe pipe;
1908
1909         for_each_pipe(dev_priv, pipe) {
1910                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1911                         drm_handle_vblank(&dev_priv->drm, pipe);
1912
1913                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1914                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1915
1916                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1917                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1918         }
1919 }
1920
1921 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1922                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1923 {
1924         bool blc_event = false;
1925         enum pipe pipe;
1926
1927         for_each_pipe(dev_priv, pipe) {
1928                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1929                         drm_handle_vblank(&dev_priv->drm, pipe);
1930
1931                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1932                         blc_event = true;
1933
1934                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1935                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1936
1937                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1938                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1939         }
1940
1941         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1942                 intel_opregion_asle_intr(dev_priv);
1943 }
1944
1945 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1946                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1947 {
1948         bool blc_event = false;
1949         enum pipe pipe;
1950
1951         for_each_pipe(dev_priv, pipe) {
1952                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1953                         drm_handle_vblank(&dev_priv->drm, pipe);
1954
1955                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1956                         blc_event = true;
1957
1958                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1959                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1960
1961                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1962                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1963         }
1964
1965         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1966                 intel_opregion_asle_intr(dev_priv);
1967
1968         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1969                 gmbus_irq_handler(dev_priv);
1970 }
1971
1972 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1973                                             u32 pipe_stats[I915_MAX_PIPES])
1974 {
1975         enum pipe pipe;
1976
1977         for_each_pipe(dev_priv, pipe) {
1978                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1979                         drm_handle_vblank(&dev_priv->drm, pipe);
1980
1981                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1982                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1983
1984                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1985                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1986         }
1987
1988         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1989                 gmbus_irq_handler(dev_priv);
1990 }
1991
1992 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1993 {
1994         u32 hotplug_status = 0, hotplug_status_mask;
1995         int i;
1996
1997         if (IS_G4X(dev_priv) ||
1998             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1999                 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
2000                         DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
2001         else
2002                 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
2003
2004         /*
2005          * We absolutely have to clear all the pending interrupt
2006          * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
2007          * interrupt bit won't have an edge, and the i965/g4x
2008          * edge triggered IIR will not notice that an interrupt
2009          * is still pending. We can't use PORT_HOTPLUG_EN to
2010          * guarantee the edge as the act of toggling the enable
2011          * bits can itself generate a new hotplug interrupt :(
2012          */
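        /* Bounded retry; the WARN_ONCE below fires if the status never clears. */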
2013         for (i = 0; i < 10; i++) {
2014                 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
2015
2016                 if (tmp == 0)
2017                         return hotplug_status;
2018
2019                 hotplug_status |= tmp;
2020                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2021         }
2022
2023         WARN_ONCE(1,
2024                   "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
2025                   I915_READ(PORT_HOTPLUG_STAT));
2026
2027         return hotplug_status;
2028 }
2029
2030 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2031                                  u32 hotplug_status)
2032 {
2033         u32 pin_mask = 0, long_mask = 0;
2034
2035         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2036             IS_CHERRYVIEW(dev_priv)) {
2037                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2038
2039                 if (hotplug_trigger) {
2040                         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2041                                            hotplug_trigger, hotplug_trigger,
2042                                            hpd_status_g4x,
2043                                            i9xx_port_hotplug_long_detect);
2044
2045                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2046                 }
2047
2048                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2049                         dp_aux_irq_handler(dev_priv);
2050         } else {
2051                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2052
2053                 if (hotplug_trigger) {
2054                         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2055                                            hotplug_trigger, hotplug_trigger,
2056                                            hpd_status_i915,
2057                                            i9xx_port_hotplug_long_detect);
2058                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2059                 }
2060         }
2061 }
2062
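/*
 * The VLV/CHV top level handlers below disable the master enable,
 * snapshot and clear the IIRs, process from the snapshot, then restore
 * the enables; the "Theory on interrupt generation" comments inside
 * explain why that ordering matters.
 */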
2063 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2064 {
2065         struct drm_device *dev = arg;
2066         struct drm_i915_private *dev_priv = to_i915(dev);
2067         irqreturn_t ret = IRQ_NONE;
2068
2069         if (!intel_irqs_enabled(dev_priv))
2070                 return IRQ_NONE;
2071
2072         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2073         disable_rpm_wakeref_asserts(dev_priv);
2074
2075         do {
2076                 u32 iir, gt_iir, pm_iir;
2077                 u32 pipe_stats[I915_MAX_PIPES] = {};
2078                 u32 hotplug_status = 0;
2079                 u32 ier = 0;
2080
2081                 gt_iir = I915_READ(GTIIR);
2082                 pm_iir = I915_READ(GEN6_PMIIR);
2083                 iir = I915_READ(VLV_IIR);
2084
2085                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2086                         break;
2087
2088                 ret = IRQ_HANDLED;
2089
2090                 /*
2091                  * Theory on interrupt generation, based on empirical evidence:
2092                  *
2093                  * x = ((VLV_IIR & VLV_IER) ||
2094                  *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2095                  *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2096                  *
2097                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2098                  * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2099                  * guarantee the CPU interrupt will be raised again even if we
2100                  * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2101                  * bits this time around.
2102                  */
2103                 I915_WRITE(VLV_MASTER_IER, 0);
2104                 ier = I915_READ(VLV_IER);
2105                 I915_WRITE(VLV_IER, 0);
2106
2107                 if (gt_iir)
2108                         I915_WRITE(GTIIR, gt_iir);
2109                 if (pm_iir)
2110                         I915_WRITE(GEN6_PMIIR, pm_iir);
2111
2112                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2113                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2114
2115                 /* Call regardless, as some status bits might not be
2116                  * signalled in iir */
2117                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2118
2119                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2120                            I915_LPE_PIPE_B_INTERRUPT))
2121                         intel_lpe_audio_irq_handler(dev_priv);
2122
2123                 /*
2124                  * VLV_IIR is single buffered, and reflects the level
2125                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2126                  */
2127                 if (iir)
2128                         I915_WRITE(VLV_IIR, iir);
2129
2130                 I915_WRITE(VLV_IER, ier);
2131                 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2132
2133                 if (gt_iir)
2134                         snb_gt_irq_handler(dev_priv, gt_iir);
2135                 if (pm_iir)
2136                         gen6_rps_irq_handler(dev_priv, pm_iir);
2137
2138                 if (hotplug_status)
2139                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2140
2141                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2142         } while (0);
2143
2144         enable_rpm_wakeref_asserts(dev_priv);
2145
2146         return ret;
2147 }
2148
2149 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2150 {
2151         struct drm_device *dev = arg;
2152         struct drm_i915_private *dev_priv = to_i915(dev);
2153         irqreturn_t ret = IRQ_NONE;
2154
2155         if (!intel_irqs_enabled(dev_priv))
2156                 return IRQ_NONE;
2157
2158         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2159         disable_rpm_wakeref_asserts(dev_priv);
2160
2161         do {
2162                 u32 master_ctl, iir;
2163                 u32 pipe_stats[I915_MAX_PIPES] = {};
2164                 u32 hotplug_status = 0;
2165                 u32 gt_iir[4];
2166                 u32 ier = 0;
2167
2168                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2169                 iir = I915_READ(VLV_IIR);
2170
2171                 if (master_ctl == 0 && iir == 0)
2172                         break;
2173
2174                 ret = IRQ_HANDLED;
2175
2176                 /*
2177                  * Theory on interrupt generation, based on empirical evidence:
2178                  *
2179                  * x = ((VLV_IIR & VLV_IER) ||
2180                  *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2181                  *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2182                  *
2183                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2184                  * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2185                  * guarantee the CPU interrupt will be raised again even if we
2186                  * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2187                  * bits this time around.
2188                  */
2189                 I915_WRITE(GEN8_MASTER_IRQ, 0);
2190                 ier = I915_READ(VLV_IER);
2191                 I915_WRITE(VLV_IER, 0);
2192
2193                 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2194
2195                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2196                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2197
2198                 /* Call regardless, as some status bits might not be
2199                  * signalled in iir */
2200                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2201
2202                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2203                            I915_LPE_PIPE_B_INTERRUPT |
2204                            I915_LPE_PIPE_C_INTERRUPT))
2205                         intel_lpe_audio_irq_handler(dev_priv);
2206
2207                 /*
2208                  * VLV_IIR is single buffered, and reflects the level
2209                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2210                  */
2211                 if (iir)
2212                         I915_WRITE(VLV_IIR, iir);
2213
2214                 I915_WRITE(VLV_IER, ier);
2215                 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2216
2217                 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2218
2219                 if (hotplug_status)
2220                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2221
2222                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2223         } while (0);
2224
2225         enable_rpm_wakeref_asserts(dev_priv);
2226
2227         return ret;
2228 }
2229
2230 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2231                                 u32 hotplug_trigger,
2232                                 const u32 hpd[HPD_NUM_PINS])
2233 {
2234         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2235
2236         /*
2237          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2238          * unless we touch the hotplug register, even if hotplug_trigger is
2239          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2240          * errors.
2241          */
2242         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2243         if (!hotplug_trigger) {
2244                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2245                         PORTD_HOTPLUG_STATUS_MASK |
2246                         PORTC_HOTPLUG_STATUS_MASK |
2247                         PORTB_HOTPLUG_STATUS_MASK;
2248                 dig_hotplug_reg &= ~mask;
2249         }
2250
2251         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2252         if (!hotplug_trigger)
2253                 return;
2254
2255         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2256                            dig_hotplug_reg, hpd,
2257                            pch_port_hotplug_long_detect);
2258
2259         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2260 }
2261
2262 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2263 {
2264         enum pipe pipe;
2265         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2266
2267         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2268
2269         if (pch_iir & SDE_AUDIO_POWER_MASK) {
2270                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2271                                SDE_AUDIO_POWER_SHIFT);
2272                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2273                                  port_name(port));
2274         }
2275
2276         if (pch_iir & SDE_AUX_MASK)
2277                 dp_aux_irq_handler(dev_priv);
2278
2279         if (pch_iir & SDE_GMBUS)
2280                 gmbus_irq_handler(dev_priv);
2281
2282         if (pch_iir & SDE_AUDIO_HDCP_MASK)
2283                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2284
2285         if (pch_iir & SDE_AUDIO_TRANS_MASK)
2286                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2287
2288         if (pch_iir & SDE_POISON)
2289                 DRM_ERROR("PCH poison interrupt\n");
2290
2291         if (pch_iir & SDE_FDI_MASK)
2292                 for_each_pipe(dev_priv, pipe)
2293                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2294                                          pipe_name(pipe),
2295                                          I915_READ(FDI_RX_IIR(pipe)));
2296
2297         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2298                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2299
2300         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2301                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2302
2303         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2304                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2305
2306         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2307                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2308 }
2309
2310 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2311 {
2312         u32 err_int = I915_READ(GEN7_ERR_INT);
2313         enum pipe pipe;
2314
2315         if (err_int & ERR_INT_POISON)
2316                 DRM_ERROR("Poison interrupt\n");
2317
2318         for_each_pipe(dev_priv, pipe) {
2319                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2320                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2321
2322                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2323                         if (IS_IVYBRIDGE(dev_priv))
2324                                 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2325                         else
2326                                 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2327                 }
2328         }
2329
2330         I915_WRITE(GEN7_ERR_INT, err_int);
2331 }
2332
2333 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2334 {
2335         u32 serr_int = I915_READ(SERR_INT);
2336         enum pipe pipe;
2337
2338         if (serr_int & SERR_INT_POISON)
2339                 DRM_ERROR("PCH poison interrupt\n");
2340
2341         for_each_pipe(dev_priv, pipe)
2342                 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2343                         intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2344
2345         I915_WRITE(SERR_INT, serr_int);
2346 }
2347
2348 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2349 {
2350         enum pipe pipe;
2351         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2352
2353         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2354
2355         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2356                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2357                                SDE_AUDIO_POWER_SHIFT_CPT);
2358                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2359                                  port_name(port));
2360         }
2361
2362         if (pch_iir & SDE_AUX_MASK_CPT)
2363                 dp_aux_irq_handler(dev_priv);
2364
2365         if (pch_iir & SDE_GMBUS_CPT)
2366                 gmbus_irq_handler(dev_priv);
2367
2368         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2369                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2370
2371         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2372                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2373
2374         if (pch_iir & SDE_FDI_MASK_CPT)
2375                 for_each_pipe(dev_priv, pipe)
2376                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2377                                          pipe_name(pipe),
2378                                          I915_READ(FDI_RX_IIR(pipe)));
2379
2380         if (pch_iir & SDE_ERROR_CPT)
2381                 cpt_serr_int_handler(dev_priv);
2382 }
2383
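/* ICP splits hotplug detection across separate DDI and Type-C registers. */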
2384 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2385 {
2386         u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2387         u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
2388         u32 pin_mask = 0, long_mask = 0;
2389
2390         if (ddi_hotplug_trigger) {
2391                 u32 dig_hotplug_reg;
2392
2393                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2394                 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2395
2396                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2397                                    ddi_hotplug_trigger,
2398                                    dig_hotplug_reg, hpd_icp,
2399                                    icp_ddi_port_hotplug_long_detect);
2400         }
2401
2402         if (tc_hotplug_trigger) {
2403                 u32 dig_hotplug_reg;
2404
2405                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2406                 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2407
2408                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2409                                    tc_hotplug_trigger,
2410                                    dig_hotplug_reg, hpd_icp,
2411                                    icp_tc_port_hotplug_long_detect);
2412         }
2413
2414         if (pin_mask)
2415                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2416
2417         if (pch_iir & SDE_GMBUS_ICP)
2418                 gmbus_irq_handler(dev_priv);
2419 }
2420
2421 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2422 {
2423         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2424                 ~SDE_PORTE_HOTPLUG_SPT;
2425         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2426         u32 pin_mask = 0, long_mask = 0;
2427
2428         if (hotplug_trigger) {
2429                 u32 dig_hotplug_reg;
2430
2431                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2432                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2433
2434                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2435                                    hotplug_trigger, dig_hotplug_reg, hpd_spt,
2436                                    spt_port_hotplug_long_detect);
2437         }
2438
2439         if (hotplug2_trigger) {
2440                 u32 dig_hotplug_reg;
2441
2442                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2443                 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2444
2445                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2446                                    hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2447                                    spt_port_hotplug2_long_detect);
2448         }
2449
2450         if (pin_mask)
2451                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2452
2453         if (pch_iir & SDE_GMBUS_CPT)
2454                 gmbus_irq_handler(dev_priv);
2455 }
2456
2457 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2458                                 u32 hotplug_trigger,
2459                                 const u32 hpd[HPD_NUM_PINS])
2460 {
2461         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2462
2463         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2464         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2465
2466         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2467                            dig_hotplug_reg, hpd,
2468                            ilk_port_hotplug_long_detect);
2469
2470         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2471 }
2472
2473 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2474                                     u32 de_iir)
2475 {
2476         enum pipe pipe;
2477         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2478
2479         if (hotplug_trigger)
2480                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2481
2482         if (de_iir & DE_AUX_CHANNEL_A)
2483                 dp_aux_irq_handler(dev_priv);
2484
2485         if (de_iir & DE_GSE)
2486                 intel_opregion_asle_intr(dev_priv);
2487
2488         if (de_iir & DE_POISON)
2489                 DRM_ERROR("Poison interrupt\n");
2490
2491         for_each_pipe(dev_priv, pipe) {
2492                 if (de_iir & DE_PIPE_VBLANK(pipe))
2493                         drm_handle_vblank(&dev_priv->drm, pipe);
2494
2495                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2496                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2497
2498                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2499                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2500         }
2501
2502         /* check event from PCH */
2503         if (de_iir & DE_PCH_EVENT) {
2504                 u32 pch_iir = I915_READ(SDEIIR);
2505
2506                 if (HAS_PCH_CPT(dev_priv))
2507                         cpt_irq_handler(dev_priv, pch_iir);
2508                 else
2509                         ibx_irq_handler(dev_priv, pch_iir);
2510
2511                 /* clear the PCH hotplug event before clearing the CPU irq */
2512                 I915_WRITE(SDEIIR, pch_iir);
2513         }
2514
2515         if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2516                 ironlake_rps_change_irq_handler(dev_priv);
2517 }
2518
2519 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2520                                     u32 de_iir)
2521 {
2522         enum pipe pipe;
2523         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2524
2525         if (hotplug_trigger)
2526                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2527
2528         if (de_iir & DE_ERR_INT_IVB)
2529                 ivb_err_int_handler(dev_priv);
2530
2531         if (de_iir & DE_EDP_PSR_INT_HSW) {
2532                 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2533
2534                 intel_psr_irq_handler(dev_priv, psr_iir);
2535                 I915_WRITE(EDP_PSR_IIR, psr_iir);
2536         }
2537
2538         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2539                 dp_aux_irq_handler(dev_priv);
2540
2541         if (de_iir & DE_GSE_IVB)
2542                 intel_opregion_asle_intr(dev_priv);
2543
2544         for_each_pipe(dev_priv, pipe) {
2545                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2546                         drm_handle_vblank(&dev_priv->drm, pipe);
2547         }
2548
2549         /* check event from PCH */
2550         if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2551                 u32 pch_iir = I915_READ(SDEIIR);
2552
2553                 cpt_irq_handler(dev_priv, pch_iir);
2554
2555                 /* clear the PCH hotplug event before clearing the CPU irq */
2556                 I915_WRITE(SDEIIR, pch_iir);
2557         }
2558 }
2559
2560 /*
2561  * To handle irqs with the minimum potential races with fresh interrupts, we:
2562  * 1 - Disable Master Interrupt Control.
2563  * 2 - Find the source(s) of the interrupt.
2564  * 3 - Clear the Interrupt Identity bits (IIR).
2565  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2566  * 5 - Re-enable Master Interrupt Control.
2567  */
2568 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2569 {
2570         struct drm_device *dev = arg;
2571         struct drm_i915_private *dev_priv = to_i915(dev);
2572         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2573         irqreturn_t ret = IRQ_NONE;
2574
2575         if (!intel_irqs_enabled(dev_priv))
2576                 return IRQ_NONE;
2577
2578         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2579         disable_rpm_wakeref_asserts(dev_priv);
2580
2581         /* disable master interrupt before clearing iir  */
2582         de_ier = I915_READ(DEIER);
2583         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2584
2585         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2586          * interrupts will be stored on its back queue, and then we'll be
2587          * able to process them after we restore SDEIER (as soon as we restore
2588          * it, we'll get an interrupt if SDEIIR still has something to process
2589          * due to its back queue). */
2590         if (!HAS_PCH_NOP(dev_priv)) {
2591                 sde_ier = I915_READ(SDEIER);
2592                 I915_WRITE(SDEIER, 0);
2593         }
2594
2595         /* Find, clear, then process each source of interrupt */
2596
2597         gt_iir = I915_READ(GTIIR);
2598         if (gt_iir) {
2599                 I915_WRITE(GTIIR, gt_iir);
2600                 ret = IRQ_HANDLED;
2601                 if (INTEL_GEN(dev_priv) >= 6)
2602                         snb_gt_irq_handler(dev_priv, gt_iir);
2603                 else
2604                         ilk_gt_irq_handler(dev_priv, gt_iir);
2605         }
2606
2607         de_iir = I915_READ(DEIIR);
2608         if (de_iir) {
2609                 I915_WRITE(DEIIR, de_iir);
2610                 ret = IRQ_HANDLED;
2611                 if (INTEL_GEN(dev_priv) >= 7)
2612                         ivb_display_irq_handler(dev_priv, de_iir);
2613                 else
2614                         ilk_display_irq_handler(dev_priv, de_iir);
2615         }
2616
2617         if (INTEL_GEN(dev_priv) >= 6) {
2618                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2619                 if (pm_iir) {
2620                         I915_WRITE(GEN6_PMIIR, pm_iir);
2621                         ret = IRQ_HANDLED;
2622                         gen6_rps_irq_handler(dev_priv, pm_iir);
2623                 }
2624         }
2625
2626         I915_WRITE(DEIER, de_ier);
2627         if (!HAS_PCH_NOP(dev_priv))
2628                 I915_WRITE(SDEIER, sde_ier);
2629
2630         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2631         enable_rpm_wakeref_asserts(dev_priv);
2632
2633         return ret;
2634 }
2635
2636 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2637                                 u32 hotplug_trigger,
2638                                 const u32 hpd[HPD_NUM_PINS])
2639 {
2640         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2641
2642         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2643         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2644
2645         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2646                            dig_hotplug_reg, hpd,
2647                            bxt_port_hotplug_long_detect);
2648
2649         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2650 }
2651
2652 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2653 {
2654         u32 pin_mask = 0, long_mask = 0;
2655         u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2656         u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2657
2658         if (trigger_tc) {
2659                 u32 dig_hotplug_reg;
2660
2661                 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2662                 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2663
2664                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2665                                    dig_hotplug_reg, hpd_gen11,
2666                                    gen11_port_hotplug_long_detect);
2667         }
2668
2669         if (trigger_tbt) {
2670                 u32 dig_hotplug_reg;
2671
2672                 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2673                 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2674
2675                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2676                                    dig_hotplug_reg, hpd_gen11,
2677                                    gen11_port_hotplug_long_detect);
2678         }
2679
2680         if (pin_mask)
2681                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2682         else
2683                 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2684 }
2685
2686 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2687 {
2688         u32 mask = GEN8_AUX_CHANNEL_A;
2689
2690         if (INTEL_GEN(dev_priv) >= 9)
2691                 mask |= GEN9_AUX_CHANNEL_B |
2692                         GEN9_AUX_CHANNEL_C |
2693                         GEN9_AUX_CHANNEL_D;
2694
2695         if (IS_CNL_WITH_PORT_F(dev_priv))
2696                 mask |= CNL_AUX_CHANNEL_F;
2697
2698         if (INTEL_GEN(dev_priv) >= 11)
2699                 mask |= ICL_AUX_CHANNEL_E |
2700                         CNL_AUX_CHANNEL_F;
2701
2702         return mask;
2703 }
2704
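/*
 * Display engine interrupts. Each source (misc, hotplug, port, per-pipe,
 * PCH) is acked by writing its IIR back before being processed, mirroring
 * the find/clear/process scheme described above ironlake_irq_handler().
 */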
2705 static irqreturn_t
2706 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2707 {
2708         irqreturn_t ret = IRQ_NONE;
2709         u32 iir;
2710         enum pipe pipe;
2711
2712         if (master_ctl & GEN8_DE_MISC_IRQ) {
2713                 iir = I915_READ(GEN8_DE_MISC_IIR);
2714                 if (iir) {
2715                         bool found = false;
2716
2717                         I915_WRITE(GEN8_DE_MISC_IIR, iir);
2718                         ret = IRQ_HANDLED;
2719
2720                         if (iir & GEN8_DE_MISC_GSE) {
2721                                 intel_opregion_asle_intr(dev_priv);
2722                                 found = true;
2723                         }
2724
2725                         if (iir & GEN8_DE_EDP_PSR) {
2726                                 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2727
2728                                 intel_psr_irq_handler(dev_priv, psr_iir);
2729                                 I915_WRITE(EDP_PSR_IIR, psr_iir);
2730                                 found = true;
2731                         }
2732
2733                         if (!found)
2734                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2735                 } else {
2736                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2737                 }
2738         }
2739
2740         if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2741                 iir = I915_READ(GEN11_DE_HPD_IIR);
2742                 if (iir) {
2743                         I915_WRITE(GEN11_DE_HPD_IIR, iir);
2744                         ret = IRQ_HANDLED;
2745                         gen11_hpd_irq_handler(dev_priv, iir);
2746                 } else {
2747                         DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2748                 }
2749         }
2750
2751         if (master_ctl & GEN8_DE_PORT_IRQ) {
2752                 iir = I915_READ(GEN8_DE_PORT_IIR);
2753                 if (iir) {
2754                         u32 tmp_mask;
2755                         bool found = false;
2756
2757                         I915_WRITE(GEN8_DE_PORT_IIR, iir);
2758                         ret = IRQ_HANDLED;
2759
2760                         if (iir & gen8_de_port_aux_mask(dev_priv)) {
2761                                 dp_aux_irq_handler(dev_priv);
2762                                 found = true;
2763                         }
2764
2765                         if (IS_GEN9_LP(dev_priv)) {
2766                                 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2767                                 if (tmp_mask) {
2768                                         bxt_hpd_irq_handler(dev_priv, tmp_mask,
2769                                                             hpd_bxt);
2770                                         found = true;
2771                                 }
2772                         } else if (IS_BROADWELL(dev_priv)) {
2773                                 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2774                                 if (tmp_mask) {
2775                                         ilk_hpd_irq_handler(dev_priv,
2776                                                             tmp_mask, hpd_bdw);
2777                                         found = true;
2778                                 }
2779                         }
2780
2781                         if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2782                                 gmbus_irq_handler(dev_priv);
2783                                 found = true;
2784                         }
2785
2786                         if (!found)
2787                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2788                 } else {
2789                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2790                 }
2791         }
2792
2793         for_each_pipe(dev_priv, pipe) {
2794                 u32 fault_errors;
2795
2796                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2797                         continue;
2798
2799                 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2800                 if (!iir) {
2801                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2802                         continue;
2803                 }
2804
2805                 ret = IRQ_HANDLED;
2806                 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2807
2808                 if (iir & GEN8_PIPE_VBLANK)
2809                         drm_handle_vblank(&dev_priv->drm, pipe);
2810
2811                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2812                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
2813
2814                 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2815                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2816
2817                 fault_errors = iir;
2818                 if (INTEL_GEN(dev_priv) >= 9)
2819                         fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2820                 else
2821                         fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2822
2823                 if (fault_errors)
2824                         DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2825                                   pipe_name(pipe),
2826                                   fault_errors);
2827         }
2828
2829         if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2830             master_ctl & GEN8_DE_PCH_IRQ) {
2831                 /*
2832                  * FIXME(BDW): Assume for now that the new interrupt handling
2833                  * scheme also closed the SDE interrupt handling race we've seen
2834                  * on older pch-split platforms. But this needs testing.
2835                  */
2836                 iir = I915_READ(SDEIIR);
2837                 if (iir) {
2838                         I915_WRITE(SDEIIR, iir);
2839                         ret = IRQ_HANDLED;
2840
2841                         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2842                                 icp_irq_handler(dev_priv, iir);
2843                         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2844                                 spt_irq_handler(dev_priv, iir);
2845                         else
2846                                 cpt_irq_handler(dev_priv, iir);
2847                 } else {
2848                         /*
2849                          * Like on previous PCHs, there seems to be something
2850                          * fishy going on with forwarding PCH interrupts.
2851                          */
2852                         DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2853                 }
2854         }
2855
2856         return ret;
2857 }
2858
2859 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2860 {
2861         raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2862
2863         /*
2864          * Now with master disabled, get a sample of level indications
2865          * for this interrupt. Indications will be cleared on related acks.
2866          * New indications can and will light up during processing,
2867          * and will generate new interrupt after enabling master.
2868          * and will generate a new interrupt after enabling the master.
2869         return raw_reg_read(regs, GEN8_MASTER_IRQ);
2870 }
2871
2872 static inline void gen8_master_intr_enable(void __iomem * const regs)
2873 {
2874         raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2875 }
2876
2877 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2878 {
2879         struct drm_i915_private *dev_priv = to_i915(arg);
2880         void __iomem * const regs = dev_priv->uncore.regs;
2881         u32 master_ctl;
2882         u32 gt_iir[4];
2883
2884         if (!intel_irqs_enabled(dev_priv))
2885                 return IRQ_NONE;
2886
2887         master_ctl = gen8_master_intr_disable(regs);
2888         if (!master_ctl) {
2889                 gen8_master_intr_enable(regs);
2890                 return IRQ_NONE;
2891         }
2892
2893         /* Find, clear, then process each source of interrupt */
2894         gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2895
2896         /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2897         if (master_ctl & ~GEN8_GT_IRQS) {
2898                 disable_rpm_wakeref_asserts(dev_priv);
2899                 gen8_de_irq_handler(dev_priv, master_ctl);
2900                 enable_rpm_wakeref_asserts(dev_priv);
2901         }
2902
2903         gen8_master_intr_enable(regs);
2904
2905         gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2906
2907         return IRQ_HANDLED;
2908 }
2909
2910 static u32
2911 gen11_gt_engine_identity(struct drm_i915_private * const i915,
2912                          const unsigned int bank, const unsigned int bit)
2913 {
2914         void __iomem * const regs = i915->uncore.regs;
2915         u32 timeout_ts;
2916         u32 ident;
2917
2918         lockdep_assert_held(&i915->irq_lock);
2919
2920         raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2921
2922         /*
2923          * NB: Specs do not specify how long to spin wait,
2924          * so we do ~100us as an educated guess.
2925          */
2926         timeout_ts = (local_clock() >> 10) + 100;
2927         do {
2928                 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2929         } while (!(ident & GEN11_INTR_DATA_VALID) &&
2930                  !time_after32(local_clock() >> 10, timeout_ts));
2931
2932         if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
2933                 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
2934                           bank, bit, ident);
2935                 return 0;
2936         }
2937
2938         raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
2939                       GEN11_INTR_DATA_VALID);
2940
2941         return ident;
2942 }
2943
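/*
 * A minimal stand-alone sketch (illustrative, not driver code) of the
 * timeout idiom used above: local_clock() returns nanoseconds, and
 * ">> 10" divides by 1024 as a cheap approximation of nanoseconds to
 * microseconds, so "+ 100" arms a roughly 100us deadline.
 */
static bool __maybe_unused spin_wait_us_sketch(u32 us, bool (*done)(void))
{
        u32 timeout_ts = (local_clock() >> 10) + us;

        do {
                if (done())
                        return true;
        } while (!time_after32(local_clock() >> 10, timeout_ts));

        return false;
}
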
2944 static void
2945 gen11_other_irq_handler(struct drm_i915_private * const i915,
2946                         const u8 instance, const u16 iir)
2947 {
2948         if (instance == OTHER_GTPM_INSTANCE)
2949                 return gen6_rps_irq_handler(i915, iir);
2950
2951         WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
2952                   instance, iir);
2953 }
2954
2955 static void
2956 gen11_engine_irq_handler(struct drm_i915_private * const i915,
2957                          const u8 class, const u8 instance, const u16 iir)
2958 {
2959         struct intel_engine_cs *engine;
2960
2961         if (instance <= MAX_ENGINE_INSTANCE)
2962                 engine = i915->engine_class[class][instance];
2963         else
2964                 engine = NULL;
2965
2966         if (likely(engine))
2967                 return gen8_cs_irq_handler(engine, iir);
2968
2969         WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
2970                   class, instance);
2971 }
2972
2973 static void
2974 gen11_gt_identity_handler(struct drm_i915_private * const i915,
2975                           const u32 identity)
2976 {
2977         const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
2978         const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
2979         const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
2980
2981         if (unlikely(!intr))
2982                 return;
2983
2984         if (class <= COPY_ENGINE_CLASS)
2985                 return gen11_engine_irq_handler(i915, class, instance, intr);
2986
2987         if (class == OTHER_CLASS)
2988                 return gen11_other_irq_handler(i915, instance, intr);
2989
2990         WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
2991                   class, instance, intr);
2992 }
2993
2994 static void
2995 gen11_gt_bank_handler(struct drm_i915_private * const i915,
2996                       const unsigned int bank)
2997 {
2998         void __iomem * const regs = i915->uncore.regs;
2999         unsigned long intr_dw;
3000         unsigned int bit;
3001
3002         lockdep_assert_held(&i915->irq_lock);
3003
3004         intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3005
3006         if (unlikely(!intr_dw)) {
3007                 DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
3008                 return;
3009         }
3010
3011         for_each_set_bit(bit, &intr_dw, 32) {
3012                 const u32 ident = gen11_gt_engine_identity(i915,
3013                                                            bank, bit);
3014
3015                 gen11_gt_identity_handler(i915, ident);
3016         }
3017
3018         /* The clear must come after the shared GT_INTR_DW has been serviced for each engine */
3019         raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3020 }
3021
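/*
 * Gen11 GT interrupts are decoded in two levels: the master control
 * register flags one of two banks, each bank's GT_INTR_DW supplies the
 * pending bits, and every set bit is resolved to a (class, instance,
 * intr) triple through the selector/identity registers above before
 * being dispatched.
 */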
3022 static void
3023 gen11_gt_irq_handler(struct drm_i915_private * const i915,
3024                      const u32 master_ctl)
3025 {
3026         unsigned int bank;
3027
3028         spin_lock(&i915->irq_lock);
3029
3030         for (bank = 0; bank < 2; bank++) {
3031                 if (master_ctl & GEN11_GT_DW_IRQ(bank))
3032                         gen11_gt_bank_handler(i915, bank);
3033         }
3034
3035         spin_unlock(&i915->irq_lock);
3036 }
3037
3038 static u32
3039 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3040 {
3041         void __iomem * const regs = dev_priv->uncore.regs;
3042         u32 iir;
3043
3044         if (!(master_ctl & GEN11_GU_MISC_IRQ))
3045                 return 0;
3046
3047         iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3048         if (likely(iir))
3049                 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3050
3051         return iir;
3052 }
3053
3054 static void
3055 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3056 {
3057         if (iir & GEN11_GU_MISC_GSE)
3058                 intel_opregion_asle_intr(dev_priv);
3059 }
3060
3061 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
3062 {
3063         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3064
3065         /*
3066          * Now with master disabled, get a sample of level indications
3067          * for this interrupt. Indications will be cleared on related acks.
3068          * New indications can and will light up during processing,
3069          * and will generate a new interrupt after enabling the master.
3070          */
3071         return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3072 }
3073
3074 static inline void gen11_master_intr_enable(void __iomem * const regs)
3075 {
3076         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
3077 }
3078
3079 static irqreturn_t gen11_irq_handler(int irq, void *arg)
3080 {
3081         struct drm_i915_private * const i915 = to_i915(arg);
3082         void __iomem * const regs = i915->uncore.regs;
3083         u32 master_ctl;
3084         u32 gu_misc_iir;
3085
3086         if (!intel_irqs_enabled(i915))
3087                 return IRQ_NONE;
3088
3089         master_ctl = gen11_master_intr_disable(regs);
3090         if (!master_ctl) {
3091                 gen11_master_intr_enable(regs);
3092                 return IRQ_NONE;
3093         }
3094
3095         /* Find, clear, then process each source of interrupt. */
3096         gen11_gt_irq_handler(i915, master_ctl);
3097
3098         /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3099         if (master_ctl & GEN11_DISPLAY_IRQ) {
3100                 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3101
3102                 disable_rpm_wakeref_asserts(i915);
3103                 /*
3104                  * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3105                  * for the display related bits.
3106                  */
3107                 gen8_de_irq_handler(i915, disp_ctl);
3108                 enable_rpm_wakeref_asserts(i915);
3109         }
3110
3111         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3112
3113         gen11_master_intr_enable(regs);
3114
3115         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3116
3117         return IRQ_HANDLED;
3118 }
3119
3120 /* Called from drm generic code, passed 'crtc' which
3121  * we use as a pipe index
3122  */
3123 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
3124 {
3125         struct drm_i915_private *dev_priv = to_i915(dev);
3126         unsigned long irqflags;
3127
3128         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3129         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3130         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3131
3132         return 0;
3133 }
3134
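/*
 * On i945gm vblank interrupts cannot wake the CPU from C3 (see
 * i945gm_vblank_work_func() below), so the 0 -> 1 and 1 -> 0
 * transitions of the enable count kick a worker that adds or drops a
 * CPU latency constraint accordingly.
 */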
3135 static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
3136 {
3137         struct drm_i915_private *dev_priv = to_i915(dev);
3138
3139         if (dev_priv->i945gm_vblank.enabled++ == 0)
3140                 schedule_work(&dev_priv->i945gm_vblank.work);
3141
3142         return i8xx_enable_vblank(dev, pipe);
3143 }
3144
3145 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3146 {
3147         struct drm_i915_private *dev_priv = to_i915(dev);
3148         unsigned long irqflags;
3149
3150         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3151         i915_enable_pipestat(dev_priv, pipe,
3152                              PIPE_START_VBLANK_INTERRUPT_STATUS);
3153         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3154
3155         return 0;
3156 }
3157
3158 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3159 {
3160         struct drm_i915_private *dev_priv = to_i915(dev);
3161         unsigned long irqflags;
3162         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3163                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3164
3165         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3166         ilk_enable_display_irq(dev_priv, bit);
3167         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3168
3169         /* Even though there is no DMC, the frame counter can get stuck
3170          * when PSR is active, as no frames are generated.
3171          */
3172         if (HAS_PSR(dev_priv))
3173                 drm_vblank_restore(dev, pipe);
3174
3175         return 0;
3176 }
3177
3178 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3179 {
3180         struct drm_i915_private *dev_priv = to_i915(dev);
3181         unsigned long irqflags;
3182
3183         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3184         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3185         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3186
3187         /* Even if there is no DMC, the frame counter can get stuck when
3188          * PSR is active, as no frames are generated, so check only for PSR.
3189          */
3190         if (HAS_PSR(dev_priv))
3191                 drm_vblank_restore(dev, pipe);
3192
3193         return 0;
3194 }
3195
3196 /* Called from drm generic code, passed 'crtc' which
3197  * we use as a pipe index
3198  */
3199 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3200 {
3201         struct drm_i915_private *dev_priv = to_i915(dev);
3202         unsigned long irqflags;
3203
3204         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3205         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3206         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3207 }
3208
3209 static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
3210 {
3211         struct drm_i915_private *dev_priv = to_i915(dev);
3212
3213         i8xx_disable_vblank(dev, pipe);
3214
3215         if (--dev_priv->i945gm_vblank.enabled == 0)
3216                 schedule_work(&dev_priv->i945gm_vblank.work);
3217 }
3218
3219 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
3220 {
3221         struct drm_i915_private *dev_priv = to_i915(dev);
3222         unsigned long irqflags;
3223
3224         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3225         i915_disable_pipestat(dev_priv, pipe,
3226                               PIPE_START_VBLANK_INTERRUPT_STATUS);
3227         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3228 }
3229
3230 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3231 {
3232         struct drm_i915_private *dev_priv = to_i915(dev);
3233         unsigned long irqflags;
3234         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3235                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3236
3237         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3238         ilk_disable_display_irq(dev_priv, bit);
3239         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3240 }
3241
3242 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3243 {
3244         struct drm_i915_private *dev_priv = to_i915(dev);
3245         unsigned long irqflags;
3246
3247         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3248         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3249         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3250 }
3251
3252 static void i945gm_vblank_work_func(struct work_struct *work)
3253 {
3254         struct drm_i915_private *dev_priv =
3255                 container_of(work, struct drm_i915_private, i945gm_vblank.work);
3256
3257         /*
3258          * Vblank interrupts fail to wake up the device from C3,
3259          * hence we want to prevent C3 usage while vblank interrupts
3260          * are enabled.
3261          */
3262         pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3263                               READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3264                               dev_priv->i945gm_vblank.c3_disable_latency :
3265                               PM_QOS_DEFAULT_VALUE);
3266 }
3267
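/*
 * Return a CPU latency bound just below the named cpuidle state's exit
 * latency (exit_latency - 1), i.e. a PM QoS value that keeps that
 * state from being entered, or 0 if cpuidle or the state is absent.
 */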
3268 static int cstate_disable_latency(const char *name)
3269 {
3270         const struct cpuidle_driver *drv;
3271         int i;
3272
3273         drv = cpuidle_get_driver();
3274         if (!drv)
3275                 return 0;
3276
3277         for (i = 0; i < drv->state_count; i++) {
3278                 const struct cpuidle_state *state = &drv->states[i];
3279
3280                 if (!strcmp(state->name, name))
3281                         return state->exit_latency ?
3282                                 state->exit_latency - 1 : 0;
3283         }
3284
3285         return 0;
3286 }
3287
3288 static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3289 {
3290         INIT_WORK(&dev_priv->i945gm_vblank.work,
3291                   i945gm_vblank_work_func);
3292
3293         dev_priv->i945gm_vblank.c3_disable_latency =
3294                 cstate_disable_latency("C3");
3295         pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3296                            PM_QOS_CPU_DMA_LATENCY,
3297                            PM_QOS_DEFAULT_VALUE);
3298 }
3299
3300 static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3301 {
3302         cancel_work_sync(&dev_priv->i945gm_vblank.work);
3303         pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
3304 }
3305
3306 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3307 {
3308         if (HAS_PCH_NOP(dev_priv))
3309                 return;
3310
3311         GEN3_IRQ_RESET(SDE);
3312
3313         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3314                 I915_WRITE(SERR_INT, 0xffffffff);
3315 }
3316
3317 /*
3318  * SDEIER is also touched by the interrupt handler to work around missed PCH
3319  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3320  * instead we unconditionally enable all PCH interrupt sources here, but then
3321  * only unmask them as needed with SDEIMR.
3322  *
3323  * This function needs to be called before interrupts are enabled.
3324  */
3325 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3326 {
3327         struct drm_i915_private *dev_priv = to_i915(dev);
3328
3329         if (HAS_PCH_NOP(dev_priv))
3330                 return;
3331
3332         WARN_ON(I915_READ(SDEIER) != 0);
3333         I915_WRITE(SDEIER, 0xffffffff);
3334         POSTING_READ(SDEIER);
3335 }
3336
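/*
 * Hypothetical sketch, not driver code: with SDEIER fully enabled by
 * ibx_irq_pre_postinstall() above, an individual PCH interrupt source
 * is subsequently enabled or disabled purely through its SDEIMR bit
 * (bit set means masked), along these lines:
 */
static void __maybe_unused sde_unmask_sketch(struct drm_i915_private *dev_priv,
                                             u32 bits)
{
        I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bits);
        POSTING_READ(SDEIMR);
}
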
3337 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3338 {
3339         GEN3_IRQ_RESET(GT);
3340         if (INTEL_GEN(dev_priv) >= 6)
3341                 GEN3_IRQ_RESET(GEN6_PM);
3342 }
3343
3344 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3345 {
3346         if (IS_CHERRYVIEW(dev_priv))
3347                 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3348         else
3349                 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3350
3351         i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3352         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3353
3354         i9xx_pipestat_irq_reset(dev_priv);
3355
3356         GEN3_IRQ_RESET(VLV_);
3357         dev_priv->irq_mask = ~0u;
3358 }
3359
3360 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3361 {
3362         u32 pipestat_mask;
3363         u32 enable_mask;
3364         enum pipe pipe;
3365
3366         pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3367
3368         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3369         for_each_pipe(dev_priv, pipe)
3370                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3371
3372         enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3373                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3374                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3375                 I915_LPE_PIPE_A_INTERRUPT |
3376                 I915_LPE_PIPE_B_INTERRUPT;
3377
3378         if (IS_CHERRYVIEW(dev_priv))
3379                 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3380                         I915_LPE_PIPE_C_INTERRUPT;
3381
3382         WARN_ON(dev_priv->irq_mask != ~0u);
3383
3384         dev_priv->irq_mask = ~enable_mask;
3385
3386         GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3387 }
3388
3389 /* drm_dma.h hooks */
3390
3391 static void ironlake_irq_reset(struct drm_device *dev)
3392 {
3393         struct drm_i915_private *dev_priv = to_i915(dev);
3394
3395         GEN3_IRQ_RESET(DE);
3396         if (IS_GEN(dev_priv, 7))
3397                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3398
3399         if (IS_HASWELL(dev_priv)) {
3400                 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3401                 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3402         }
3403
3404         gen5_gt_irq_reset(dev_priv);
3405
3406         ibx_irq_reset(dev_priv);
3407 }
3408
3409 static void valleyview_irq_reset(struct drm_device *dev)
3410 {
3411         struct drm_i915_private *dev_priv = to_i915(dev);
3412
3413         I915_WRITE(VLV_MASTER_IER, 0);
3414         POSTING_READ(VLV_MASTER_IER);
3415
3416         gen5_gt_irq_reset(dev_priv);
3417
3418         spin_lock_irq(&dev_priv->irq_lock);
3419         if (dev_priv->display_irqs_enabled)
3420                 vlv_display_irq_reset(dev_priv);
3421         spin_unlock_irq(&dev_priv->irq_lock);
3422 }
3423
3424 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3425 {
3426         GEN8_IRQ_RESET_NDX(GT, 0);
3427         GEN8_IRQ_RESET_NDX(GT, 1);
3428         GEN8_IRQ_RESET_NDX(GT, 2);
3429         GEN8_IRQ_RESET_NDX(GT, 3);
3430 }
3431
3432 static void gen8_irq_reset(struct drm_device *dev)
3433 {
3434         struct drm_i915_private *dev_priv = to_i915(dev);
3435         int pipe;
3436
3437         gen8_master_intr_disable(dev_priv->uncore.regs);
3438
3439         gen8_gt_irq_reset(dev_priv);
3440
3441         I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3442         I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3443
3444         for_each_pipe(dev_priv, pipe)
3445                 if (intel_display_power_is_enabled(dev_priv,
3446                                                    POWER_DOMAIN_PIPE(pipe)))
3447                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3448
3449         GEN3_IRQ_RESET(GEN8_DE_PORT_);
3450         GEN3_IRQ_RESET(GEN8_DE_MISC_);
3451         GEN3_IRQ_RESET(GEN8_PCU_);
3452
3453         if (HAS_PCH_SPLIT(dev_priv))
3454                 ibx_irq_reset(dev_priv);
3455 }
3456
3457 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
3458 {
3459         /* Disable RCS, BCS, VCS and VECS class engine interrupts. */
3460         I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
3461         I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,    0);
3462
3463         /* Restore the irq masks on the RCS, BCS, VCS and VECS engines. */
3464         I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,   ~0);
3465         I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,    ~0);
3466         I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,   ~0);
3467         I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,   ~0);
3468         I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
3469
3470         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3471         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3472 }
3473
3474 static void gen11_irq_reset(struct drm_device *dev)
3475 {
3476         struct drm_i915_private *dev_priv = to_i915(dev);
3477         int pipe;
3478
3479         gen11_master_intr_disable(dev_priv->uncore.regs);
3480
3481         gen11_gt_irq_reset(dev_priv);
3482
3483         I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3484
3485         I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3486         I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3487
3488         for_each_pipe(dev_priv, pipe)
3489                 if (intel_display_power_is_enabled(dev_priv,
3490                                                    POWER_DOMAIN_PIPE(pipe)))
3491                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3492
3493         GEN3_IRQ_RESET(GEN8_DE_PORT_);
3494         GEN3_IRQ_RESET(GEN8_DE_MISC_);
3495         GEN3_IRQ_RESET(GEN11_DE_HPD_);
3496         GEN3_IRQ_RESET(GEN11_GU_MISC_);
3497         GEN3_IRQ_RESET(GEN8_PCU_);
3498
3499         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3500                 GEN3_IRQ_RESET(SDE);
3501 }
3502
3503 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3504                                      u8 pipe_mask)
3505 {
3506         u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3507         enum pipe pipe;
3508
3509         spin_lock_irq(&dev_priv->irq_lock);
3510
3511         if (!intel_irqs_enabled(dev_priv)) {
3512                 spin_unlock_irq(&dev_priv->irq_lock);
3513                 return;
3514         }
3515
3516         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3517                 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3518                                   dev_priv->de_irq_mask[pipe],
3519                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
3520
3521         spin_unlock_irq(&dev_priv->irq_lock);
3522 }
3523
3524 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3525                                      u8 pipe_mask)
3526 {
3527         enum pipe pipe;
3528
3529         spin_lock_irq(&dev_priv->irq_lock);
3530
3531         if (!intel_irqs_enabled(dev_priv)) {
3532                 spin_unlock_irq(&dev_priv->irq_lock);
3533                 return;
3534         }
3535
3536         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3537                 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3538
3539         spin_unlock_irq(&dev_priv->irq_lock);
3540
3541         /* make sure we're done processing display irqs */
3542         synchronize_irq(dev_priv->drm.irq);
3543 }
3544
3545 static void cherryview_irq_reset(struct drm_device *dev)
3546 {
3547         struct drm_i915_private *dev_priv = to_i915(dev);
3548
3549         I915_WRITE(GEN8_MASTER_IRQ, 0);
3550         POSTING_READ(GEN8_MASTER_IRQ);
3551
3552         gen8_gt_irq_reset(dev_priv);
3553
3554         GEN3_IRQ_RESET(GEN8_PCU_);
3555
3556         spin_lock_irq(&dev_priv->irq_lock);
3557         if (dev_priv->display_irqs_enabled)
3558                 vlv_display_irq_reset(dev_priv);
3559         spin_unlock_irq(&dev_priv->irq_lock);
3560 }
3561
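/*
 * Collect the platform-specific irq bits for every hpd pin whose
 * encoder is in the HPD_ENABLED state, using one of the pin -> bit
 * tables (hpd_ibx, hpd_cpt, hpd_spt, ...) as the lookup.
 */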
3562 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3563                                   const u32 hpd[HPD_NUM_PINS])
3564 {
3565         struct intel_encoder *encoder;
3566         u32 enabled_irqs = 0;
3567
3568         for_each_intel_encoder(&dev_priv->drm, encoder)
3569                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3570                         enabled_irqs |= hpd[encoder->hpd_pin];
3571
3572         return enabled_irqs;
3573 }
3574
3575 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3576 {
3577         u32 hotplug;
3578
3579         /*
3580          * Enable digital hotplug on the PCH, and configure the DP short pulse
3581          * duration to 2ms (which is the minimum in the DisplayPort spec).
3582          * The pulse duration bits are reserved on LPT+.
3583          */
3584         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3585         hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3586                      PORTC_PULSE_DURATION_MASK |
3587                      PORTD_PULSE_DURATION_MASK);
3588         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3589         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3590         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3591         /*
3592          * When CPU and PCH are on the same package, port A
3593          * HPD must be enabled in both north and south.
3594          */
3595         if (HAS_PCH_LPT_LP(dev_priv))
3596                 hotplug |= PORTA_HOTPLUG_ENABLE;
3597         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3598 }
3599
3600 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3601 {
3602         u32 hotplug_irqs, enabled_irqs;
3603
3604         if (HAS_PCH_IBX(dev_priv)) {
3605                 hotplug_irqs = SDE_HOTPLUG_MASK;
3606                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3607         } else {
3608                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3609                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3610         }
3611
3612         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3613
3614         ibx_hpd_detection_setup(dev_priv);
3615 }
3616
3617 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
3618 {
3619         u32 hotplug;
3620
3621         hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3622         hotplug |= ICP_DDIA_HPD_ENABLE |
3623                    ICP_DDIB_HPD_ENABLE;
3624         I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3625
3626         hotplug = I915_READ(SHOTPLUG_CTL_TC);
3627         hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
3628                    ICP_TC_HPD_ENABLE(PORT_TC2) |
3629                    ICP_TC_HPD_ENABLE(PORT_TC3) |
3630                    ICP_TC_HPD_ENABLE(PORT_TC4);
3631         I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3632 }
3633
3634 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3635 {
3636         u32 hotplug_irqs, enabled_irqs;
3637
3638         hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3639         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3640
3641         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3642
3643         icp_hpd_detection_setup(dev_priv);
3644 }
3645
3646 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3647 {
3648         u32 hotplug;
3649
3650         hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3651         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3652                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3653                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3654                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3655         I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3656
3657         hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3658         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3659                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3660                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3661                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3662         I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3663 }
3664
3665 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3666 {
3667         u32 hotplug_irqs, enabled_irqs;
3668         u32 val;
3669
3670         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3671         hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3672
3673         val = I915_READ(GEN11_DE_HPD_IMR);
3674         val &= ~hotplug_irqs;
3675         I915_WRITE(GEN11_DE_HPD_IMR, val);
3676         POSTING_READ(GEN11_DE_HPD_IMR);
3677
3678         gen11_hpd_detection_setup(dev_priv);
3679
3680         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3681                 icp_hpd_irq_setup(dev_priv);
3682 }
3683
3684 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3685 {
3686         u32 val, hotplug;
3687
3688         /* Display WA #1179 WaHardHangonHotPlug: cnp */
3689         if (HAS_PCH_CNP(dev_priv)) {
3690                 val = I915_READ(SOUTH_CHICKEN1);
3691                 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3692                 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3693                 I915_WRITE(SOUTH_CHICKEN1, val);
3694         }
3695
3696         /* Enable digital hotplug on the PCH */
3697         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3698         hotplug |= PORTA_HOTPLUG_ENABLE |
3699                    PORTB_HOTPLUG_ENABLE |
3700                    PORTC_HOTPLUG_ENABLE |
3701                    PORTD_HOTPLUG_ENABLE;
3702         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3703
3704         hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3705         hotplug |= PORTE_HOTPLUG_ENABLE;
3706         I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3707 }
3708
3709 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3710 {
3711         u32 hotplug_irqs, enabled_irqs;
3712
3713         hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3714         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3715
3716         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3717
3718         spt_hpd_detection_setup(dev_priv);
3719 }
3720
3721 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3722 {
3723         u32 hotplug;
3724
3725         /*
3726          * Enable digital hotplug on the CPU, and configure the DP short pulse
3727          * duration to 2ms (which is the minimum in the DisplayPort spec).
3728          * The pulse duration bits are reserved on HSW+.
3729          */
3730         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3731         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3732         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3733                    DIGITAL_PORTA_PULSE_DURATION_2ms;
3734         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3735 }
3736
3737 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3738 {
3739         u32 hotplug_irqs, enabled_irqs;
3740
3741         if (INTEL_GEN(dev_priv) >= 8) {
3742                 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3743                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3744
3745                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3746         } else if (INTEL_GEN(dev_priv) >= 7) {
3747                 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3748                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3749
3750                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3751         } else {
3752                 hotplug_irqs = DE_DP_A_HOTPLUG;
3753                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3754
3755                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3756         }
3757
3758         ilk_hpd_detection_setup(dev_priv);
3759
3760         ibx_hpd_irq_setup(dev_priv);
3761 }
3762
3763 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3764                                       u32 enabled_irqs)
3765 {
3766         u32 hotplug;
3767
3768         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3769         hotplug |= PORTA_HOTPLUG_ENABLE |
3770                    PORTB_HOTPLUG_ENABLE |
3771                    PORTC_HOTPLUG_ENABLE;
3772
3773         DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3774                       hotplug, enabled_irqs);
3775         hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3776
3777         /*
3778          * For BXT the invert bit has to be set based on the AOB design
3779          * for the HPD detection logic; update it based on the VBT fields.
3780          */
3781         if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3782             intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3783                 hotplug |= BXT_DDIA_HPD_INVERT;
3784         if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3785             intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3786                 hotplug |= BXT_DDIB_HPD_INVERT;
3787         if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3788             intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3789                 hotplug |= BXT_DDIC_HPD_INVERT;
3790
3791         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3792 }
3793
3794 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3795 {
3796         __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3797 }
3798
3799 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3800 {
3801         u32 hotplug_irqs, enabled_irqs;
3802
3803         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3804         hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3805
3806         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3807
3808         __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3809 }
3810
3811 static void ibx_irq_postinstall(struct drm_device *dev)
3812 {
3813         struct drm_i915_private *dev_priv = to_i915(dev);
3814         u32 mask;
3815
3816         if (HAS_PCH_NOP(dev_priv))
3817                 return;
3818
3819         if (HAS_PCH_IBX(dev_priv))
3820                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3821         else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3822                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3823         else
3824                 mask = SDE_GMBUS_CPT;
3825
3826         gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3827         I915_WRITE(SDEIMR, ~mask);
3828
3829         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3830             HAS_PCH_LPT(dev_priv))
3831                 ibx_hpd_detection_setup(dev_priv);
3832         else
3833                 spt_hpd_detection_setup(dev_priv);
3834 }
3835
3836 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3837 {
3838         struct drm_i915_private *dev_priv = to_i915(dev);
3839         u32 pm_irqs, gt_irqs;
3840
3841         pm_irqs = gt_irqs = 0;
3842
3843         dev_priv->gt_irq_mask = ~0;
3844         if (HAS_L3_DPF(dev_priv)) {
3845                 /* L3 parity interrupt is always unmasked. */
3846                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3847                 gt_irqs |= GT_PARITY_ERROR(dev_priv);
3848         }
3849
3850         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3851         if (IS_GEN(dev_priv, 5)) {
3852                 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3853         } else {
3854                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3855         }
3856
3857         GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3858
3859         if (INTEL_GEN(dev_priv) >= 6) {
3860                 /*
3861                  * RPS interrupts will get enabled/disabled on demand when RPS
3862                  * itself is enabled/disabled.
3863                  */
3864                 if (HAS_ENGINE(dev_priv, VECS0)) {
3865                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3866                         dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3867                 }
3868
3869                 dev_priv->pm_imr = 0xffffffff;
3870                 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
3871         }
3872 }
3873
3874 static int ironlake_irq_postinstall(struct drm_device *dev)
3875 {
3876         struct drm_i915_private *dev_priv = to_i915(dev);
3877         u32 display_mask, extra_mask;
3878
3879         if (INTEL_GEN(dev_priv) >= 7) {
3880                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3881                                 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3882                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3883                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3884                               DE_DP_A_HOTPLUG_IVB);
3885         } else {
3886                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3887                                 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3888                                 DE_PIPEA_CRC_DONE | DE_POISON);
3889                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3890                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3891                               DE_DP_A_HOTPLUG);
3892         }
3893
3894         if (IS_HASWELL(dev_priv)) {
3895                 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
3896                 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
3897                 display_mask |= DE_EDP_PSR_INT_HSW;
3898         }
3899
3900         dev_priv->irq_mask = ~display_mask;
3901
3902         ibx_irq_pre_postinstall(dev);
3903
3904         GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3905
3906         gen5_gt_irq_postinstall(dev);
3907
3908         ilk_hpd_detection_setup(dev_priv);
3909
3910         ibx_irq_postinstall(dev);
3911
3912         if (IS_IRONLAKE_M(dev_priv)) {
3913                 /* Enable PCU event interrupts
3914                  *
3915                  * Spinlocking is not required here for correctness since interrupt
3916                  * setup is guaranteed to run in single-threaded context, but we
3917                  * need it to make the assert_spin_locked check happy. */
3918                 spin_lock_irq(&dev_priv->irq_lock);
3919                 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3920                 spin_unlock_irq(&dev_priv->irq_lock);
3921         }
3922
3923         return 0;
3924 }
3925
3926 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3927 {
3928         lockdep_assert_held(&dev_priv->irq_lock);
3929
3930         if (dev_priv->display_irqs_enabled)
3931                 return;
3932
3933         dev_priv->display_irqs_enabled = true;
3934
3935         if (intel_irqs_enabled(dev_priv)) {
3936                 vlv_display_irq_reset(dev_priv);
3937                 vlv_display_irq_postinstall(dev_priv);
3938         }
3939 }
3940
3941 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3942 {
3943         lockdep_assert_held(&dev_priv->irq_lock);
3944
3945         if (!dev_priv->display_irqs_enabled)
3946                 return;
3947
3948         dev_priv->display_irqs_enabled = false;
3949
3950         if (intel_irqs_enabled(dev_priv))
3951                 vlv_display_irq_reset(dev_priv);
3952 }
3953
3954
3955 static int valleyview_irq_postinstall(struct drm_device *dev)
3956 {
3957         struct drm_i915_private *dev_priv = to_i915(dev);
3958
3959         gen5_gt_irq_postinstall(dev);
3960
3961         spin_lock_irq(&dev_priv->irq_lock);
3962         if (dev_priv->display_irqs_enabled)
3963                 vlv_display_irq_postinstall(dev_priv);
3964         spin_unlock_irq(&dev_priv->irq_lock);
3965
3966         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3967         POSTING_READ(VLV_MASTER_IER);
3968
3969         return 0;
3970 }
3971
3972 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3973 {
3974         /* These are interrupts we'll toggle with the ring mask register */
3975         u32 gt_interrupts[] = {
3976                 (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3977                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3978                  GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3979                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
3980
3981                 (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
3982                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
3983                  GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3984                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
3985
3986                 0,
3987
3988                 (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3989                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
3990         };
3991
3992         dev_priv->pm_ier = 0x0;
3993         dev_priv->pm_imr = ~dev_priv->pm_ier;
3994         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3995         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3996         /*
3997          * RPS interrupts will get enabled/disabled on demand when RPS itself
3998          * is enabled/disabled. The same will be the case for GuC interrupts.
3999          */
4000         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4001         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4002 }
4003
4004 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4005 {
4006         u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4007         u32 de_pipe_enables;
4008         u32 de_port_masked = GEN8_AUX_CHANNEL_A;
4009         u32 de_port_enables;
4010         u32 de_misc_masked = GEN8_DE_EDP_PSR;
4011         enum pipe pipe;
4012
4013         if (INTEL_GEN(dev_priv) <= 10)
4014                 de_misc_masked |= GEN8_DE_MISC_GSE;
4015
4016         if (INTEL_GEN(dev_priv) >= 9) {
4017                 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4018                 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4019                                   GEN9_AUX_CHANNEL_D;
4020                 if (IS_GEN9_LP(dev_priv))
4021                         de_port_masked |= BXT_DE_PORT_GMBUS;
4022         } else {
4023                 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4024         }
4025
4026         if (INTEL_GEN(dev_priv) >= 11)
4027                 de_port_masked |= ICL_AUX_CHANNEL_E;
4028
4029         if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4030                 de_port_masked |= CNL_AUX_CHANNEL_F;
4031
4032         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4033                                            GEN8_PIPE_FIFO_UNDERRUN;
4034
4035         de_port_enables = de_port_masked;
4036         if (IS_GEN9_LP(dev_priv))
4037                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4038         else if (IS_BROADWELL(dev_priv))
4039                 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
4040
4041         gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4042         intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4043
4044         for_each_pipe(dev_priv, pipe) {
4045                 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4046
4047                 if (intel_display_power_is_enabled(dev_priv,
4048                                 POWER_DOMAIN_PIPE(pipe)))
4049                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
4050                                           dev_priv->de_irq_mask[pipe],
4051                                           de_pipe_enables);
4052         }
4053
4054         GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4055         GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4056
4057         if (INTEL_GEN(dev_priv) >= 11) {
4058                 u32 de_hpd_masked = 0;
4059                 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4060                                      GEN11_DE_TBT_HOTPLUG_MASK;
4061
4062                 GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
4063                 gen11_hpd_detection_setup(dev_priv);
4064         } else if (IS_GEN9_LP(dev_priv)) {
4065                 bxt_hpd_detection_setup(dev_priv);
4066         } else if (IS_BROADWELL(dev_priv)) {
4067                 ilk_hpd_detection_setup(dev_priv);
4068         }
4069 }
4070
4071 static int gen8_irq_postinstall(struct drm_device *dev)
4072 {
4073         struct drm_i915_private *dev_priv = to_i915(dev);
4074
4075         if (HAS_PCH_SPLIT(dev_priv))
4076                 ibx_irq_pre_postinstall(dev);
4077
4078         gen8_gt_irq_postinstall(dev_priv);
4079         gen8_de_irq_postinstall(dev_priv);
4080
4081         if (HAS_PCH_SPLIT(dev_priv))
4082                 ibx_irq_postinstall(dev);
4083
4084         gen8_master_intr_enable(dev_priv->uncore.regs);
4085
4086         return 0;
4087 }
4088
4089 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4090 {
4091         const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4092
4093         BUILD_BUG_ON(irqs & 0xffff0000);
4094
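        /*
         * Each of these registers packs two engines, one per 16-bit
         * half (hence the BUILD_BUG_ON above). The *_RSVD registers
         * carry a real engine only in their upper half, so only that
         * half is unmasked for them.
         */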
4095         /* Enable RCS, BCS, VCS and VECS class interrupts. */
4096         I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
4097         I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,    irqs << 16 | irqs);
4098
4099         /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4100         I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,   ~(irqs << 16));
4101         I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,    ~(irqs << 16));
4102         I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,   ~(irqs | irqs << 16));
4103         I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,   ~(irqs | irqs << 16));
4104         I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
4105
4106         /*
4107          * RPS interrupts will get enabled/disabled on demand when RPS itself
4108          * is enabled/disabled.
4109          */
4110         dev_priv->pm_ier = 0x0;
4111         dev_priv->pm_imr = ~dev_priv->pm_ier;
4112         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4113         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
4114 }
4115
4116 static void icp_irq_postinstall(struct drm_device *dev)
4117 {
4118         struct drm_i915_private *dev_priv = to_i915(dev);
4119         u32 mask = SDE_GMBUS_ICP;
4120
4121         WARN_ON(I915_READ(SDEIER) != 0);
4122         I915_WRITE(SDEIER, 0xffffffff);
4123         POSTING_READ(SDEIER);
4124
4125         gen3_assert_iir_is_zero(dev_priv, SDEIIR);
4126         I915_WRITE(SDEIMR, ~mask);
4127
4128         icp_hpd_detection_setup(dev_priv);
4129 }
4130
4131 static int gen11_irq_postinstall(struct drm_device *dev)
4132 {
4133         struct drm_i915_private *dev_priv = to_i915(dev);
4134         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4135
4136         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4137                 icp_irq_postinstall(dev);
4138
4139         gen11_gt_irq_postinstall(dev_priv);
4140         gen8_de_irq_postinstall(dev_priv);
4141
4142         GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4143
4144         I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4145
4146         gen11_master_intr_enable(dev_priv->uncore.regs);
4147         POSTING_READ(GEN11_GFX_MSTR_IRQ);
4148
4149         return 0;
4150 }
4151
4152 static int cherryview_irq_postinstall(struct drm_device *dev)
4153 {
4154         struct drm_i915_private *dev_priv = to_i915(dev);
4155
4156         gen8_gt_irq_postinstall(dev_priv);
4157
4158         spin_lock_irq(&dev_priv->irq_lock);
4159         if (dev_priv->display_irqs_enabled)
4160                 vlv_display_irq_postinstall(dev_priv);
4161         spin_unlock_irq(&dev_priv->irq_lock);
4162
4163         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4164         POSTING_READ(GEN8_MASTER_IRQ);
4165
4166         return 0;
4167 }
4168
4169 static void i8xx_irq_reset(struct drm_device *dev)
4170 {
4171         struct drm_i915_private *dev_priv = to_i915(dev);
4172
4173         i9xx_pipestat_irq_reset(dev_priv);
4174
4175         GEN2_IRQ_RESET();
4176 }
4177
4178 static int i8xx_irq_postinstall(struct drm_device *dev)
4179 {
4180         struct drm_i915_private *dev_priv = to_i915(dev);
4181         u16 enable_mask;
4182
4183         I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4184                             I915_ERROR_MEMORY_REFRESH));
4185
4186         /* Unmask the interrupts that we always want on. */
4187         dev_priv->irq_mask =
4188                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4189                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4190                   I915_MASTER_ERROR_INTERRUPT);
4191
4192         enable_mask =
4193                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4194                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4195                 I915_MASTER_ERROR_INTERRUPT |
4196                 I915_USER_INTERRUPT;
4197
4198         GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4199
4200         /* Interrupt setup is already guaranteed to be single-threaded; this is
4201          * just to make the assert_spin_locked check happy. */
4202         spin_lock_irq(&dev_priv->irq_lock);
4203         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4204         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4205         spin_unlock_irq(&dev_priv->irq_lock);
4206
4207         return 0;
4208 }
4209
4210 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
4211                                u16 *eir, u16 *eir_stuck)
4212 {
4213         u16 emr;
4214
4215         *eir = I915_READ16(EIR);
4216
4217         if (*eir)
4218                 I915_WRITE16(EIR, *eir);
4219
4220         *eir_stuck = I915_READ16(EIR);
4221         if (*eir_stuck == 0)
4222                 return;
4223
4224         /*
4225          * Toggle all EMR bits to make sure we get an edge
4226          * in the ISR master error bit if we don't clear
4227          * all the EIR bits. Otherwise the edge triggered
4228          * IIR on i965/g4x wouldn't notice that an interrupt
4229          * is still pending. Also some EIR bits can't be
4230          * cleared except by handling the underlying error
4231          * (or by a GPU reset) so we mask any bit that
4232          * remains set.
4233          */
4234         emr = I915_READ16(EMR);
4235         I915_WRITE16(EMR, 0xffff);
4236         I915_WRITE16(EMR, emr | *eir_stuck);
4237 }
4238
4239 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
4240                                    u16 eir, u16 eir_stuck)
4241 {
4242         DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4243
4244         if (eir_stuck)
4245                 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
4246 }
4247
4248 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4249                                u32 *eir, u32 *eir_stuck)
4250 {
4251         u32 emr;
4252
4253         *eir = I915_READ(EIR);
4254
4255         I915_WRITE(EIR, *eir);
4256
4257         *eir_stuck = I915_READ(EIR);
4258         if (*eir_stuck == 0)
4259                 return;
4260
4261         /*
4262          * Toggle all EMR bits to make sure we get an edge
4263          * in the ISR master error bit if we don't clear
4264          * all the EIR bits. Otherwise the edge triggered
4265          * IIR on i965/g4x wouldn't notice that an interrupt
4266          * is still pending. Also some EIR bits can't be
4267          * cleared except by handling the underlying error
4268          * (or by a GPU reset) so we mask any bit that
4269          * remains set.
4270          */
4271         emr = I915_READ(EMR);
4272         I915_WRITE(EMR, 0xffffffff);
4273         I915_WRITE(EMR, emr | *eir_stuck);
4274 }
4275
4276 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4277                                    u32 eir, u32 eir_stuck)
4278 {
4279         DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
4280
4281         if (eir_stuck)
4282                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
4283 }
4284
4285 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4286 {
4287         struct drm_device *dev = arg;
4288         struct drm_i915_private *dev_priv = to_i915(dev);
4289         irqreturn_t ret = IRQ_NONE;
4290
4291         if (!intel_irqs_enabled(dev_priv))
4292                 return IRQ_NONE;
4293
4294         /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4295         disable_rpm_wakeref_asserts(dev_priv);
4296
4297         do {
4298                 u32 pipe_stats[I915_MAX_PIPES] = {};
4299                 u16 eir = 0, eir_stuck = 0;
4300                 u16 iir;
4301
4302                 iir = I915_READ16(IIR);
4303                 if (iir == 0)
4304                         break;
4305
4306                 ret = IRQ_HANDLED;
4307
4308                 /* Call regardless, as some status bits might not be
4309                  * signalled in iir */
4310                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4311
4312                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4313                         i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4314
4315                 I915_WRITE16(IIR, iir);
4316
4317                 if (iir & I915_USER_INTERRUPT)
4318                         intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4319
4320                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4321                         i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4322
4323                 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4324         } while (0);
4325
4326         enable_rpm_wakeref_asserts(dev_priv);
4327
4328         return ret;
4329 }

static void i915_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (I915_HAS_HOTPLUG(dev_priv)) {
                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        i9xx_pipestat_irq_reset(dev_priv);

        GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;

        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
                          I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (I915_HAS_HOTPLUG(dev_priv)) {
                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
        }

        GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded; this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);

        return 0;
}
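
/*
 * Rough model (illustration only, hypothetical helpers): on these
 * generations IMR selects which ISR events may latch into IIR, while
 * IER selects which latched IIR bits actually assert the CPU
 * interrupt. i915_irq_postinstall() above therefore writes irq_mask
 * (already the complement of the always-wanted events) into IMR and
 * enable_mask into IER via GEN3_IRQ_INIT().
 */
static inline u32 i9xx_iir_latch_sketch(u32 isr, u32 imr)
{
        /* Only unmasked ISR events can latch into IIR. */
        return isr & ~imr;
}

static inline bool i9xx_cpu_irq_sketch(u32 iir, u32 ier)
{
        /* The interrupt asserts while any enabled IIR bit is latched. */
        return (iir & ier) != 0;
}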

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = I915_READ(IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (I915_HAS_HOTPLUG(dev_priv) &&
                    iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                I915_WRITE(IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        i9xx_pipestat_irq_reset(dev_priv);

        GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;
        u32 error_mask;

        /*
         * Enable some error detection; note the instruction error mask
         * bit is reserved, so we leave it masked.
         */
        if (IS_G4X(dev_priv)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
                               I915_ERROR_MEMORY_REFRESH);
        } else {
                error_mask = ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH);
        }
        I915_WRITE(EMR, error_mask);

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PORT_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (IS_G4X(dev_priv))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded; this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);

        return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
        u32 hotplug_en;

        lockdep_assert_held(&dev_priv->irq_lock);

        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
        hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
        /*
         * Programming the CRT detection parameters tends to generate
         * a spurious hotplug event about three seconds later. So
         * just do it once.
         */
        if (IS_G4X(dev_priv))
                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

        /* Ignore TV since it's buggy */
        i915_hotplug_interrupt_update_locked(dev_priv,
                                             HOTPLUG_INT_EN_MASK |
                                             CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
                                             CRT_HOTPLUG_ACTIVATION_PERIOD_64,
                                             hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = I915_READ(IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                I915_WRITE(IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

                if (iir & I915_BSD_USER_INTERRUPT)
                        intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support, including work items,
 * timers and all the vtables. It does not set up the interrupt itself,
 * though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int i;

        if (IS_I945GM(dev_priv))
                i945gm_vblank_work_init(dev_priv);

        intel_hpd_init_work(dev_priv);

        INIT_WORK(&rps->work, gen6_pm_rps_work);

        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
                dev_priv->l3_parity.remap_info[i] = NULL;

        if (HAS_GUC_SCHED(dev_priv))
                dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

        /* Let's track the enabled rps events */
        if (IS_VALLEYVIEW(dev_priv))
                /* WaGsvRC0ResidencyMethod:vlv */
                dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
                dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
                                           GEN6_PM_RP_DOWN_THRESHOLD |
                                           GEN6_PM_RP_DOWN_TIMEOUT);

        rps->pm_intrmsk_mbz = 0;

        /*
         * SNB,IVB,HSW can hard hang, and VLV,CHV may also hard hang, on a
         * looping batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
         *
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_GEN(dev_priv) <= 7)
                rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

        if (INTEL_GEN(dev_priv) >= 8)
                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                dev->driver->get_vblank_counter = g4x_get_vblank_counter;
        else if (INTEL_GEN(dev_priv) >= 3)
                dev->driver->get_vblank_counter = i915_get_vblank_counter;

        /*
         * Opt out of the vblank disable timer on everything except gen2.
         * Gen2 doesn't have a hardware frame counter and so depends on
         * vblank interrupts to produce sane vblank sequence numbers.
         */
        if (!IS_GEN(dev_priv, 2))
                dev->vblank_disable_immediate = true;

        /* Most platforms treat the display irq block as an always-on
         * power domain. vlv/chv can disable it at runtime and need
         * special care to avoid writing any of the display block registers
         * outside of the power domain. We defer setting up the display irqs
         * in this case to the runtime pm.
         */
        dev_priv->display_irqs_enabled = true;
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->display_irqs_enabled = false;

        dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
        /* If we have MST support, we want to avoid doing short HPD IRQ storm
         * detection, as short HPD storms will occur as a natural part of
         * sideband messaging with MST.
         * On older platforms however, IRQ storms can occur with both long and
         * short pulses, as seen on some G4x systems.
         */
        dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

        dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

        if (IS_CHERRYVIEW(dev_priv)) {
                dev->driver->irq_handler = cherryview_irq_handler;
                dev->driver->irq_preinstall = cherryview_irq_reset;
                dev->driver->irq_postinstall = cherryview_irq_postinstall;
                dev->driver->irq_uninstall = cherryview_irq_reset;
                dev->driver->enable_vblank = i965_enable_vblank;
                dev->driver->disable_vblank = i965_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_reset;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
                dev->driver->irq_uninstall = valleyview_irq_reset;
                dev->driver->enable_vblank = i965_enable_vblank;
                dev->driver->disable_vblank = i965_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (INTEL_GEN(dev_priv) >= 11) {
                dev->driver->irq_handler = gen11_irq_handler;
                dev->driver->irq_preinstall = gen11_irq_reset;
                dev->driver->irq_postinstall = gen11_irq_postinstall;
                dev->driver->irq_uninstall = gen11_irq_reset;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
                dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
                dev->driver->irq_postinstall = gen8_irq_postinstall;
                dev->driver->irq_uninstall = gen8_irq_reset;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
                if (IS_GEN9_LP(dev_priv))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
                else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
                else
                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_reset;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_reset;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else {
                if (IS_GEN(dev_priv, 2)) {
                        dev->driver->irq_preinstall = i8xx_irq_reset;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_reset;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
                } else if (IS_I945GM(dev_priv)) {
                        dev->driver->irq_preinstall = i915_irq_reset;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_reset;
                        dev->driver->irq_handler = i915_irq_handler;
                        dev->driver->enable_vblank = i945gm_enable_vblank;
                        dev->driver->disable_vblank = i945gm_disable_vblank;
                } else if (IS_GEN(dev_priv, 3)) {
                        dev->driver->irq_preinstall = i915_irq_reset;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_reset;
                        dev->driver->irq_handler = i915_irq_handler;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
                } else {
                        dev->driver->irq_preinstall = i965_irq_reset;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_reset;
                        dev->driver->irq_handler = i965_irq_handler;
                        dev->driver->enable_vblank = i965_enable_vblank;
                        dev->driver->disable_vblank = i965_disable_vblank;
                }
                if (I915_HAS_HOTPLUG(dev_priv))
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        }
}
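
/*
 * Rough sketch of how the vtable filled in above is consumed,
 * paraphrasing the legacy drm_irq_install() flow (hypothetical
 * helper, not the real DRM implementation): preinstall quiesces the
 * hardware, request_irq() wires up the chosen handler, and
 * postinstall unmasks the interrupts the driver wants.
 */
static int __maybe_unused i915_irq_install_sketch(struct drm_device *dev, int irq)
{
        int ret;

        dev->driver->irq_preinstall(dev);

        ret = request_irq(irq, dev->driver->irq_handler, IRQF_SHARED,
                          dev->driver->name, dev);
        if (ret)
                return ret;

        return dev->driver->irq_postinstall(dev);
}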

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
        int i;

        if (IS_I945GM(i915))
                i945gm_vblank_work_fini(i915);

        for (i = 0; i < MAX_L3_SLICES; ++i)
                kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into a two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
        /*
         * We enable some interrupt sources in our postinstall hooks, so mark
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
        dev_priv->runtime_pm.irqs_enabled = true;

        return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
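
/*
 * Minimal usage sketch (hypothetical probe path, not the actual load
 * code): the two-stage split described above means intel_irq_init()
 * runs first to set up vtables and work items, the remaining
 * single-threaded probe work happens in between, and only then does
 * intel_irq_install() enable the hardware interrupt.
 */
static int __maybe_unused i915_load_irq_sketch(struct drm_i915_private *i915)
{
        intel_irq_init(i915);

        /* ... remaining single-threaded probe setup would run here ... */

        return intel_irq_install(i915);
}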

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
        drm_irq_uninstall(&dev_priv->drm);
        intel_hpd_cancel_work(dev_priv);
        dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
        dev_priv->runtime_pm.irqs_enabled = false;
        synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->runtime_pm.irqs_enabled = true;
        dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
        dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}