/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
        [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
        [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
        [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
        [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
        [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
        [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
        [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
        [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
        [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
        [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
        [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
        [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
        [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
        [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
        [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

static const u32 hpd_mcc[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
        [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
        [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
        [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
        [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP,
        [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP,
        [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP,
        [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP,
        [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP,
        [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP,
        [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
};

static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
                           i915_reg_t iir, i915_reg_t ier)
{
        intel_uncore_write(uncore, imr, 0xffffffff);
        intel_uncore_posting_read(uncore, imr);

        intel_uncore_write(uncore, ier, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
        intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IMR);

        intel_uncore_write16(uncore, GEN2_IER, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
({ \
        unsigned int which_ = which; \
        gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
                       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
})

#define GEN3_IRQ_RESET(uncore, type) \
        gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)

#define GEN2_IRQ_RESET(uncore) \
        gen2_irq_reset(uncore)

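/*
 * Illustrative sketch (not part of the original file): the type token in
 * the reset macros above is pasted onto the IMR/IIR/IER register names,
 * so "DE" expands to DEIMR/DEIIR/DEIER, while the banked GEN8 variant
 * additionally takes an index. The function and register names below are
 * real, but the wrapper itself is only an example.
 */
#if 0
static void example_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        /* expands to gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER) */
        GEN3_IRQ_RESET(uncore, DE);

        /* banked GT registers: GEN8_GT_IMR(0)/GEN8_GT_IIR(0)/GEN8_GT_IER(0) */
        GEN8_IRQ_RESET_NDX(uncore, GT, 0);
}
#endif
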
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
        u32 val = intel_uncore_read(uncore, reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
        u16 val = intel_uncore_read16(uncore, GEN2_IIR);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(GEN2_IIR), val);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

static void gen3_irq_init(struct intel_uncore *uncore,
                          i915_reg_t imr, u32 imr_val,
                          i915_reg_t ier, u32 ier_val,
                          i915_reg_t iir)
{
        gen3_assert_iir_is_zero(uncore, iir);

        intel_uncore_write(uncore, ier, ier_val);
        intel_uncore_write(uncore, imr, imr_val);
        intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
                          u32 imr_val, u32 ier_val)
{
        gen2_assert_iir_is_zero(uncore);

        intel_uncore_write16(uncore, GEN2_IER, ier_val);
        intel_uncore_write16(uncore, GEN2_IMR, imr_val);
        intel_uncore_posting_read16(uncore, GEN2_IMR);
}

#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
({ \
        unsigned int which_ = which; \
        gen3_irq_init((uncore), \
                      GEN8_##type##_IMR(which_), imr_val, \
                      GEN8_##type##_IER(which_), ier_val, \
                      GEN8_##type##_IIR(which_)); \
})

#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
        gen3_irq_init((uncore), \
                      type##IMR, imr_val, \
                      type##IER, ier_val, \
                      type##IIR)

#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
        gen2_irq_init((uncore), imr_val, ier_val)

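/*
 * Illustrative sketch (not part of the original file): the intended
 * lifecycle pairing of the reset and init helpers. IMR/IIR are cleared at
 * preinstall/uninstall, and gen3_irq_init() then asserts IIR is still zero
 * before programming IER/IMR at postinstall. The mask value here is a
 * hypothetical enable set chosen only for the example.
 */
#if 0
static void example_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 display_mask = DE_MASTER_IRQ_CONTROL; /* hypothetical enable set */

        /* unmask exactly what we enable; IIR must already be zero */
        GEN3_IRQ_INIT(uncore, DE, ~display_mask, display_mask);
}
#endif
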
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     u32 mask,
                                     u32 bits)
{
        u32 val;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent these read-modify-write cycles
 * from interfering, the bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   u32 mask,
                                   u32 bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}

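/*
 * Illustrative sketch (not part of the original file): callers already
 * holding irq_lock use the _locked variant directly; everyone else goes
 * through the wrapper above. Enabling passes the same value for mask and
 * bits, disabling passes 0 for bits. 'hotplug_en' is a hypothetical set
 * of detect bits chosen only for the example.
 */
#if 0
static void example_hpd_toggle(struct drm_i915_private *dev_priv,
                               u32 hotplug_en)
{
        /* enable the given detect bits */
        i915_hotplug_interrupt_update(dev_priv, hotplug_en, hotplug_en);

        /* ... later, disable them again */
        i915_hotplug_interrupt_update(dev_priv, hotplug_en, 0);
}
#endif
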
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
                         const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct intel_gt *gt,
                                const unsigned int bank,
                                const unsigned int bit)
{
        void __iomem * const regs = gt->uncore->regs;
        u32 dw;

        lockdep_assert_held(&gt->i915->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(gt, bank, bit);

                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody.
                 */
                raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

                return true;
        }

        return false;
}

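/*
 * Worked sequence for the GT INT DW protocol above (distilled from the
 * function's own comments, for illustration):
 *
 *   dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));      // read == lock
 *   ... service Selector & Shared IIR for (bank, bit) ...
 *   raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); // clear == unlock
 *
 * Skipping the final write-to-clear would leave the bank locked for
 * every other consumer of the register.
 */
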
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                            u32 interrupt_mask,
                            u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

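/*
 * Illustrative sketch (not part of the original file): the driver keeps
 * thin enable/disable wrappers around update helpers of this shape (for
 * instance ilk_enable_display_irq()/ilk_disable_display_irq() in the
 * display headers, an assumption worth verifying): enable passes the
 * bits for both arguments, disable passes 0 for the second.
 */
#if 0
static void example_ilk_toggle(struct drm_i915_private *dev_priv, u32 bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        ilk_update_display_irq(dev_priv, bits, bits);   /* unmask */
        ilk_update_display_irq(dev_priv, bits, 0);      /* mask again */
        spin_unlock_irq(&dev_priv->irq_lock);
}
#endif
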
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              u32 interrupt_mask,
                              u32 enabled_irq_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
        intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

        return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static void write_pm_imr(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        u32 mask = gt->pm_imr;
        i915_reg_t reg;

        if (INTEL_GEN(i915) >= 11) {
                reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
                /* pm is in upper half */
                mask = mask << 16;
        } else if (INTEL_GEN(i915) >= 8) {
                reg = GEN8_GT_IMR(2);
        } else {
                reg = GEN6_PMIMR;
        }

        intel_uncore_write(uncore, reg, mask);
        intel_uncore_posting_read(uncore, reg);
}

static void write_pm_ier(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        u32 mask = gt->pm_ier;
        i915_reg_t reg;

        if (INTEL_GEN(i915) >= 11) {
                reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
                /* pm is in upper half */
                mask = mask << 16;
        } else if (INTEL_GEN(i915) >= 8) {
                reg = GEN8_GT_IER(2);
        } else {
                reg = GEN6_PMIER;
        }

        intel_uncore_write(uncore, reg, mask);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @gt: gt for the interrupts
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct intel_gt *gt,
                              u32 interrupt_mask,
                              u32 enabled_irq_mask)
{
        u32 new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&gt->i915->irq_lock);

        new_val = gt->pm_imr;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != gt->pm_imr) {
                gt->pm_imr = new_val;
                write_pm_imr(gt);
        }
}

void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(gt->i915)))
                return;

        snb_update_pm_irq(gt, mask, mask);
}

static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
{
        snb_update_pm_irq(gt, mask, 0);
}

void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(gt->i915)))
                return;

        __gen6_mask_pm_irq(gt, mask);
}

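/*
 * Note on the IMR convention used throughout this file: a set bit in IMR
 * *masks* (disables) the interrupt, so "unmask" clears the bit and "mask"
 * sets it. That is why the update helpers above compute
 *
 *   new_val = (old & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask);
 *
 * i.e. bits being enabled end up cleared in IMR and bits being disabled
 * end up set.
 */
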
static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        lockdep_assert_held(&dev_priv->irq_lock);

        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
        POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
{
        lockdep_assert_held(&gt->i915->irq_lock);

        gt->pm_ier |= enable_mask;
        write_pm_ier(gt);
        gen6_unmask_pm_irq(gt, enable_mask);
        /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
{
        lockdep_assert_held(&gt->i915->irq_lock);

        gt->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(gt, disable_mask);
        write_pm_ier(gt);
        /* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
                ;

        dev_priv->gt_pm.rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
        dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt = &dev_priv->gt;
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON_ONCE(rps->pm_iir);

        if (INTEL_GEN(dev_priv) >= 11)
                WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
        else
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

        rps->interrupts_enabled = true;
        gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
{
        return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (!READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        rps->interrupts_enabled = false;

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

        gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);

        spin_unlock_irq(&dev_priv->irq_lock);
        intel_synchronize_irq(dev_priv);

        /* Now that we will not be generating any more work, flush any
         * outstanding tasks. As we are called on the RPS idle path,
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
        cancel_work_sync(&rps->work);
        if (INTEL_GEN(dev_priv) >= 11)
                gen11_reset_rps_interrupts(dev_priv);
        else
                gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct drm_i915_private *i915 = gt->i915;

        assert_rpm_wakelock_held(&i915->runtime_pm);

        spin_lock_irq(&i915->irq_lock);
        gen6_reset_pm_iir(i915, gt->pm_guc_events);
        spin_unlock_irq(&i915->irq_lock);
}

void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct drm_i915_private *i915 = gt->i915;

        assert_rpm_wakelock_held(&i915->runtime_pm);

        spin_lock_irq(&i915->irq_lock);
        if (!guc->interrupts.enabled) {
                WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) &
                             gt->pm_guc_events);
                guc->interrupts.enabled = true;
                gen6_enable_pm_irq(gt, gt->pm_guc_events);
        }
        spin_unlock_irq(&i915->irq_lock);
}

void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct drm_i915_private *i915 = gt->i915;

        assert_rpm_wakelock_held(&i915->runtime_pm);

        spin_lock_irq(&i915->irq_lock);
        guc->interrupts.enabled = false;

        gen6_disable_pm_irq(gt, gt->pm_guc_events);

        spin_unlock_irq(&i915->irq_lock);
        intel_synchronize_irq(i915);

        gen9_reset_guc_interrupts(guc);
}

void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct drm_i915_private *i915 = gt->i915;

        spin_lock_irq(&i915->irq_lock);
        gen11_reset_one_iir(gt, 0, GEN11_GUC);
        spin_unlock_irq(&i915->irq_lock);
}

void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->i915->irq_lock);
        if (!guc->interrupts.enabled) {
                u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

                WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC));
                intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
                intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
                guc->interrupts.enabled = true;
        }
        spin_unlock_irq(&gt->i915->irq_lock);
}

void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct drm_i915_private *i915 = gt->i915;

        spin_lock_irq(&i915->irq_lock);
        guc->interrupts.enabled = false;

        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

        spin_unlock_irq(&i915->irq_lock);
        intel_synchronize_irq(i915);

        gen11_reset_guc_interrupts(guc);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;
        u32 old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        old_val = I915_READ(GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
                POSTING_READ(GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
                         u32 interrupt_mask,
                         u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  u32 interrupt_mask,
                                  u32 enabled_irq_mask)
{
        u32 sdeimr = I915_READ(SDEIMR);

        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
                              enum pipe pipe)
{
        u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
        u32 enable_mask = status_mask << 16;

        lockdep_assert_held(&dev_priv->irq_lock);

        if (INTEL_GEN(dev_priv) < 5)
                goto out;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
        WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                  status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                  pipe_name(pipe), enable_mask, status_mask);

        return enable_mask;
}

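/*
 * Bit-layout sketch for PIPESTAT (for illustration): the enable bits sit
 * in the high 16 bits and the status bits in the low 16 bits, which is
 * why i915_pipestat_enable_mask() starts from "status_mask << 16" and
 * why the enable/disable helpers below write "enable_mask | status_mask"
 * (writing 1 to a status bit acks it).
 *
 *   31            16 15             0
 *  +----------------+----------------+
 *  |  enable bits   |  status bits   |
 *  +----------------+----------------+
 */
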
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
                          enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
                           enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->opregion.asle)
                return false;

        return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!i915_has_asle(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_GEN(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
        const struct drm_display_mode *mode = &vblank->hwmode;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        unsigned long irqflags;

        /*
         * On i965gm TV output the frame counter only works up to
         * the point when we enable the TV encoder. After that the
         * frame counter ceases to work and reads zero. We need a
         * vblank wait before enabling the TV encoder and so we
         * have to enable vblank interrupts while the frame counter
         * is still in a working state. However the core vblank code
         * does not like us returning non-zero frame counter values
         * when we've told it that we don't have a working frame
         * counter. Thus we must stop non-zero values leaking out.
         */
        if (!vblank->max_vblank_count)
                return 0;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ_FW(low_frame);
                high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

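/*
 * Worked example for the counter math above (made-up numbers, for
 * illustration): with vbl_start = 3 lines and htotal = 100 pixels,
 * vbl_start becomes 300 pixels, minus (htotal - hsync_start) to move the
 * reference point back to the start of hsync. Since the hardware frame
 * counter ((high1 << 8) | low) increments at the start of active, adding
 * (pixel >= vbl_start) rolls the count forward once the pixel counter
 * crosses into vblank, yielding a counter that effectively increments at
 * vblank start instead.
 */
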
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        enum pipe pipe = to_intel_crtc(crtc)->pipe;

        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe scanline
 * register will not work to get the scanline, since the timings
 * are driven from the PORT or there are issues with scanline
 * register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_vblank_crtc *vblank =
                &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        const struct drm_display_mode *mode = &vblank->hwmode;
        u32 vblank_start = mode->crtc_vblank_start;
        u32 vtotal = mode->crtc_vtotal;
        u32 htotal = mode->crtc_htotal;
        u32 clock = mode->crtc_clock;
        u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

        /*
         * To avoid the race condition where we might cross into the
         * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * during the same frame.
         */
        do {
                /*
                 * This field provides read back of the display
                 * pipe frame time stamp. The time stamp value
                 * is sampled at every start of vertical blank.
                 */
                scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

                /*
                 * The TIMESTAMP_CTR register has the current
                 * time stamp value.
                 */
                scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

                scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
        } while (scan_post_time != scan_prev_time);

        scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
                                        clock), 1000 * htotal);
        scanline = min(scanline, vtotal - 1);
        scanline = (scanline + vblank_start) % vtotal;

        return scanline;
}

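/*
 * Derivation of the conversion above (a sketch; it assumes the timestamp
 * counters tick in microseconds, which the 1000 divisor implies, while
 * crtc_clock is in kHz):
 *
 *   pixels since vblank start = delta_us * clock_khz / 1000
 *   scanline = pixels / htotal
 *
 * which is the single div_u64(mul_u32_u32(delta, clock), 1000 * htotal)
 * computed above, then clamped to vtotal - 1 and rebased so that the
 * result is relative to vblank_start modulo vtotal.
 */
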
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode;
        struct drm_vblank_crtc *vblank;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        if (!crtc->active)
                return -1;

        vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        mode = &vblank->hwmode;

        if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
                return __intel_get_crtc_scanline_from_timestamp(crtc);

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN(dev_priv, 2))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank.  So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev_priv) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                              bool in_vblank_irq, int *vpos, int *hpos,
                              ktime_t *stime, ktime_t *etime,
                              const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                pipe);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        unsigned long irqflags;
        bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
                IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
                mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return false;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (use_scanline_counter) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (use_scanline_counter) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        return true;
}

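/*
 * Worked example for the normalization above (made-up numbers): with
 * vbl_start = 3, vbl_end = 6 and vtotal = 6 (in lines, scanlines 0..5,
 * vblank spanning lines 3..5):
 *
 *   scanline 4 (inside vblank)  -> 4 - 6 = -2, counting up to 0 at vbl_end
 *   scanline 1 (outside vblank) -> 1 + (6 - 6) = 1, counting up from vbl_end
 *
 * so callers see negative positions while in vblank and positive ones
 * while scanning out, without needing any extra flag.
 */
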
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        intel_uncore_write16(uncore,
                             MEMINTRSTS,
                             intel_uncore_read(uncore, MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
        busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
        max_avg = intel_uncore_read(uncore, RCBMAXAVG);
        min_avg = intel_uncore_read(uncore, RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev_priv, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->ktime = ktime_get_raw();
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        const struct intel_rps_ei *prev = &rps->ei;
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);

        if (prev->ktime) {
                u64 time, c0;
                u32 render, media;

                time = ktime_us_delta(now.ktime, prev->ktime);

                time *= dev_priv->czclk_freq;

                /* Workload can be split between render + media,
                 * e.g. SwapBuffers being blitted in X after being rendered in
                 * mesa. To account for this we need to combine both engines
                 * into our activity counter.
                 */
                render = now.render_c0 - prev->render_c0;
                media = now.media_c0 - prev->media_c0;
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

                if (c0 > time * rps->power.up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
                else if (c0 < time * rps->power.down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }

        rps->ei = now;
        return events;
}

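/*
 * Unit check for the comparison above (a sketch of the reasoning; the
 * scale factors follow the inline comments, and the exact czclk units
 * are an assumption here): both sides are brought to comparable units
 * before the test,
 *
 *   time = delta_us * czclk_freq
 *   c0   = counter_delta * 1000 * 100 << 8   ("to usecs and scale to threshold%")
 *
 * so "c0 > time * rps->power.up_threshold" effectively compares the
 * measured render/media busyness percentage against the up threshold,
 * and symmetrically for the down threshold.
 */
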
1387 static void gen6_pm_rps_work(struct work_struct *work)
1388 {
1389         struct drm_i915_private *dev_priv =
1390                 container_of(work, struct drm_i915_private, gt_pm.rps.work);
1391         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1392         bool client_boost = false;
1393         int new_delay, adj, min, max;
1394         u32 pm_iir = 0;
1395
1396         spin_lock_irq(&dev_priv->irq_lock);
1397         if (rps->interrupts_enabled) {
1398                 pm_iir = fetch_and_zero(&rps->pm_iir);
1399                 client_boost = atomic_read(&rps->num_waiters);
1400         }
1401         spin_unlock_irq(&dev_priv->irq_lock);
1402
1403         /* Make sure we didn't queue anything we're not going to process. */
1404         WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1405         if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1406                 goto out;
1407
1408         mutex_lock(&rps->lock);
1409
1410         pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1411
1412         adj = rps->last_adj;
1413         new_delay = rps->cur_freq;
1414         min = rps->min_freq_softlimit;
1415         max = rps->max_freq_softlimit;
1416         if (client_boost)
1417                 max = rps->max_freq;
1418         if (client_boost && new_delay < rps->boost_freq) {
1419                 new_delay = rps->boost_freq;
1420                 adj = 0;
1421         } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1422                 if (adj > 0)
1423                         adj *= 2;
1424                 else /* CHV needs even encode values */
1425                         adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1426
1427                 if (new_delay >= rps->max_freq_softlimit)
1428                         adj = 0;
1429         } else if (client_boost) {
1430                 adj = 0;
1431         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1432                 if (rps->cur_freq > rps->efficient_freq)
1433                         new_delay = rps->efficient_freq;
1434                 else if (rps->cur_freq > rps->min_freq_softlimit)
1435                         new_delay = rps->min_freq_softlimit;
1436                 adj = 0;
1437         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1438                 if (adj < 0)
1439                         adj *= 2;
1440                 else /* CHV needs even encode values */
1441                         adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1442
1443                 if (new_delay <= rps->min_freq_softlimit)
1444                         adj = 0;
1445         } else { /* unknown event */
1446                 adj = 0;
1447         }
1448
1449         rps->last_adj = adj;
1450
1451         /*
1452          * Limit deboosting and boosting to keep ourselves at the extremes
1453          * when in the respective power modes (i.e. slowly decrease frequencies
1454          * while in the HIGH_POWER zone and slowly increase frequencies while
1455          * in the LOW_POWER zone). On idle, we will hit the timeout and drop
1456          * to the next level quickly, and conversely if busy we expect to
1457          * hit a waitboost and rapidly switch into max power.
1458          */
1459         if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
1460             (adj > 0 && rps->power.mode == LOW_POWER))
1461                 rps->last_adj = 0;
1462
1463         /* sysfs frequency interfaces may have snuck in while servicing the
1464          * interrupt.
1465          */
1466         new_delay += adj;
1467         new_delay = clamp_t(int, new_delay, min, max);
1468
1469         if (intel_set_rps(dev_priv, new_delay)) {
1470                 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1471                 rps->last_adj = 0;
1472         }
1473
1474         mutex_unlock(&rps->lock);
1475
1476 out:
1477         /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1478         spin_lock_irq(&dev_priv->irq_lock);
1479         if (rps->interrupts_enabled)
1480                 gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
1481         spin_unlock_irq(&dev_priv->irq_lock);
1482 }
1483
1484
1485 /**
1486  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1487  * occurred.
1488  * @work: workqueue struct
1489  *
1490  * Doesn't actually do anything except notify userspace. As a consequence of
1491  * this event, userspace should try to remap the bad rows, since statistically
1492  * the same row is likely to go bad again.
1493  */
1494 static void ivybridge_parity_work(struct work_struct *work)
1495 {
1496         struct drm_i915_private *dev_priv =
1497                 container_of(work, typeof(*dev_priv), l3_parity.error_work);
1498         u32 error_status, row, bank, subbank;
1499         char *parity_event[6];
1500         u32 misccpctl;
1501         u8 slice = 0;
1502
1503         /* We must turn off DOP level clock gating to access the L3 registers.
1504          * In order to prevent a get/put style interface, acquire struct mutex
1505          * any time we access those registers.
1506          */
1507         mutex_lock(&dev_priv->drm.struct_mutex);
1508
1509         /* If we've screwed up tracking, just let the interrupt fire again */
1510         if (WARN_ON(!dev_priv->l3_parity.which_slice))
1511                 goto out;
1512
1513         misccpctl = I915_READ(GEN7_MISCCPCTL);
1514         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1515         POSTING_READ(GEN7_MISCCPCTL);
1516
1517         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1518                 i915_reg_t reg;
1519
1520                 slice--;
1521                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1522                         break;
1523
1524                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1525
1526                 reg = GEN7_L3CDERRST1(slice);
1527
1528                 error_status = I915_READ(reg);
1529                 row = GEN7_PARITY_ERROR_ROW(error_status);
1530                 bank = GEN7_PARITY_ERROR_BANK(error_status);
1531                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1532
1533                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1534                 POSTING_READ(reg);
1535
1536                 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1537                 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1538                 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1539                 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1540                 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1541                 parity_event[5] = NULL;
1542
1543                 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1544                                    KOBJ_CHANGE, parity_event);
1545
1546                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1547                           slice, row, bank, subbank);
1548
1549                 kfree(parity_event[4]);
1550                 kfree(parity_event[3]);
1551                 kfree(parity_event[2]);
1552                 kfree(parity_event[1]);
1553         }
1554
1555         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1556
1557 out:
1558         WARN_ON(dev_priv->l3_parity.which_slice);
1559         spin_lock_irq(&dev_priv->irq_lock);
1560         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1561         spin_unlock_irq(&dev_priv->irq_lock);
1562
1563         mutex_unlock(&dev_priv->drm.struct_mutex);
1564 }
1565
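/*
 * First-level L3 parity handling: mask further parity interrupts,
 * record which slice(s) signalled the error, and defer the uevent
 * reporting to ivybridge_parity_work() above.
 */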
1566 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1567                                                u32 iir)
1568 {
1569         if (!HAS_L3_DPF(dev_priv))
1570                 return;
1571
1572         spin_lock(&dev_priv->irq_lock);
1573         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1574         spin_unlock(&dev_priv->irq_lock);
1575
1576         iir &= GT_PARITY_ERROR(dev_priv);
1577         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1578                 dev_priv->l3_parity.which_slice |= 1 << 1;
1579
1580         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1581                 dev_priv->l3_parity.which_slice |= 1 << 0;
1582
1583         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1584 }
1585
1586 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1587                                u32 gt_iir)
1588 {
1589         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1590                 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1591         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1592                 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1593 }
1594
1595 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1596                                u32 gt_iir)
1597 {
1598         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1599                 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1600         if (gt_iir & GT_BSD_USER_INTERRUPT)
1601                 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1602         if (gt_iir & GT_BLT_USER_INTERRUPT)
1603                 intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1604
1605         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1606                       GT_BSD_CS_ERROR_INTERRUPT |
1607                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1608                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1609
1610         if (gt_iir & GT_PARITY_ERROR(dev_priv))
1611                 ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1612 }
1613
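/*
 * Per-engine interrupt dispatch for GEN8+: user interrupts signal
 * completed breadcrumbs directly, while context-switch events (and
 * breadcrumb processing, where the engine requires it) are kicked over
 * to the execlists tasklet.
 */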
1614 static void
1615 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1616 {
1617         bool tasklet = false;
1618
1619         if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
1620                 tasklet = true;
1621
1622         if (iir & GT_RENDER_USER_INTERRUPT) {
1623                 intel_engine_breadcrumbs_irq(engine);
1624                 tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
1625         }
1626
1627         if (tasklet)
1628                 tasklet_hi_schedule(&engine->execlists.tasklet);
1629 }
1630
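/*
 * Read and clear (ack) the GT IIR banks selected by master_ctl using
 * raw mmio accessors, stashing the payloads in gt_iir[] so the actual
 * processing can be done later by gen8_gt_irq_handler(), outside the
 * window where the master interrupt is disabled.
 */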
1631 static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1632                             u32 master_ctl, u32 gt_iir[4])
1633 {
1634         void __iomem * const regs = i915->uncore.regs;
1635
1636 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1637                       GEN8_GT_BCS_IRQ | \
1638                       GEN8_GT_VCS0_IRQ | \
1639                       GEN8_GT_VCS1_IRQ | \
1640                       GEN8_GT_VECS_IRQ | \
1641                       GEN8_GT_PM_IRQ | \
1642                       GEN8_GT_GUC_IRQ)
1643
1644         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1645                 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
1646                 if (likely(gt_iir[0]))
1647                         raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1648         }
1649
1650         if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
1651                 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
1652                 if (likely(gt_iir[1]))
1653                         raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1654         }
1655
1656         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1657                 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1658                 if (likely(gt_iir[2]))
1659                         raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
1660         }
1661
1662         if (master_ctl & GEN8_GT_VECS_IRQ) {
1663                 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
1664                 if (likely(gt_iir[3]))
1665                         raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1666         }
1667 }
1668
1669 static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1670                                 u32 master_ctl, u32 gt_iir[4])
1671 {
1672         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1673                 gen8_cs_irq_handler(i915->engine[RCS0],
1674                                     gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
1675                 gen8_cs_irq_handler(i915->engine[BCS0],
1676                                     gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1677         }
1678
1679         if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
1680                 gen8_cs_irq_handler(i915->engine[VCS0],
1681                                     gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
1682                 gen8_cs_irq_handler(i915->engine[VCS1],
1683                                     gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1684         }
1685
1686         if (master_ctl & GEN8_GT_VECS_IRQ) {
1687                 gen8_cs_irq_handler(i915->engine[VECS0],
1688                                     gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1689         }
1690
1691         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1692                 gen6_rps_irq_handler(i915, gt_iir[2]);
1693                 guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16);
1694         }
1695 }
1696
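/*
 * The *_hotplug_long_detect() helpers below decode a platform's
 * hotplug control/status register and report whether a given HPD pin
 * saw a long pulse (plug/unplug) rather than a short pulse (e.g. a DP
 * short HPD IRQ).
 */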
1697 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1698 {
1699         switch (pin) {
1700         case HPD_PORT_C:
1701                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1702         case HPD_PORT_D:
1703                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1704         case HPD_PORT_E:
1705                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1706         case HPD_PORT_F:
1707                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1708         default:
1709                 return false;
1710         }
1711 }
1712
1713 static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1714 {
1715         switch (pin) {
1716         case HPD_PORT_D:
1717                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1718         case HPD_PORT_E:
1719                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1720         case HPD_PORT_F:
1721                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1722         case HPD_PORT_G:
1723                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1724         case HPD_PORT_H:
1725                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
1726         case HPD_PORT_I:
1727                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
1728         default:
1729                 return false;
1730         }
1731 }
1732
1733 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1734 {
1735         switch (pin) {
1736         case HPD_PORT_A:
1737                 return val & PORTA_HOTPLUG_LONG_DETECT;
1738         case HPD_PORT_B:
1739                 return val & PORTB_HOTPLUG_LONG_DETECT;
1740         case HPD_PORT_C:
1741                 return val & PORTC_HOTPLUG_LONG_DETECT;
1742         default:
1743                 return false;
1744         }
1745 }
1746
1747 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1748 {
1749         switch (pin) {
1750         case HPD_PORT_A:
1751                 return val & ICP_DDIA_HPD_LONG_DETECT;
1752         case HPD_PORT_B:
1753                 return val & ICP_DDIB_HPD_LONG_DETECT;
1754         case HPD_PORT_C:
1755                 return val & TGP_DDIC_HPD_LONG_DETECT;
1756         default:
1757                 return false;
1758         }
1759 }
1760
1761 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1762 {
1763         switch (pin) {
1764         case HPD_PORT_C:
1765                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1766         case HPD_PORT_D:
1767                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1768         case HPD_PORT_E:
1769                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1770         case HPD_PORT_F:
1771                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1772         default:
1773                 return false;
1774         }
1775 }
1776
1777 static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1778 {
1779         switch (pin) {
1780         case HPD_PORT_A:
1781                 return val & ICP_DDIA_HPD_LONG_DETECT;
1782         case HPD_PORT_B:
1783                 return val & ICP_DDIB_HPD_LONG_DETECT;
1784         case HPD_PORT_C:
1785                 return val & TGP_DDIC_HPD_LONG_DETECT;
1786         default:
1787                 return false;
1788         }
1789 }
1790
1791 static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1792 {
1793         switch (pin) {
1794         case HPD_PORT_D:
1795                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1796         case HPD_PORT_E:
1797                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1798         case HPD_PORT_F:
1799                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1800         case HPD_PORT_G:
1801                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1802         case HPD_PORT_H:
1803                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
1804         case HPD_PORT_I:
1805                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
1806         default:
1807                 return false;
1808         }
1809 }
1810
1811 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1812 {
1813         switch (pin) {
1814         case HPD_PORT_E:
1815                 return val & PORTE_HOTPLUG_LONG_DETECT;
1816         default:
1817                 return false;
1818         }
1819 }
1820
1821 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1822 {
1823         switch (pin) {
1824         case HPD_PORT_A:
1825                 return val & PORTA_HOTPLUG_LONG_DETECT;
1826         case HPD_PORT_B:
1827                 return val & PORTB_HOTPLUG_LONG_DETECT;
1828         case HPD_PORT_C:
1829                 return val & PORTC_HOTPLUG_LONG_DETECT;
1830         case HPD_PORT_D:
1831                 return val & PORTD_HOTPLUG_LONG_DETECT;
1832         default:
1833                 return false;
1834         }
1835 }
1836
1837 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1838 {
1839         switch (pin) {
1840         case HPD_PORT_A:
1841                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1842         default:
1843                 return false;
1844         }
1845 }
1846
1847 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1848 {
1849         switch (pin) {
1850         case HPD_PORT_B:
1851                 return val & PORTB_HOTPLUG_LONG_DETECT;
1852         case HPD_PORT_C:
1853                 return val & PORTC_HOTPLUG_LONG_DETECT;
1854         case HPD_PORT_D:
1855                 return val & PORTD_HOTPLUG_LONG_DETECT;
1856         default:
1857                 return false;
1858         }
1859 }
1860
1861 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1862 {
1863         switch (pin) {
1864         case HPD_PORT_B:
1865                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1866         case HPD_PORT_C:
1867                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1868         case HPD_PORT_D:
1869                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1870         default:
1871                 return false;
1872         }
1873 }
1874
1875 /*
1876  * Get a bit mask of pins that have triggered, and which ones may be long.
1877  * This can be called multiple times with the same masks to accumulate
1878  * hotplug detection results from several registers.
1879  *
1880  * Note that the caller is expected to zero out the masks initially.
1881  */
1882 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1883                                u32 *pin_mask, u32 *long_mask,
1884                                u32 hotplug_trigger, u32 dig_hotplug_reg,
1885                                const u32 hpd[HPD_NUM_PINS],
1886                                bool long_pulse_detect(enum hpd_pin pin, u32 val))
1887 {
1888         enum hpd_pin pin;
1889
1890         BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1891
1892         for_each_hpd_pin(pin) {
1893                 if ((hpd[pin] & hotplug_trigger) == 0)
1894                         continue;
1895
1896                 *pin_mask |= BIT(pin);
1897
1898                 if (long_pulse_detect(pin, dig_hotplug_reg))
1899                         *long_mask |= BIT(pin);
1900         }
1901
1902         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1903                          hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1904
1905 }
1906
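/*
 * GMBUS and DP AUX completions share a waitqueue: both handlers simply
 * wake whoever is sleeping on dev_priv->gmbus_wait_queue.
 */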
1907 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1908 {
1909         wake_up_all(&dev_priv->gmbus_wait_queue);
1910 }
1911
1912 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1913 {
1914         wake_up_all(&dev_priv->gmbus_wait_queue);
1915 }
1916
1917 #if defined(CONFIG_DEBUG_FS)
1918 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1919                                          enum pipe pipe,
1920                                          u32 crc0, u32 crc1,
1921                                          u32 crc2, u32 crc3,
1922                                          u32 crc4)
1923 {
1924         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1925         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1926         u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1927
1928         trace_intel_pipe_crc(crtc, crcs);
1929
1930         spin_lock(&pipe_crc->lock);
1931         /*
1932          * For some not yet identified reason, the first CRC is
1933          * bonkers. So let's just wait for the next vblank and read
1934          * out the buggy result.
1935          *
1936          * On GEN8+ sometimes the second CRC is bonkers as well, so
1937          * don't trust that one either.
1938          */
1939         if (pipe_crc->skipped <= 0 ||
1940             (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1941                 pipe_crc->skipped++;
1942                 spin_unlock(&pipe_crc->lock);
1943                 return;
1944         }
1945         spin_unlock(&pipe_crc->lock);
1946
1947         drm_crtc_add_crc_entry(&crtc->base, true,
1948                                 drm_crtc_accurate_vblank_count(&crtc->base),
1949                                 crcs);
1950 }
1951 #else
1952 static inline void
1953 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1954                              enum pipe pipe,
1955                              u32 crc0, u32 crc1,
1956                              u32 crc2, u32 crc3,
1957                              u32 crc4) {}
1958 #endif
1959
1960
1961 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1962                                      enum pipe pipe)
1963 {
1964         display_pipe_crc_irq_handler(dev_priv, pipe,
1965                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1966                                      0, 0, 0, 0);
1967 }
1968
1969 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1970                                      enum pipe pipe)
1971 {
1972         display_pipe_crc_irq_handler(dev_priv, pipe,
1973                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1974                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1975                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1976                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1977                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1978 }
1979
1980 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1981                                       enum pipe pipe)
1982 {
1983         u32 res1, res2;
1984
1985         if (INTEL_GEN(dev_priv) >= 3)
1986                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1987         else
1988                 res1 = 0;
1989
1990         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1991                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1992         else
1993                 res2 = 0;
1994
1995         display_pipe_crc_irq_handler(dev_priv, pipe,
1996                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1997                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1998                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1999                                      res1, res2);
2000 }
2001
2002 /* The RPS events need forcewake, so we add them to a work queue and mask
2003  * their IMR bits until the work is done. Other interrupts can be processed
2004  * without the work queue. */
2005 static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
2006 {
2007         struct drm_i915_private *i915 = gt->i915;
2008         struct intel_rps *rps = &i915->gt_pm.rps;
2009         const u32 events = i915->pm_rps_events & pm_iir;
2010
2011         lockdep_assert_held(&i915->irq_lock);
2012
2013         if (unlikely(!events))
2014                 return;
2015
2016         gen6_mask_pm_irq(gt, events);
2017
2018         if (!rps->interrupts_enabled)
2019                 return;
2020
2021         rps->pm_iir |= events;
2022         schedule_work(&rps->work);
2023 }
2024
2025 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
2026 {
2027         struct intel_rps *rps = &dev_priv->gt_pm.rps;
2028
2029         if (pm_iir & dev_priv->pm_rps_events) {
2030                 spin_lock(&dev_priv->irq_lock);
2031                 gen6_mask_pm_irq(&dev_priv->gt,
2032                                  pm_iir & dev_priv->pm_rps_events);
2033                 if (rps->interrupts_enabled) {
2034                         rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
2035                         schedule_work(&rps->work);
2036                 }
2037                 spin_unlock(&dev_priv->irq_lock);
2038         }
2039
2040         if (INTEL_GEN(dev_priv) >= 8)
2041                 return;
2042
2043         if (pm_iir & PM_VEBOX_USER_INTERRUPT)
2044                 intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
2045
2046         if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
2047                 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
2048 }
2049
2050 static void guc_irq_handler(struct intel_guc *guc, u16 iir)
2051 {
2052         if (iir & GUC_INTR_GUC2HOST)
2053                 intel_guc_to_host_event_handler(guc);
2054 }
2055
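/*
 * PIPESTAT packs enable bits (upper half) and status bits (lower half)
 * into a single register per pipe; the helpers below reset, ack and
 * dispatch those per-pipe status bits.
 */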
2056 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
2057 {
2058         enum pipe pipe;
2059
2060         for_each_pipe(dev_priv, pipe) {
2061                 I915_WRITE(PIPESTAT(pipe),
2062                            PIPESTAT_INT_STATUS_MASK |
2063                            PIPE_FIFO_UNDERRUN_STATUS);
2064
2065                 dev_priv->pipestat_irq_mask[pipe] = 0;
2066         }
2067 }
2068
2069 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
2070                                   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2071 {
2072         int pipe;
2073
2074         spin_lock(&dev_priv->irq_lock);
2075
2076         if (!dev_priv->display_irqs_enabled) {
2077                 spin_unlock(&dev_priv->irq_lock);
2078                 return;
2079         }
2080
2081         for_each_pipe(dev_priv, pipe) {
2082                 i915_reg_t reg;
2083                 u32 status_mask, enable_mask, iir_bit = 0;
2084
2085                 /*
2086                  * PIPESTAT bits get signalled even when the interrupt is
2087                  * disabled with the mask bits, and some of the status bits do
2088                  * not generate interrupts at all (like the underrun bit). Hence
2089                  * we need to be careful that we only handle what we want to
2090                  * handle.
2091                  */
2092
2093                 /* fifo underruns are filtered in the underrun handler. */
2094                 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
2095
2096                 switch (pipe) {
2097                 case PIPE_A:
2098                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2099                         break;
2100                 case PIPE_B:
2101                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2102                         break;
2103                 case PIPE_C:
2104                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2105                         break;
2106                 }
2107                 if (iir & iir_bit)
2108                         status_mask |= dev_priv->pipestat_irq_mask[pipe];
2109
2110                 if (!status_mask)
2111                         continue;
2112
2113                 reg = PIPESTAT(pipe);
2114                 pipe_stats[pipe] = I915_READ(reg) & status_mask;
2115                 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
2116
2117                 /*
2118                  * Clear the PIPE*STAT regs before the IIR
2119                  *
2120                  * Toggle the enable bits to make sure we get an
2121                  * edge in the ISR pipe event bit if we don't clear
2122                  * all the enabled status bits. Otherwise the edge
2123                  * triggered IIR on i965/g4x wouldn't notice that
2124                  * an interrupt is still pending.
2125                  */
2126                 if (pipe_stats[pipe]) {
2127                         I915_WRITE(reg, pipe_stats[pipe]);
2128                         I915_WRITE(reg, enable_mask);
2129                 }
2130         }
2131         spin_unlock(&dev_priv->irq_lock);
2132 }
2133
2134 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2135                                       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
2136 {
2137         enum pipe pipe;
2138
2139         for_each_pipe(dev_priv, pipe) {
2140                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2141                         drm_handle_vblank(&dev_priv->drm, pipe);
2142
2143                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2144                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2145
2146                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2147                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2148         }
2149 }
2150
2151 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2152                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2153 {
2154         bool blc_event = false;
2155         enum pipe pipe;
2156
2157         for_each_pipe(dev_priv, pipe) {
2158                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2159                         drm_handle_vblank(&dev_priv->drm, pipe);
2160
2161                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2162                         blc_event = true;
2163
2164                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2165                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2166
2167                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2168                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2169         }
2170
2171         if (blc_event || (iir & I915_ASLE_INTERRUPT))
2172                 intel_opregion_asle_intr(dev_priv);
2173 }
2174
2175 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2176                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2177 {
2178         bool blc_event = false;
2179         enum pipe pipe;
2180
2181         for_each_pipe(dev_priv, pipe) {
2182                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2183                         drm_handle_vblank(&dev_priv->drm, pipe);
2184
2185                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2186                         blc_event = true;
2187
2188                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2189                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2190
2191                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2192                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2193         }
2194
2195         if (blc_event || (iir & I915_ASLE_INTERRUPT))
2196                 intel_opregion_asle_intr(dev_priv);
2197
2198         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2199                 gmbus_irq_handler(dev_priv);
2200 }
2201
2202 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2203                                             u32 pipe_stats[I915_MAX_PIPES])
2204 {
2205         enum pipe pipe;
2206
2207         for_each_pipe(dev_priv, pipe) {
2208                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2209                         drm_handle_vblank(&dev_priv->drm, pipe);
2210
2211                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2212                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2213
2214                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2215                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2216         }
2217
2218         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2219                 gmbus_irq_handler(dev_priv);
2220 }
2221
2222 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
2223 {
2224         u32 hotplug_status = 0, hotplug_status_mask;
2225         int i;
2226
2227         if (IS_G4X(dev_priv) ||
2228             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2229                 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
2230                         DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
2231         else
2232                 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
2233
2234         /*
2235          * We absolutely have to clear all the pending interrupt
2236          * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
2237          * interrupt bit won't have an edge, and the i965/g4x
2238          * edge triggered IIR will not notice that an interrupt
2239          * is still pending. We can't use PORT_HOTPLUG_EN to
2240          * guarantee the edge as the act of toggling the enable
2241          * bits can itself generate a new hotplug interrupt :(
2242          */
2243         for (i = 0; i < 10; i++) {
2244                 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
2245
2246                 if (tmp == 0)
2247                         return hotplug_status;
2248
2249                 hotplug_status |= tmp;
2250                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2251         }
2252
2253         WARN_ONCE(1,
2254                   "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
2255                   I915_READ(PORT_HOTPLUG_STAT));
2256
2257         return hotplug_status;
2258 }
2259
2260 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2261                                  u32 hotplug_status)
2262 {
2263         u32 pin_mask = 0, long_mask = 0;
2264
2265         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2266             IS_CHERRYVIEW(dev_priv)) {
2267                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2268
2269                 if (hotplug_trigger) {
2270                         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2271                                            hotplug_trigger, hotplug_trigger,
2272                                            hpd_status_g4x,
2273                                            i9xx_port_hotplug_long_detect);
2274
2275                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2276                 }
2277
2278                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2279                         dp_aux_irq_handler(dev_priv);
2280         } else {
2281                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2282
2283                 if (hotplug_trigger) {
2284                         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2285                                            hotplug_trigger, hotplug_trigger,
2286                                            hpd_status_i915,
2287                                            i9xx_port_hotplug_long_detect);
2288                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2289                 }
2290         }
2291 }
2292
2293 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2294 {
2295         struct drm_i915_private *dev_priv = arg;
2296         irqreturn_t ret = IRQ_NONE;
2297
2298         if (!intel_irqs_enabled(dev_priv))
2299                 return IRQ_NONE;
2300
2301         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2302         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2303
2304         do {
2305                 u32 iir, gt_iir, pm_iir;
2306                 u32 pipe_stats[I915_MAX_PIPES] = {};
2307                 u32 hotplug_status = 0;
2308                 u32 ier = 0;
2309
2310                 gt_iir = I915_READ(GTIIR);
2311                 pm_iir = I915_READ(GEN6_PMIIR);
2312                 iir = I915_READ(VLV_IIR);
2313
2314                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2315                         break;
2316
2317                 ret = IRQ_HANDLED;
2318
2319                 /*
2320                  * Theory on interrupt generation, based on empirical evidence:
2321                  *
2322                  * x = ((VLV_IIR & VLV_IER) ||
2323                  *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2324                  *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2325                  *
2326                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2327                  * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2328                  * guarantee the CPU interrupt will be raised again even if we
2329                  * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2330                  * bits this time around.
2331                  */
2332                 I915_WRITE(VLV_MASTER_IER, 0);
2333                 ier = I915_READ(VLV_IER);
2334                 I915_WRITE(VLV_IER, 0);
2335
2336                 if (gt_iir)
2337                         I915_WRITE(GTIIR, gt_iir);
2338                 if (pm_iir)
2339                         I915_WRITE(GEN6_PMIIR, pm_iir);
2340
2341                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2342                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2343
2344                 /* Call regardless, as some status bits might not be
2345                  * signalled in iir */
2346                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2347
2348                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2349                            I915_LPE_PIPE_B_INTERRUPT))
2350                         intel_lpe_audio_irq_handler(dev_priv);
2351
2352                 /*
2353                  * VLV_IIR is single buffered, and reflects the level
2354                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2355                  */
2356                 if (iir)
2357                         I915_WRITE(VLV_IIR, iir);
2358
2359                 I915_WRITE(VLV_IER, ier);
2360                 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2361
2362                 if (gt_iir)
2363                         snb_gt_irq_handler(dev_priv, gt_iir);
2364                 if (pm_iir)
2365                         gen6_rps_irq_handler(dev_priv, pm_iir);
2366
2367                 if (hotplug_status)
2368                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2369
2370                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2371         } while (0);
2372
2373         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2374
2375         return ret;
2376 }
2377
2378 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2379 {
2380         struct drm_i915_private *dev_priv = arg;
2381         irqreturn_t ret = IRQ_NONE;
2382
2383         if (!intel_irqs_enabled(dev_priv))
2384                 return IRQ_NONE;
2385
2386         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2387         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2388
2389         do {
2390                 u32 master_ctl, iir;
2391                 u32 pipe_stats[I915_MAX_PIPES] = {};
2392                 u32 hotplug_status = 0;
2393                 u32 gt_iir[4];
2394                 u32 ier = 0;
2395
2396                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2397                 iir = I915_READ(VLV_IIR);
2398
2399                 if (master_ctl == 0 && iir == 0)
2400                         break;
2401
2402                 ret = IRQ_HANDLED;
2403
2404                 /*
2405                  * Theory on interrupt generation, based on empirical evidence:
2406                  *
2407                  * x = ((VLV_IIR & VLV_IER) ||
2408                  *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2409                  *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2410                  *
2411                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2412                  * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2413                  * guarantee the CPU interrupt will be raised again even if we
2414                  * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2415                  * bits this time around.
2416                  */
2417                 I915_WRITE(GEN8_MASTER_IRQ, 0);
2418                 ier = I915_READ(VLV_IER);
2419                 I915_WRITE(VLV_IER, 0);
2420
2421                 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2422
2423                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2424                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2425
2426                 /* Call regardless, as some status bits might not be
2427                  * signalled in iir */
2428                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2429
2430                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2431                            I915_LPE_PIPE_B_INTERRUPT |
2432                            I915_LPE_PIPE_C_INTERRUPT))
2433                         intel_lpe_audio_irq_handler(dev_priv);
2434
2435                 /*
2436                  * VLV_IIR is single buffered, and reflects the level
2437                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2438                  */
2439                 if (iir)
2440                         I915_WRITE(VLV_IIR, iir);
2441
2442                 I915_WRITE(VLV_IER, ier);
2443                 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2444
2445                 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2446
2447                 if (hotplug_status)
2448                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2449
2450                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2451         } while (0);
2452
2453         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2454
2455         return ret;
2456 }
2457
2458 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2459                                 u32 hotplug_trigger,
2460                                 const u32 hpd[HPD_NUM_PINS])
2461 {
2462         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2463
2464         /*
2465          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2466          * unless we touch the hotplug register, even if hotplug_trigger is
2467          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2468          * errors.
2469          */
2470         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2471         if (!hotplug_trigger) {
2472                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2473                         PORTD_HOTPLUG_STATUS_MASK |
2474                         PORTC_HOTPLUG_STATUS_MASK |
2475                         PORTB_HOTPLUG_STATUS_MASK;
2476                 dig_hotplug_reg &= ~mask;
2477         }
2478
2479         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2480         if (!hotplug_trigger)
2481                 return;
2482
2483         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2484                            dig_hotplug_reg, hpd,
2485                            pch_port_hotplug_long_detect);
2486
2487         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2488 }
2489
2490 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2491 {
2492         int pipe;
2493         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2494
2495         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2496
2497         if (pch_iir & SDE_AUDIO_POWER_MASK) {
2498                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2499                                SDE_AUDIO_POWER_SHIFT);
2500                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2501                                  port_name(port));
2502         }
2503
2504         if (pch_iir & SDE_AUX_MASK)
2505                 dp_aux_irq_handler(dev_priv);
2506
2507         if (pch_iir & SDE_GMBUS)
2508                 gmbus_irq_handler(dev_priv);
2509
2510         if (pch_iir & SDE_AUDIO_HDCP_MASK)
2511                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2512
2513         if (pch_iir & SDE_AUDIO_TRANS_MASK)
2514                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2515
2516         if (pch_iir & SDE_POISON)
2517                 DRM_ERROR("PCH poison interrupt\n");
2518
2519         if (pch_iir & SDE_FDI_MASK)
2520                 for_each_pipe(dev_priv, pipe)
2521                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2522                                          pipe_name(pipe),
2523                                          I915_READ(FDI_RX_IIR(pipe)));
2524
2525         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2526                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2527
2528         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2529                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2530
2531         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2532                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2533
2534         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2535                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2536 }
2537
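/*
 * GEN7_ERR_INT collects CPU-side poison, FIFO underrun and pipe CRC
 * done status on IVB/HSW, and is cleared by writing back the value
 * just read.
 */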
2538 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2539 {
2540         u32 err_int = I915_READ(GEN7_ERR_INT);
2541         enum pipe pipe;
2542
2543         if (err_int & ERR_INT_POISON)
2544                 DRM_ERROR("Poison interrupt\n");
2545
2546         for_each_pipe(dev_priv, pipe) {
2547                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2548                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2549
2550                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2551                         if (IS_IVYBRIDGE(dev_priv))
2552                                 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2553                         else
2554                                 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2555                 }
2556         }
2557
2558         I915_WRITE(GEN7_ERR_INT, err_int);
2559 }
2560
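/*
 * SERR_INT is the south (PCH) error register on CPT/PPT, reporting PCH
 * transcoder FIFO underruns and poison; it too is cleared by writing
 * back the value just read.
 */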
2561 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2562 {
2563         u32 serr_int = I915_READ(SERR_INT);
2564         enum pipe pipe;
2565
2566         if (serr_int & SERR_INT_POISON)
2567                 DRM_ERROR("PCH poison interrupt\n");
2568
2569         for_each_pipe(dev_priv, pipe)
2570                 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2571                         intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2572
2573         I915_WRITE(SERR_INT, serr_int);
2574 }
2575
2576 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2577 {
2578         int pipe;
2579         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2580
2581         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2582
2583         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2584                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2585                                SDE_AUDIO_POWER_SHIFT_CPT);
2586                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2587                                  port_name(port));
2588         }
2589
2590         if (pch_iir & SDE_AUX_MASK_CPT)
2591                 dp_aux_irq_handler(dev_priv);
2592
2593         if (pch_iir & SDE_GMBUS_CPT)
2594                 gmbus_irq_handler(dev_priv);
2595
2596         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2597                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2598
2599         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2600                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2601
2602         if (pch_iir & SDE_FDI_MASK_CPT)
2603                 for_each_pipe(dev_priv, pipe)
2604                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2605                                          pipe_name(pipe),
2606                                          I915_READ(FDI_RX_IIR(pipe)));
2607
2608         if (pch_iir & SDE_ERROR_CPT)
2609                 cpt_serr_int_handler(dev_priv);
2610 }
2611
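/*
 * ICP-family south hotplug: combo (DDI) and Type-C triggers live in
 * separate SHOTPLUG_CTL registers, each acked by writing back the
 * value just read. MCC has no Type-C ports, hence its TC trigger mask
 * is left empty.
 */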
2612 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
2613                             const u32 *pins)
2614 {
2615         u32 ddi_hotplug_trigger;
2616         u32 tc_hotplug_trigger;
2617         u32 pin_mask = 0, long_mask = 0;
2618
2619         if (HAS_PCH_MCC(dev_priv)) {
2620                 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
2621                 tc_hotplug_trigger = 0;
2622         } else {
2623                 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2624                 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
2625         }
2626
2627         if (ddi_hotplug_trigger) {
2628                 u32 dig_hotplug_reg;
2629
2630                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2631                 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2632
2633                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2634                                    ddi_hotplug_trigger,
2635                                    dig_hotplug_reg, pins,
2636                                    icp_ddi_port_hotplug_long_detect);
2637         }
2638
2639         if (tc_hotplug_trigger) {
2640                 u32 dig_hotplug_reg;
2641
2642                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2643                 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2644
2645                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2646                                    tc_hotplug_trigger,
2647                                    dig_hotplug_reg, pins,
2648                                    icp_tc_port_hotplug_long_detect);
2649         }
2650
2651         if (pin_mask)
2652                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2653
2654         if (pch_iir & SDE_GMBUS_ICP)
2655                 gmbus_irq_handler(dev_priv);
2656 }
2657
2658 static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2659 {
2660         u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
2661         u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
2662         u32 pin_mask = 0, long_mask = 0;
2663
2664         if (ddi_hotplug_trigger) {
2665                 u32 dig_hotplug_reg;
2666
2667                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2668                 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2669
2670                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2671                                    ddi_hotplug_trigger,
2672                                    dig_hotplug_reg, hpd_tgp,
2673                                    tgp_ddi_port_hotplug_long_detect);
2674         }
2675
2676         if (tc_hotplug_trigger) {
2677                 u32 dig_hotplug_reg;
2678
2679                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2680                 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2681
2682                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2683                                    tc_hotplug_trigger,
2684                                    dig_hotplug_reg, hpd_tgp,
2685                                    tgp_tc_port_hotplug_long_detect);
2686         }
2687
2688         if (pin_mask)
2689                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2690
2691         if (pch_iir & SDE_GMBUS_ICP)
2692                 gmbus_irq_handler(dev_priv);
2693 }
2694
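/*
 * SPT splits hotplug status across PCH_PORT_HOTPLUG (ports A-D) and
 * PCH_PORT_HOTPLUG2 (port E), hence the two trigger masks and
 * long-pulse detect callbacks below.
 */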
2695 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2696 {
2697         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2698                 ~SDE_PORTE_HOTPLUG_SPT;
2699         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2700         u32 pin_mask = 0, long_mask = 0;
2701
2702         if (hotplug_trigger) {
2703                 u32 dig_hotplug_reg;
2704
2705                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2706                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2707
2708                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2709                                    hotplug_trigger, dig_hotplug_reg, hpd_spt,
2710                                    spt_port_hotplug_long_detect);
2711         }
2712
2713         if (hotplug2_trigger) {
2714                 u32 dig_hotplug_reg;
2715
2716                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2717                 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2718
2719                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2720                                    hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2721                                    spt_port_hotplug2_long_detect);
2722         }
2723
2724         if (pin_mask)
2725                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2726
2727         if (pch_iir & SDE_GMBUS_CPT)
2728                 gmbus_irq_handler(dev_priv);
2729 }
2730
2731 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2732                                 u32 hotplug_trigger,
2733                                 const u32 hpd[HPD_NUM_PINS])
2734 {
2735         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2736
2737         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2738         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2739
2740         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2741                            dig_hotplug_reg, hpd,
2742                            ilk_port_hotplug_long_detect);
2743
2744         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2745 }
2746
2747 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2748                                     u32 de_iir)
2749 {
2750         enum pipe pipe;
2751         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2752
2753         if (hotplug_trigger)
2754                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2755
2756         if (de_iir & DE_AUX_CHANNEL_A)
2757                 dp_aux_irq_handler(dev_priv);
2758
2759         if (de_iir & DE_GSE)
2760                 intel_opregion_asle_intr(dev_priv);
2761
2762         if (de_iir & DE_POISON)
2763                 DRM_ERROR("Poison interrupt\n");
2764
2765         for_each_pipe(dev_priv, pipe) {
2766                 if (de_iir & DE_PIPE_VBLANK(pipe))
2767                         drm_handle_vblank(&dev_priv->drm, pipe);
2768
2769                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2770                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2771
2772                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2773                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2774         }
2775
2776         /* check event from PCH */
2777         if (de_iir & DE_PCH_EVENT) {
2778                 u32 pch_iir = I915_READ(SDEIIR);
2779
2780                 if (HAS_PCH_CPT(dev_priv))
2781                         cpt_irq_handler(dev_priv, pch_iir);
2782                 else
2783                         ibx_irq_handler(dev_priv, pch_iir);
2784
2785                 /* should clear PCH hotplug event before clear CPU irq */
2786                 I915_WRITE(SDEIIR, pch_iir);
2787         }
2788
2789         if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2790                 ironlake_rps_change_irq_handler(dev_priv);
2791 }
2792
2793 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2794                                     u32 de_iir)
2795 {
2796         enum pipe pipe;
2797         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2798
2799         if (hotplug_trigger)
2800                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2801
2802         if (de_iir & DE_ERR_INT_IVB)
2803                 ivb_err_int_handler(dev_priv);
2804
2805         if (de_iir & DE_EDP_PSR_INT_HSW) {
2806                 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2807
2808                 intel_psr_irq_handler(dev_priv, psr_iir);
2809                 I915_WRITE(EDP_PSR_IIR, psr_iir);
2810         }
2811
2812         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2813                 dp_aux_irq_handler(dev_priv);
2814
2815         if (de_iir & DE_GSE_IVB)
2816                 intel_opregion_asle_intr(dev_priv);
2817
2818         for_each_pipe(dev_priv, pipe) {
2819                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2820                         drm_handle_vblank(&dev_priv->drm, pipe);
2821         }
2822
2823         /* check event from PCH */
2824         if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2825                 u32 pch_iir = I915_READ(SDEIIR);
2826
2827                 cpt_irq_handler(dev_priv, pch_iir);
2828
2829                 /* clear PCH hotplug event before clear CPU irq */
2830                 I915_WRITE(SDEIIR, pch_iir);
2831         }
2832 }
2833
2834 /*
2835  * To handle irqs with the minimum potential races with fresh interrupts, we:
2836  * 1 - Disable Master Interrupt Control.
2837  * 2 - Find the source(s) of the interrupt.
2838  * 3 - Clear the Interrupt Identity bits (IIR).
2839  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2840  * 5 - Re-enable Master Interrupt Control.
2841  */
2842 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2843 {
2844         struct drm_i915_private *dev_priv = arg;
2845         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2846         irqreturn_t ret = IRQ_NONE;
2847
2848         if (!intel_irqs_enabled(dev_priv))
2849                 return IRQ_NONE;
2850
2851         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2852         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2853
2854         /* disable master interrupt before clearing iir */
2855         de_ier = I915_READ(DEIER);
2856         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2857
2858         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2859          * interrupts will be stored on its back queue, and then we'll be
2860          * able to process them after we restore SDEIER (as soon as we restore
2861          * it, we'll get an interrupt if SDEIIR still has something to process
2862          * due to its back queue). */
2863         if (!HAS_PCH_NOP(dev_priv)) {
2864                 sde_ier = I915_READ(SDEIER);
2865                 I915_WRITE(SDEIER, 0);
2866         }
2867
2868         /* Find, clear, then process each source of interrupt */
2869
2870         gt_iir = I915_READ(GTIIR);
2871         if (gt_iir) {
2872                 I915_WRITE(GTIIR, gt_iir);
2873                 ret = IRQ_HANDLED;
2874                 if (INTEL_GEN(dev_priv) >= 6)
2875                         snb_gt_irq_handler(dev_priv, gt_iir);
2876                 else
2877                         ilk_gt_irq_handler(dev_priv, gt_iir);
2878         }
2879
2880         de_iir = I915_READ(DEIIR);
2881         if (de_iir) {
2882                 I915_WRITE(DEIIR, de_iir);
2883                 ret = IRQ_HANDLED;
2884                 if (INTEL_GEN(dev_priv) >= 7)
2885                         ivb_display_irq_handler(dev_priv, de_iir);
2886                 else
2887                         ilk_display_irq_handler(dev_priv, de_iir);
2888         }
2889
2890         if (INTEL_GEN(dev_priv) >= 6) {
2891                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2892                 if (pm_iir) {
2893                         I915_WRITE(GEN6_PMIIR, pm_iir);
2894                         ret = IRQ_HANDLED;
2895                         gen6_rps_irq_handler(dev_priv, pm_iir);
2896                 }
2897         }
2898
2899         I915_WRITE(DEIER, de_ier);
2900         if (!HAS_PCH_NOP(dev_priv))
2901                 I915_WRITE(SDEIER, sde_ier);
2902
2903         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2904         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2905
2906         return ret;
2907 }
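
/*
 * Illustrative sketch (not part of the driver, compiled out): the five
 * steps above in miniature. example_irq_handler(), read_reg(), write_reg(),
 * handle_source() and the MASTER_IER/MASTER_ENABLE/SOURCE_IIR names are
 * hypothetical stand-ins, not real i915 symbols. Clearing the IIR before
 * processing means an edge arriving mid-handler is latched and re-raised
 * as a new interrupt once the master bit is restored.
 */
#if 0
static irqreturn_t example_irq_handler(void __iomem *regs)
{
        irqreturn_t ret = IRQ_NONE;
        u32 iir;

        write_reg(regs, MASTER_IER, 0);                 /* 1: disable master */

        iir = read_reg(regs, SOURCE_IIR);               /* 2: find source(s) */
        if (iir) {
                write_reg(regs, SOURCE_IIR, iir);       /* 3: clear the IIR */
                handle_source(iir);                     /* 4: process */
                ret = IRQ_HANDLED;
        }

        write_reg(regs, MASTER_IER, MASTER_ENABLE);     /* 5: re-enable master */

        return ret;
}
#endif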
2908
2909 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2910                                 u32 hotplug_trigger,
2911                                 const u32 hpd[HPD_NUM_PINS])
2912 {
2913         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2914
2915         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2916         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2917
2918         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2919                            dig_hotplug_reg, hpd,
2920                            bxt_port_hotplug_long_detect);
2921
2922         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2923 }
2924
2925 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2926 {
2927         u32 pin_mask = 0, long_mask = 0;
2928         u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2929         u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2930         long_pulse_detect_func long_pulse_detect;
2931         const u32 *hpd;
2932
2933         if (INTEL_GEN(dev_priv) >= 12) {
2934                 long_pulse_detect = gen12_port_hotplug_long_detect;
2935                 hpd = hpd_gen12;
2936         } else {
2937                 long_pulse_detect = gen11_port_hotplug_long_detect;
2938                 hpd = hpd_gen11;
2939         }
2940
2941         if (trigger_tc) {
2942                 u32 dig_hotplug_reg;
2943
2944                 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2945                 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2946
2947                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2948                                    dig_hotplug_reg, hpd, long_pulse_detect);
2949         }
2950
2951         if (trigger_tbt) {
2952                 u32 dig_hotplug_reg;
2953
2954                 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2955                 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2956
2957                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2958                                    dig_hotplug_reg, hpd, long_pulse_detect);
2959         }
2960
2961         if (pin_mask)
2962                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2963         else
2964                 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2965 }
2966
2967 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2968 {
2969         u32 mask;
2970
2971         if (INTEL_GEN(dev_priv) >= 12)
2972                 /* TODO: Add AUX entries for USBC */
2973                 return TGL_DE_PORT_AUX_DDIA |
2974                         TGL_DE_PORT_AUX_DDIB |
2975                         TGL_DE_PORT_AUX_DDIC;
2976
2977         mask = GEN8_AUX_CHANNEL_A;
2978         if (INTEL_GEN(dev_priv) >= 9)
2979                 mask |= GEN9_AUX_CHANNEL_B |
2980                         GEN9_AUX_CHANNEL_C |
2981                         GEN9_AUX_CHANNEL_D;
2982
2983         if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2984                 mask |= CNL_AUX_CHANNEL_F;
2985
2986         if (IS_GEN(dev_priv, 11))
2987                 mask |= ICL_AUX_CHANNEL_E;
2988
2989         return mask;
2990 }
2991
2992 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2993 {
2994         if (INTEL_GEN(dev_priv) >= 9)
2995                 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2996         else
2997                 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2998 }
2999
3000 static irqreturn_t
3001 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
3002 {
3003         irqreturn_t ret = IRQ_NONE;
3004         u32 iir;
3005         enum pipe pipe;
3006
3007         if (master_ctl & GEN8_DE_MISC_IRQ) {
3008                 iir = I915_READ(GEN8_DE_MISC_IIR);
3009                 if (iir) {
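                        /* Track whether any known source claims these bits. */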
3010                         bool found = false;
3011
3012                         I915_WRITE(GEN8_DE_MISC_IIR, iir);
3013                         ret = IRQ_HANDLED;
3014
3015                         if (iir & GEN8_DE_MISC_GSE) {
3016                                 intel_opregion_asle_intr(dev_priv);
3017                                 found = true;
3018                         }
3019
3020                         if (iir & GEN8_DE_EDP_PSR) {
3021                                 u32 psr_iir = I915_READ(EDP_PSR_IIR);
3022
3023                                 intel_psr_irq_handler(dev_priv, psr_iir);
3024                                 I915_WRITE(EDP_PSR_IIR, psr_iir);
3025                                 found = true;
3026                         }
3027
3028                         if (!found)
3029                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
3030                 } else {
3031                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
3032                 }
3033         }
3034
3035         if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
3036                 iir = I915_READ(GEN11_DE_HPD_IIR);
3037                 if (iir) {
3038                         I915_WRITE(GEN11_DE_HPD_IIR, iir);
3039                         ret = IRQ_HANDLED;
3040                         gen11_hpd_irq_handler(dev_priv, iir);
3041                 } else {
3042                         DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
3043                 }
3044         }
3045
3046         if (master_ctl & GEN8_DE_PORT_IRQ) {
3047                 iir = I915_READ(GEN8_DE_PORT_IIR);
3048                 if (iir) {
3049                         u32 tmp_mask;
3050                         bool found = false;
3051
3052                         I915_WRITE(GEN8_DE_PORT_IIR, iir);
3053                         ret = IRQ_HANDLED;
3054
3055                         if (iir & gen8_de_port_aux_mask(dev_priv)) {
3056                                 dp_aux_irq_handler(dev_priv);
3057                                 found = true;
3058                         }
3059
3060                         if (IS_GEN9_LP(dev_priv)) {
3061                                 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
3062                                 if (tmp_mask) {
3063                                         bxt_hpd_irq_handler(dev_priv, tmp_mask,
3064                                                             hpd_bxt);
3065                                         found = true;
3066                                 }
3067                         } else if (IS_BROADWELL(dev_priv)) {
3068                                 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
3069                                 if (tmp_mask) {
3070                                         ilk_hpd_irq_handler(dev_priv,
3071                                                             tmp_mask, hpd_bdw);
3072                                         found = true;
3073                                 }
3074                         }
3075
3076                         if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
3077                                 gmbus_irq_handler(dev_priv);
3078                                 found = true;
3079                         }
3080
3081                         if (!found)
3082                                 DRM_ERROR("Unexpected DE Port interrupt\n");
3083                 } else {
3084                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
3085                 }
3086         }
3087
3088         for_each_pipe(dev_priv, pipe) {
3089                 u32 fault_errors;
3090
3091                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
3092                         continue;
3093
3094                 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
3095                 if (!iir) {
3096                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
3097                         continue;
3098                 }
3099
3100                 ret = IRQ_HANDLED;
3101                 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
3102
3103                 if (iir & GEN8_PIPE_VBLANK)
3104                         drm_handle_vblank(&dev_priv->drm, pipe);
3105
3106                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
3107                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
3108
3109                 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
3110                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
3111
3112                 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
3113                 if (fault_errors)
3114                         DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
3115                                   pipe_name(pipe),
3116                                   fault_errors);
3117         }
3118
3119         if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
3120             master_ctl & GEN8_DE_PCH_IRQ) {
3121                 /*
3122                  * FIXME(BDW): Assume for now that the new interrupt handling
3123                  * scheme also closed the SDE interrupt handling race we've seen
3124                  * on older pch-split platforms. But this needs testing.
3125                  */
3126                 iir = I915_READ(SDEIIR);
3127                 if (iir) {
3128                         I915_WRITE(SDEIIR, iir);
3129                         ret = IRQ_HANDLED;
3130
3131                         if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
3132                                 tgp_irq_handler(dev_priv, iir);
3133                         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
3134                                 icp_irq_handler(dev_priv, iir, hpd_mcc);
3135                         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3136                                 icp_irq_handler(dev_priv, iir, hpd_icp);
3137                         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
3138                                 spt_irq_handler(dev_priv, iir);
3139                         else
3140                                 cpt_irq_handler(dev_priv, iir);
3141                 } else {
3142                         /*
3143                          * Like on previous PCH there seems to be something
3144                          * fishy going on with forwarding PCH interrupts.
3145                          */
3146                         DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
3147                 }
3148         }
3149
3150         return ret;
3151 }
3152
3153 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
3154 {
3155         raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
3156
3157         /*
3158          * Now with master disabled, get a sample of level indications
3159          * for this interrupt. Indications will be cleared on related acks.
3160          * New indications can and will light up during processing,
3161          * and will generate a new interrupt once the master is re-enabled.
3162          */
3163         return raw_reg_read(regs, GEN8_MASTER_IRQ);
3164 }
3165
3166 static inline void gen8_master_intr_enable(void __iomem * const regs)
3167 {
3168         raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3169 }
3170
3171 static irqreturn_t gen8_irq_handler(int irq, void *arg)
3172 {
3173         struct drm_i915_private *dev_priv = arg;
3174         void __iomem * const regs = dev_priv->uncore.regs;
3175         u32 master_ctl;
3176         u32 gt_iir[4];
3177
3178         if (!intel_irqs_enabled(dev_priv))
3179                 return IRQ_NONE;
3180
3181         master_ctl = gen8_master_intr_disable(regs);
3182         if (!master_ctl) {
3183                 gen8_master_intr_enable(regs);
3184                 return IRQ_NONE;
3185         }
3186
3187         /* Find, clear, then process each source of interrupt */
3188         gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
3189
3190         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
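        /* Only non-GT (display and PCH) bits take the display irq path. */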
3191         if (master_ctl & ~GEN8_GT_IRQS) {
3192                 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3193                 gen8_de_irq_handler(dev_priv, master_ctl);
3194                 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3195         }
3196
3197         gen8_master_intr_enable(regs);
3198
3199         gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
3200
3201         return IRQ_HANDLED;
3202 }
3203
3204 static u32
3205 gen11_gt_engine_identity(struct intel_gt *gt,
3206                          const unsigned int bank, const unsigned int bit)
3207 {
3208         void __iomem * const regs = gt->uncore->regs;
3209         u32 timeout_ts;
3210         u32 ident;
3211
3212         lockdep_assert_held(&gt->i915->irq_lock);
3213
3214         raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
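        /*
         * Selecting the bit here latches that interrupt's identity into
         * GEN11_INTR_IDENTITY_REG(bank), which is polled for DATA_VALID
         * below.
         */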
3215
3216         /*
3217          * NB: Specs do not specify how long to spin wait,
3218          * so we do ~100us as an educated guess.
3219          */
3220         timeout_ts = (local_clock() >> 10) + 100;
3221         do {
3222                 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
3223         } while (!(ident & GEN11_INTR_DATA_VALID) &&
3224                  !time_after32(local_clock() >> 10, timeout_ts));
3225
3226         if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
3227                 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
3228                           bank, bit, ident);
3229                 return 0;
3230         }
3231
3232         raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
3233                       GEN11_INTR_DATA_VALID);
3234
3235         return ident;
3236 }
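
/*
 * Illustrative sketch (not part of the driver, compiled out): the bounded
 * spin-wait above, in isolation. local_clock() returns nanoseconds, so
 * shifting right by 10 approximates microseconds; done() is a hypothetical
 * predicate standing in for the GEN11_INTR_DATA_VALID test.
 */
#if 0
static bool spin_wait_us(bool (*done)(void), u32 us)
{
        u32 timeout_ts = (local_clock() >> 10) + us;
        bool ok;

        do {
                ok = done();
        } while (!ok && !time_after32(local_clock() >> 10, timeout_ts));

        return ok;
}
#endif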
3237
3238 static void
3239 gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
3240                         const u16 iir)
3241 {
3242         if (instance == OTHER_GUC_INSTANCE)
3243                 return guc_irq_handler(&gt->uc.guc, iir);
3244
3245         if (instance == OTHER_GTPM_INSTANCE)
3246                 return gen11_rps_irq_handler(gt, iir);
3247
3248         WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3249                   instance, iir);
3250 }
3251
3252 static void
3253 gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
3254                          const u8 instance, const u16 iir)
3255 {
3256         struct intel_engine_cs *engine;
3257
3258         if (instance <= MAX_ENGINE_INSTANCE)
3259                 engine = gt->engine_class[class][instance];
3260         else
3261                 engine = NULL;
3262
3263         if (likely(engine))
3264                 return gen8_cs_irq_handler(engine, iir);
3265
3266         WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3267                   class, instance);
3268 }
3269
3270 static void
3271 gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
3272 {
3273         const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3274         const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3275         const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3276
3277         if (unlikely(!intr))
3278                 return;
3279
3280         if (class <= COPY_ENGINE_CLASS)
3281                 return gen11_engine_irq_handler(gt, class, instance, intr);
3282
3283         if (class == OTHER_CLASS)
3284                 return gen11_other_irq_handler(gt, instance, intr);
3285
3286         WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3287                   class, instance, intr);
3288 }
3289
3290 static void
3291 gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
3292 {
3293         void __iomem * const regs = gt->uncore->regs;
3294         unsigned long intr_dw;
3295         unsigned int bit;
3296
3297         lockdep_assert_held(&gt->i915->irq_lock);
3298
3299         intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3300
3301         for_each_set_bit(bit, &intr_dw, 32) {
3302                 const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
3303
3304                 gen11_gt_identity_handler(gt, ident);
3305         }
3306
3307         /* The clear must come only after the shared dword has been serviced for each engine */
3308         raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3309 }
3310
3311 static void
3312 gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
3313 {
3314         struct drm_i915_private *i915 = gt->i915;
3315         unsigned int bank;
3316
3317         spin_lock(&i915->irq_lock);
3318
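        /* Gen11 exposes two GT interrupt dwords (banks 0 and 1). */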
3319         for (bank = 0; bank < 2; bank++) {
3320                 if (master_ctl & GEN11_GT_DW_IRQ(bank))
3321                         gen11_gt_bank_handler(gt, bank);
3322         }
3323
3324         spin_unlock(&i915->irq_lock);
3325 }
3326
3327 static u32
3328 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
3329 {
3330         void __iomem * const regs = gt->uncore->regs;
3331         u32 iir;
3332
3333         if (!(master_ctl & GEN11_GU_MISC_IRQ))
3334                 return 0;
3335
3336         iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3337         if (likely(iir))
3338                 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3339
3340         return iir;
3341 }
3342
3343 static void
3344 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
3345 {
3346         if (iir & GEN11_GU_MISC_GSE)
3347                 intel_opregion_asle_intr(gt->i915);
3348 }
3349
3350 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
3351 {
3352         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3353
3354         /*
3355          * Now with master disabled, get a sample of level indications
3356          * for this interrupt. Indications will be cleared on related acks.
3357          * New indications can and will light up during processing,
3358          * and will generate a new interrupt once the master is re-enabled.
3359          */
3360         return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3361 }
3362
3363 static inline void gen11_master_intr_enable(void __iomem * const regs)
3364 {
3365         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
3366 }
3367
3368 static irqreturn_t gen11_irq_handler(int irq, void *arg)
3369 {
3370         struct drm_i915_private * const i915 = arg;
3371         void __iomem * const regs = i915->uncore.regs;
3372         struct intel_gt *gt = &i915->gt;
3373         u32 master_ctl;
3374         u32 gu_misc_iir;
3375
3376         if (!intel_irqs_enabled(i915))
3377                 return IRQ_NONE;
3378
3379         master_ctl = gen11_master_intr_disable(regs);
3380         if (!master_ctl) {
3381                 gen11_master_intr_enable(regs);
3382                 return IRQ_NONE;
3383         }
3384
3385         /* Find, clear, then process each source of interrupt. */
3386         gen11_gt_irq_handler(gt, master_ctl);
3387
3388         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3389         if (master_ctl & GEN11_DISPLAY_IRQ) {
3390                 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3391
3392                 disable_rpm_wakeref_asserts(&i915->runtime_pm);
3393                 /*
3394                  * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3395                  * for the display related bits.
3396                  */
3397                 gen8_de_irq_handler(i915, disp_ctl);
3398                 enable_rpm_wakeref_asserts(&i915->runtime_pm);
3399         }
3400
3401         gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
3402
3403         gen11_master_intr_enable(regs);
3404
3405         gen11_gu_misc_irq_handler(gt, gu_misc_iir);
3406
3407         return IRQ_HANDLED;
3408 }
3409
3410 /* Called from drm generic code, passed 'crtc' which
3411  * we use as a pipe index
3412  */
3413 int i8xx_enable_vblank(struct drm_crtc *crtc)
3414 {
3415         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3416         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3417         unsigned long irqflags;
3418
3419         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3420         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3421         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3422
3423         return 0;
3424 }
3425
3426 int i945gm_enable_vblank(struct drm_crtc *crtc)
3427 {
3428         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3429
3430         if (dev_priv->i945gm_vblank.enabled++ == 0)
3431                 schedule_work(&dev_priv->i945gm_vblank.work);
3432
3433         return i8xx_enable_vblank(crtc);
3434 }
3435
3436 int i965_enable_vblank(struct drm_crtc *crtc)
3437 {
3438         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3439         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3440         unsigned long irqflags;
3441
3442         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3443         i915_enable_pipestat(dev_priv, pipe,
3444                              PIPE_START_VBLANK_INTERRUPT_STATUS);
3445         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3446
3447         return 0;
3448 }
3449
3450 int ilk_enable_vblank(struct drm_crtc *crtc)
3451 {
3452         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3453         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3454         unsigned long irqflags;
3455         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3456                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3457
3458         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3459         ilk_enable_display_irq(dev_priv, bit);
3460         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3461
3462         /* Even though there is no DMC, the frame counter can get stuck when
3463          * PSR is active, as no frames are generated.
3464          */
3465         if (HAS_PSR(dev_priv))
3466                 drm_crtc_vblank_restore(crtc);
3467
3468         return 0;
3469 }
3470
3471 int bdw_enable_vblank(struct drm_crtc *crtc)
3472 {
3473         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3474         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3475         unsigned long irqflags;
3476
3477         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3478         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3479         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3480
3481         /* Even if there is no DMC, the frame counter can get stuck when
3482          * PSR is active, as no frames are generated, so check only for PSR.
3483          */
3484         if (HAS_PSR(dev_priv))
3485                 drm_crtc_vblank_restore(crtc);
3486
3487         return 0;
3488 }
3489
3490 /* Called from drm generic code, passed 'crtc' which
3491  * we use as a pipe index
3492  */
3493 void i8xx_disable_vblank(struct drm_crtc *crtc)
3494 {
3495         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3496         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3497         unsigned long irqflags;
3498
3499         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3500         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3501         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3502 }
3503
3504 void i945gm_disable_vblank(struct drm_crtc *crtc)
3505 {
3506         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3507
3508         i8xx_disable_vblank(crtc);
3509
3510         if (--dev_priv->i945gm_vblank.enabled == 0)
3511                 schedule_work(&dev_priv->i945gm_vblank.work);
3512 }
3513
3514 void i965_disable_vblank(struct drm_crtc *crtc)
3515 {
3516         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3517         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3518         unsigned long irqflags;
3519
3520         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3521         i915_disable_pipestat(dev_priv, pipe,
3522                               PIPE_START_VBLANK_INTERRUPT_STATUS);
3523         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3524 }
3525
3526 void ilk_disable_vblank(struct drm_crtc *crtc)
3527 {
3528         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3529         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3530         unsigned long irqflags;
3531         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3532                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3533
3534         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3535         ilk_disable_display_irq(dev_priv, bit);
3536         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3537 }
3538
3539 void bdw_disable_vblank(struct drm_crtc *crtc)
3540 {
3541         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3542         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3543         unsigned long irqflags;
3544
3545         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3546         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3547         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3548 }
3549
3550 static void i945gm_vblank_work_func(struct work_struct *work)
3551 {
3552         struct drm_i915_private *dev_priv =
3553                 container_of(work, struct drm_i915_private, i945gm_vblank.work);
3554
3555         /*
3556          * Vblank interrupts fail to wake up the device from C3,
3557          * hence we want to prevent C3 usage while vblank interrupts
3558          * are enabled.
3559          */
3560         pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3561                               READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3562                               dev_priv->i945gm_vblank.c3_disable_latency :
3563                               PM_QOS_DEFAULT_VALUE);
3564 }
3565
3566 static int cstate_disable_latency(const char *name)
3567 {
3568         const struct cpuidle_driver *drv;
3569         int i;
3570
3571         drv = cpuidle_get_driver();
3572         if (!drv)
3573                 return 0;
3574
3575         for (i = 0; i < drv->state_count; i++) {
3576                 const struct cpuidle_state *state = &drv->states[i];
3577
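                /*
                 * A PM QoS request one microsecond below this state's exit
                 * latency prevents cpuidle from selecting the state (or any
                 * deeper one) while the request is active.
                 */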
3578                 if (!strcmp(state->name, name))
3579                         return state->exit_latency ?
3580                                 state->exit_latency - 1 : 0;
3581         }
3582
3583         return 0;
3584 }
3585
3586 static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3587 {
3588         INIT_WORK(&dev_priv->i945gm_vblank.work,
3589                   i945gm_vblank_work_func);
3590
3591         dev_priv->i945gm_vblank.c3_disable_latency =
3592                 cstate_disable_latency("C3");
3593         pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3594                            PM_QOS_CPU_DMA_LATENCY,
3595                            PM_QOS_DEFAULT_VALUE);
3596 }
3597
3598 static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3599 {
3600         cancel_work_sync(&dev_priv->i945gm_vblank.work);
3601         pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
3602 }
3603
3604 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3605 {
3606         struct intel_uncore *uncore = &dev_priv->uncore;
3607
3608         if (HAS_PCH_NOP(dev_priv))
3609                 return;
3610
3611         GEN3_IRQ_RESET(uncore, SDE);
3612
3613         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3614                 I915_WRITE(SERR_INT, 0xffffffff);
3615 }
3616
3617 /*
3618  * SDEIER is also touched by the interrupt handler to work around missed PCH
3619  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3620  * instead we unconditionally enable all PCH interrupt sources here, but then
3621  * only unmask them as needed with SDEIMR.
3622  *
3623  * This function needs to be called before interrupts are enabled.
3624  */
3625 static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
3626 {
3627         if (HAS_PCH_NOP(dev_priv))
3628                 return;
3629
3630         WARN_ON(I915_READ(SDEIER) != 0);
3631         I915_WRITE(SDEIER, 0xffffffff);
3632         POSTING_READ(SDEIER);
3633 }
3634
3635 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3636 {
3637         struct intel_uncore *uncore = &dev_priv->uncore;
3638
3639         GEN3_IRQ_RESET(uncore, GT);
3640         if (INTEL_GEN(dev_priv) >= 6)
3641                 GEN3_IRQ_RESET(uncore, GEN6_PM);
3642 }
3643
3644 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3645 {
3646         struct intel_uncore *uncore = &dev_priv->uncore;
3647
3648         if (IS_CHERRYVIEW(dev_priv))
3649                 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3650         else
3651                 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
3652
3653         i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3654         intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3655
3656         i9xx_pipestat_irq_reset(dev_priv);
3657
3658         GEN3_IRQ_RESET(uncore, VLV_);
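        /* Leave everything masked; vlv_display_irq_postinstall() checks this. */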
3659         dev_priv->irq_mask = ~0u;
3660 }
3661
3662 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3663 {
3664         struct intel_uncore *uncore = &dev_priv->uncore;
3665
3666         u32 pipestat_mask;
3667         u32 enable_mask;
3668         enum pipe pipe;
3669
3670         pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3671
3672         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3673         for_each_pipe(dev_priv, pipe)
3674                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3675
3676         enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3677                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3678                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3679                 I915_LPE_PIPE_A_INTERRUPT |
3680                 I915_LPE_PIPE_B_INTERRUPT;
3681
3682         if (IS_CHERRYVIEW(dev_priv))
3683                 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3684                         I915_LPE_PIPE_C_INTERRUPT;
3685
3686         WARN_ON(dev_priv->irq_mask != ~0u);
3687
3688         dev_priv->irq_mask = ~enable_mask;
3689
3690         GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3691 }
3692
3693 /* drm_dma.h hooks
3694 */
3695 static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
3696 {
3697         struct intel_uncore *uncore = &dev_priv->uncore;
3698
3699         GEN3_IRQ_RESET(uncore, DE);
3700         if (IS_GEN(dev_priv, 7))
3701                 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3702
3703         if (IS_HASWELL(dev_priv)) {
3704                 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3705                 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3706         }
3707
3708         gen5_gt_irq_reset(dev_priv);
3709
3710         ibx_irq_reset(dev_priv);
3711 }
3712
3713 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3714 {
3715         I915_WRITE(VLV_MASTER_IER, 0);
3716         POSTING_READ(VLV_MASTER_IER);
3717
3718         gen5_gt_irq_reset(dev_priv);
3719
3720         spin_lock_irq(&dev_priv->irq_lock);
3721         if (dev_priv->display_irqs_enabled)
3722                 vlv_display_irq_reset(dev_priv);
3723         spin_unlock_irq(&dev_priv->irq_lock);
3724 }
3725
3726 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3727 {
3728         struct intel_uncore *uncore = &dev_priv->uncore;
3729
3730         GEN8_IRQ_RESET_NDX(uncore, GT, 0);
3731         GEN8_IRQ_RESET_NDX(uncore, GT, 1);
3732         GEN8_IRQ_RESET_NDX(uncore, GT, 2);
3733         GEN8_IRQ_RESET_NDX(uncore, GT, 3);
3734 }
3735
3736 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3737 {
3738         struct intel_uncore *uncore = &dev_priv->uncore;
3739         int pipe;
3740
3741         gen8_master_intr_disable(dev_priv->uncore.regs);
3742
3743         gen8_gt_irq_reset(dev_priv);
3744
3745         intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3746         intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3747
3748         for_each_pipe(dev_priv, pipe)
3749                 if (intel_display_power_is_enabled(dev_priv,
3750                                                    POWER_DOMAIN_PIPE(pipe)))
3751                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3752
3753         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3754         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3755         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3756
3757         if (HAS_PCH_SPLIT(dev_priv))
3758                 ibx_irq_reset(dev_priv);
3759 }
3760
3761 static void gen11_gt_irq_reset(struct intel_gt *gt)
3762 {
3763         struct intel_uncore *uncore = gt->uncore;
3764
3765         /* Disable RCS, BCS, VCS and VECS class engines. */
3766         intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
3767         intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,    0);
3768
3769         /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
3770         intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,   ~0);
3771         intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,    ~0);
3772         intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,   ~0);
3773         intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,   ~0);
3774         intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
3775
3776         intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3777         intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3778         intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
3779         intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
3780 }
3781
3782 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3783 {
3784         struct intel_uncore *uncore = &dev_priv->uncore;
3785         int pipe;
3786
3787         gen11_master_intr_disable(dev_priv->uncore.regs);
3788
3789         gen11_gt_irq_reset(&dev_priv->gt);
3790
3791         intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3792
3793         intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3794         intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3795
3796         for_each_pipe(dev_priv, pipe)
3797                 if (intel_display_power_is_enabled(dev_priv,
3798                                                    POWER_DOMAIN_PIPE(pipe)))
3799                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3800
3801         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3802         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3803         GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3804         GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3805         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3806
3807         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3808                 GEN3_IRQ_RESET(uncore, SDE);
3809 }
3810
3811 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3812                                      u8 pipe_mask)
3813 {
3814         struct intel_uncore *uncore = &dev_priv->uncore;
3815
3816         u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3817         enum pipe pipe;
3818
3819         spin_lock_irq(&dev_priv->irq_lock);
3820
3821         if (!intel_irqs_enabled(dev_priv)) {
3822                 spin_unlock_irq(&dev_priv->irq_lock);
3823                 return;
3824         }
3825
3826         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3827                 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3828                                   dev_priv->de_irq_mask[pipe],
3829                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
3830
3831         spin_unlock_irq(&dev_priv->irq_lock);
3832 }
3833
3834 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3835                                      u8 pipe_mask)
3836 {
3837         struct intel_uncore *uncore = &dev_priv->uncore;
3838         enum pipe pipe;
3839
3840         spin_lock_irq(&dev_priv->irq_lock);
3841
3842         if (!intel_irqs_enabled(dev_priv)) {
3843                 spin_unlock_irq(&dev_priv->irq_lock);
3844                 return;
3845         }
3846
3847         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3848                 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3849
3850         spin_unlock_irq(&dev_priv->irq_lock);
3851
3852         /* make sure we're done processing display irqs */
3853         intel_synchronize_irq(dev_priv);
3854 }
3855
3856 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3857 {
3858         struct intel_uncore *uncore = &dev_priv->uncore;
3859
3860         I915_WRITE(GEN8_MASTER_IRQ, 0);
3861         POSTING_READ(GEN8_MASTER_IRQ);
3862
3863         gen8_gt_irq_reset(dev_priv);
3864
3865         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3866
3867         spin_lock_irq(&dev_priv->irq_lock);
3868         if (dev_priv->display_irqs_enabled)
3869                 vlv_display_irq_reset(dev_priv);
3870         spin_unlock_irq(&dev_priv->irq_lock);
3871 }
3872
3873 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3874                                   const u32 hpd[HPD_NUM_PINS])
3875 {
3876         struct intel_encoder *encoder;
3877         u32 enabled_irqs = 0;
3878
3879         for_each_intel_encoder(&dev_priv->drm, encoder)
3880                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3881                         enabled_irqs |= hpd[encoder->hpd_pin];
3882
3883         return enabled_irqs;
3884 }
3885
3886 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3887 {
3888         u32 hotplug;
3889
3890         /*
3891          * Enable digital hotplug on the PCH, and configure the DP short pulse
3892          * duration to 2ms (which is the minimum in the Display Port spec).
3893          * The pulse duration bits are reserved on LPT+.
3894          */
3895         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3896         hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3897                      PORTC_PULSE_DURATION_MASK |
3898                      PORTD_PULSE_DURATION_MASK);
3899         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3900         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3901         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3902         /*
3903          * When CPU and PCH are on the same package, port A
3904          * HPD must be enabled in both north and south.
3905          */
3906         if (HAS_PCH_LPT_LP(dev_priv))
3907                 hotplug |= PORTA_HOTPLUG_ENABLE;
3908         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3909 }
3910
3911 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3912 {
3913         u32 hotplug_irqs, enabled_irqs;
3914
3915         if (HAS_PCH_IBX(dev_priv)) {
3916                 hotplug_irqs = SDE_HOTPLUG_MASK;
3917                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3918         } else {
3919                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3920                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3921         }
3922
3923         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3924
3925         ibx_hpd_detection_setup(dev_priv);
3926 }
3927
3928 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
3929                                     u32 ddi_hotplug_enable_mask,
3930                                     u32 tc_hotplug_enable_mask)
3931 {
3932         u32 hotplug;
3933
3934         hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3935         hotplug |= ddi_hotplug_enable_mask;
3936         I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3937
3938         if (tc_hotplug_enable_mask) {
3939                 hotplug = I915_READ(SHOTPLUG_CTL_TC);
3940                 hotplug |= tc_hotplug_enable_mask;
3941                 I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3942         }
3943 }
3944
3945 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3946 {
3947         u32 hotplug_irqs, enabled_irqs;
3948
3949         hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3950         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3951
3952         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3953
3954         icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
3955                                 ICP_TC_HPD_ENABLE_MASK);
3956 }
3957
3958 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3959 {
3960         u32 hotplug_irqs, enabled_irqs;
3961
3962         hotplug_irqs = SDE_DDI_MASK_TGP;
3963         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc);
3964
3965         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3966
3967         icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
3968 }
3969
3970 static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3971 {
3972         u32 hotplug_irqs, enabled_irqs;
3973
3974         hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP;
3975         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp);
3976
3977         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3978
3979         icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
3980                                 TGP_TC_HPD_ENABLE_MASK);
3981 }
3982
3983 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3984 {
3985         u32 hotplug;
3986
3987         hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3988         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3989                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3990                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3991                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3992         I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3993
3994         hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3995         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3996                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3997                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3998                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3999         I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
4000 }
4001
4002 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
4003 {
4004         u32 hotplug_irqs, enabled_irqs;
4005         const u32 *hpd;
4006         u32 val;
4007
4008         hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
4009         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
4010         hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
4011
4012         val = I915_READ(GEN11_DE_HPD_IMR);
4013         val &= ~hotplug_irqs;
4014         I915_WRITE(GEN11_DE_HPD_IMR, val);
4015         POSTING_READ(GEN11_DE_HPD_IMR);
4016
4017         gen11_hpd_detection_setup(dev_priv);
4018
4019         if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
4020                 tgp_hpd_irq_setup(dev_priv);
4021         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4022                 icp_hpd_irq_setup(dev_priv);
4023 }
4024
4025 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
4026 {
4027         u32 val, hotplug;
4028
4029         /* Display WA #1179 WaHardHangonHotPlug: cnp */
4030         if (HAS_PCH_CNP(dev_priv)) {
4031                 val = I915_READ(SOUTH_CHICKEN1);
4032                 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
4033                 val |= CHASSIS_CLK_REQ_DURATION(0xf);
4034                 I915_WRITE(SOUTH_CHICKEN1, val);
4035         }
4036
4037         /* Enable digital hotplug on the PCH */
4038         hotplug = I915_READ(PCH_PORT_HOTPLUG);
4039         hotplug |= PORTA_HOTPLUG_ENABLE |
4040                    PORTB_HOTPLUG_ENABLE |
4041                    PORTC_HOTPLUG_ENABLE |
4042                    PORTD_HOTPLUG_ENABLE;
4043         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
4044
4045         hotplug = I915_READ(PCH_PORT_HOTPLUG2);
4046         hotplug |= PORTE_HOTPLUG_ENABLE;
4047         I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
4048 }
4049
4050 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
4051 {
4052         u32 hotplug_irqs, enabled_irqs;
4053
4054         hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
4055         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
4056
4057         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
4058
4059         spt_hpd_detection_setup(dev_priv);
4060 }
4061
4062 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
4063 {
4064         u32 hotplug;
4065
4066         /*
4067          * Enable digital hotplug on the CPU, and configure the DP short pulse
4068          * duration to 2ms (which is the minimum in the Display Port spec).
4069          * The pulse duration bits are reserved on HSW+.
4070          */
4071         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
4072         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
4073         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
4074                    DIGITAL_PORTA_PULSE_DURATION_2ms;
4075         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
4076 }
4077
4078 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
4079 {
4080         u32 hotplug_irqs, enabled_irqs;
4081
4082         if (INTEL_GEN(dev_priv) >= 8) {
4083                 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
4084                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
4085
4086                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
4087         } else if (INTEL_GEN(dev_priv) >= 7) {
4088                 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
4089                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
4090
4091                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
4092         } else {
4093                 hotplug_irqs = DE_DP_A_HOTPLUG;
4094                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
4095
4096                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
4097         }
4098
4099         ilk_hpd_detection_setup(dev_priv);
4100
4101         ibx_hpd_irq_setup(dev_priv);
4102 }
4103
4104 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
4105                                       u32 enabled_irqs)
4106 {
4107         u32 hotplug;
4108
4109         hotplug = I915_READ(PCH_PORT_HOTPLUG);
4110         hotplug |= PORTA_HOTPLUG_ENABLE |
4111                    PORTB_HOTPLUG_ENABLE |
4112                    PORTC_HOTPLUG_ENABLE;
4113
4114         DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
4115                       hotplug, enabled_irqs);
4116         hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
4117
4118         /*
4119          * For BXT, the invert bit has to be set based on the AOB design
4120          * for the HPD detection logic; update it based on the VBT fields.
4121          */
4122         if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
4123             intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
4124                 hotplug |= BXT_DDIA_HPD_INVERT;
4125         if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
4126             intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
4127                 hotplug |= BXT_DDIB_HPD_INVERT;
4128         if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
4129             intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
4130                 hotplug |= BXT_DDIC_HPD_INVERT;
4131
4132         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
4133 }
4134
4135 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
4136 {
4137         __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
4138 }
4139
4140 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
4141 {
4142         u32 hotplug_irqs, enabled_irqs;
4143
4144         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
4145         hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
4146
4147         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
4148
4149         __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
4150 }
4151
4152 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
4153 {
4154         u32 mask;
4155
4156         if (HAS_PCH_NOP(dev_priv))
4157                 return;
4158
4159         if (HAS_PCH_IBX(dev_priv))
4160                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
4161         else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
4162                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
4163         else
4164                 mask = SDE_GMBUS_CPT;
4165
4166         gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
4167         I915_WRITE(SDEIMR, ~mask);
4168
4169         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
4170             HAS_PCH_LPT(dev_priv))
4171                 ibx_hpd_detection_setup(dev_priv);
4172         else
4173                 spt_hpd_detection_setup(dev_priv);
4174 }
4175
4176 static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4177 {
4178         struct intel_uncore *uncore = &dev_priv->uncore;
4179         u32 pm_irqs, gt_irqs;
4180
4181         pm_irqs = gt_irqs = 0;
4182
4183         dev_priv->gt_irq_mask = ~0;
4184         if (HAS_L3_DPF(dev_priv)) {
4185                 /* L3 parity interrupt is always unmasked. */
4186                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
4187                 gt_irqs |= GT_PARITY_ERROR(dev_priv);
4188         }
4189
4190         gt_irqs |= GT_RENDER_USER_INTERRUPT;
4191         if (IS_GEN(dev_priv, 5)) {
4192                 gt_irqs |= ILK_BSD_USER_INTERRUPT;
4193         } else {
4194                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
4195         }
4196
4197         GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
4198
4199         if (INTEL_GEN(dev_priv) >= 6) {
4200                 /*
4201                  * RPS interrupts will get enabled/disabled on demand when RPS
4202                  * itself is enabled/disabled.
4203                  */
4204                 if (HAS_ENGINE(dev_priv, VECS0)) {
4205                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
4206                         dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
4207                 }
4208
4209                 dev_priv->gt.pm_imr = 0xffffffff;
4210                 GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
4211         }
4212 }
4213
4214 static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
4215 {
4216         struct intel_uncore *uncore = &dev_priv->uncore;
4217         u32 display_mask, extra_mask;
4218
4219         if (INTEL_GEN(dev_priv) >= 7) {
4220                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
4221                                 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
4222                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
4223                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
4224                               DE_DP_A_HOTPLUG_IVB);
4225         } else {
4226                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4227                                 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
4228                                 DE_PIPEA_CRC_DONE | DE_POISON);
4229                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
4230                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
4231                               DE_DP_A_HOTPLUG);
4232         }
4233
4234         if (IS_HASWELL(dev_priv)) {
4235                 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
4236                 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4237                 display_mask |= DE_EDP_PSR_INT_HSW;
4238         }
4239
4240         dev_priv->irq_mask = ~display_mask;
4241
4242         ibx_irq_pre_postinstall(dev_priv);
4243
4244         GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
4245                       display_mask | extra_mask);
4246
4247         gen5_gt_irq_postinstall(dev_priv);
4248
4249         ilk_hpd_detection_setup(dev_priv);
4250
4251         ibx_irq_postinstall(dev_priv);
4252
4253         if (IS_IRONLAKE_M(dev_priv)) {
4254                 /* Enable PCU event interrupts
4255                  *
4256                  * spinlocking not required here for correctness since interrupt
4257                  * setup is guaranteed to run in single-threaded context. But we
4258                  * need it to make the assert_spin_locked happy. */
4259                 spin_lock_irq(&dev_priv->irq_lock);
4260                 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4261                 spin_unlock_irq(&dev_priv->irq_lock);
4262         }
4263 }
4264
4265 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4266 {
4267         lockdep_assert_held(&dev_priv->irq_lock);
4268
4269         if (dev_priv->display_irqs_enabled)
4270                 return;
4271
4272         dev_priv->display_irqs_enabled = true;
4273
4274         if (intel_irqs_enabled(dev_priv)) {
4275                 vlv_display_irq_reset(dev_priv);
4276                 vlv_display_irq_postinstall(dev_priv);
4277         }
4278 }
4279
4280 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4281 {
4282         lockdep_assert_held(&dev_priv->irq_lock);
4283
4284         if (!dev_priv->display_irqs_enabled)
4285                 return;
4286
4287         dev_priv->display_irqs_enabled = false;
4288
4289         if (intel_irqs_enabled(dev_priv))
4290                 vlv_display_irq_reset(dev_priv);
4291 }
4292
4294 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
4295 {
4296         gen5_gt_irq_postinstall(dev_priv);
4297
4298         spin_lock_irq(&dev_priv->irq_lock);
4299         if (dev_priv->display_irqs_enabled)
4300                 vlv_display_irq_postinstall(dev_priv);
4301         spin_unlock_irq(&dev_priv->irq_lock);
4302
4303         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
4304         POSTING_READ(VLV_MASTER_IER);
4305 }
4306
4307 static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
4308 {
4309         struct intel_gt *gt = &i915->gt;
4310         struct intel_uncore *uncore = gt->uncore;
4311
4312         /* These are interrupts we'll toggle with the ring mask register */
4313         u32 gt_interrupts[] = {
4314                 (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4315                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4316                  GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
4317                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
4318
4319                 (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4320                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4321                  GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4322                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
4323
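                /* Bank 2 carries the PM/RPS interrupts; initialized below. */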
4324                 0,
4325
4326                 (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
4327                  GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
4328         };
4329
4330         gt->pm_ier = 0x0;
4331         gt->pm_imr = ~gt->pm_ier;
4332         GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4333         GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
4334         /*
4335          * RPS interrupts will get enabled/disabled on demand when RPS itself
4336          * is enabled/disabled. Same will be the case for GuC interrupts.
4337          */
4338         GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
4339         GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4340 }
4341
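/*
 * Editorial sketch, not part of the file: GEN8_IRQ_INIT_NDX() used above
 * programs one banked IMR/IER pair. For GT bank 0 it is roughly
 * equivalent to the open-coded sequence below (simplified; the real
 * helper also asserts that the corresponding IIR is zero first).
 */
static void __maybe_unused example_gen8_gt0_irq_init(struct intel_uncore *uncore,
                                                     u32 imr_val, u32 ier_val)
{
        intel_uncore_write(uncore, GEN8_GT_IMR(0), imr_val);
        intel_uncore_write(uncore, GEN8_GT_IER(0), ier_val);
        /* posting read to flush the writes before the IRQ is relied upon */
        intel_uncore_posting_read(uncore, GEN8_GT_IMR(0));
}
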
4342 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4343 {
4344         struct intel_uncore *uncore = &dev_priv->uncore;
4345
4346         u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4347         u32 de_pipe_enables;
4348         u32 de_port_masked = GEN8_AUX_CHANNEL_A;
4349         u32 de_port_enables;
4350         u32 de_misc_masked = GEN8_DE_EDP_PSR;
4351         enum pipe pipe;
4352
4353         if (INTEL_GEN(dev_priv) <= 10)
4354                 de_misc_masked |= GEN8_DE_MISC_GSE;
4355
4356         if (INTEL_GEN(dev_priv) >= 9) {
4357                 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4358                 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4359                                   GEN9_AUX_CHANNEL_D;
4360                 if (IS_GEN9_LP(dev_priv))
4361                         de_port_masked |= BXT_DE_PORT_GMBUS;
4362         } else {
4363                 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4364         }
4365
4366         if (INTEL_GEN(dev_priv) >= 11)
4367                 de_port_masked |= ICL_AUX_CHANNEL_E;
4368
4369         if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4370                 de_port_masked |= CNL_AUX_CHANNEL_F;
4371
4372         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4373                                            GEN8_PIPE_FIFO_UNDERRUN;
4374
4375         de_port_enables = de_port_masked;
4376         if (IS_GEN9_LP(dev_priv))
4377                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4378         else if (IS_BROADWELL(dev_priv))
4379                 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
4380
4381         gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
4382         intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4383
4384         for_each_pipe(dev_priv, pipe) {
4385                 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4386
4387                 if (intel_display_power_is_enabled(dev_priv,
4388                                 POWER_DOMAIN_PIPE(pipe)))
4389                         GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
4390                                           dev_priv->de_irq_mask[pipe],
4391                                           de_pipe_enables);
4392         }
4393
4394         GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4395         GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4396
4397         if (INTEL_GEN(dev_priv) >= 11) {
4398                 u32 de_hpd_masked = 0;
4399                 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4400                                      GEN11_DE_TBT_HOTPLUG_MASK;
4401
4402                 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
4403                               de_hpd_enables);
4404                 gen11_hpd_detection_setup(dev_priv);
4405         } else if (IS_GEN9_LP(dev_priv)) {
4406                 bxt_hpd_detection_setup(dev_priv);
4407         } else if (IS_BROADWELL(dev_priv)) {
4408                 ilk_hpd_detection_setup(dev_priv);
4409         }
4410 }
4411
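/*
 * Editorial sketch, not part of the file: pipes whose power wells are
 * down are skipped in the loop above; their DE_PIPE IMR/IER get
 * programmed later, once the well powers up, via the driver's existing
 * gen8_irq_power_well_post_enable() helper. A hypothetical caller:
 */
static void __maybe_unused example_pipe_power_on(struct drm_i915_private *dev_priv,
                                                 enum pipe pipe)
{
        gen8_irq_power_well_post_enable(dev_priv, BIT(pipe));
}
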
4412 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
4413 {
4414         if (HAS_PCH_SPLIT(dev_priv))
4415                 ibx_irq_pre_postinstall(dev_priv);
4416
4417         gen8_gt_irq_postinstall(dev_priv);
4418         gen8_de_irq_postinstall(dev_priv);
4419
4420         if (HAS_PCH_SPLIT(dev_priv))
4421                 ibx_irq_postinstall(dev_priv);
4422
4423         gen8_master_intr_enable(dev_priv->uncore.regs);
4424 }
4425
4426 static void gen11_gt_irq_postinstall(struct intel_gt *gt)
4427 {
4428         const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4429         struct intel_uncore *uncore = gt->uncore;
4430         const u32 dmask = irqs << 16 | irqs;
4431         const u32 smask = irqs << 16;
4432
4433         BUILD_BUG_ON(irqs & 0xffff0000);
4434
4435         /* Enable RCS, BCS, VCS and VECS class interrupts. */
4436         intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
4437         intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
4438
4439         /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4440         intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
4441         intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
4442         intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
4443         intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
4444         intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
4445
4446         /*
4447          * RPS interrupts will get enabled/disabled on demand when RPS itself
4448          * is enabled/disabled.
4449          */
4450         gt->pm_ier = 0x0;
4451         gt->pm_imr = ~gt->pm_ier;
4452         intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4453         intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
4454
4455         /* Same thing for GuC interrupts */
4456         intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
4457         intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
4458 }
4459
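/*
 * Editorial note, not part of the file: on gen11 each enable/mask
 * register serves a pair of engines, one engine per 16-bit half. With
 * irqs confined to the low 16 bits (enforced by the BUILD_BUG_ON above),
 * the two masks used above expand as:
 *
 *   dmask = irqs << 16 | irqs;  covers both halves of an engine pair
 *   smask = irqs << 16;         upper half only; the lower half of e.g.
 *                               GEN11_RCS0_RSVD_INTR_MASK is reserved
 */
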
4460 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
4461 {
4462         u32 mask = SDE_GMBUS_ICP;
4463
4464         WARN_ON(I915_READ(SDEIER) != 0);
4465         I915_WRITE(SDEIER, 0xffffffff);
4466         POSTING_READ(SDEIER);
4467
4468         gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
4469         I915_WRITE(SDEIMR, ~mask);
4470
4471         if (HAS_PCH_TGP(dev_priv))
4472                 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
4473                                         TGP_TC_HPD_ENABLE_MASK);
4474         else if (HAS_PCH_MCC(dev_priv))
4475                 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
4476         else
4477                 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
4478                                         ICP_TC_HPD_ENABLE_MASK);
4479 }
4480
4481 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
4482 {
4483         struct intel_uncore *uncore = &dev_priv->uncore;
4484         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4485
4486         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4487                 icp_irq_postinstall(dev_priv);
4488
4489         gen11_gt_irq_postinstall(&dev_priv->gt);
4490         gen8_de_irq_postinstall(dev_priv);
4491
4492         GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4493
4494         I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4495
4496         gen11_master_intr_enable(uncore->regs);
4497         POSTING_READ(GEN11_GFX_MSTR_IRQ);
4498 }
4499
4500 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
4501 {
4502         gen8_gt_irq_postinstall(dev_priv);
4503
4504         spin_lock_irq(&dev_priv->irq_lock);
4505         if (dev_priv->display_irqs_enabled)
4506                 vlv_display_irq_postinstall(dev_priv);
4507         spin_unlock_irq(&dev_priv->irq_lock);
4508
4509         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4510         POSTING_READ(GEN8_MASTER_IRQ);
4511 }
4512
4513 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
4514 {
4515         struct intel_uncore *uncore = &dev_priv->uncore;
4516
4517         i9xx_pipestat_irq_reset(dev_priv);
4518
4519         GEN2_IRQ_RESET(uncore);
4520 }
4521
4522 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
4523 {
4524         struct intel_uncore *uncore = &dev_priv->uncore;
4525         u16 enable_mask;
4526
4527         intel_uncore_write16(uncore,
4528                              EMR,
4529                              ~(I915_ERROR_PAGE_TABLE |
4530                                I915_ERROR_MEMORY_REFRESH));
4531
4532         /* Unmask the interrupts that we always want on. */
4533         dev_priv->irq_mask =
4534                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4535                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4536                   I915_MASTER_ERROR_INTERRUPT);
4537
4538         enable_mask =
4539                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4540                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4541                 I915_MASTER_ERROR_INTERRUPT |
4542                 I915_USER_INTERRUPT;
4543
4544         GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
4545
4546         /* Interrupt setup is already guaranteed to be single-threaded; this is
4547          * just to make the assert_spin_locked check happy. */
4548         spin_lock_irq(&dev_priv->irq_lock);
4549         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4550         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4551         spin_unlock_irq(&dev_priv->irq_lock);
4552 }
4553
4554 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
4555                                u16 *eir, u16 *eir_stuck)
4556 {
4557         struct intel_uncore *uncore = &i915->uncore;
4558         u16 emr;
4559
4560         *eir = intel_uncore_read16(uncore, EIR);
4561
4562         if (*eir)
4563                 intel_uncore_write16(uncore, EIR, *eir);
4564
4565         *eir_stuck = intel_uncore_read16(uncore, EIR);
4566         if (*eir_stuck == 0)
4567                 return;
4568
4569         /*
4570          * Toggle all EMR bits to make sure we get an edge
4571          * in the ISR master error bit if we don't clear
4572          * all the EIR bits. Otherwise the edge triggered
4573          * IIR on i965/g4x wouldn't notice that an interrupt
4574          * is still pending. Also some EIR bits can't be
4575          * cleared except by handling the underlying error
4576          * (or by a GPU reset) so we mask any bit that
4577          * remains set.
4578          */
4579         emr = intel_uncore_read16(uncore, EMR);
4580         intel_uncore_write16(uncore, EMR, 0xffff);
4581         intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
4582 }
4583
4584 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
4585                                    u16 eir, u16 eir_stuck)
4586 {
4587         DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4588
4589         if (eir_stuck)
4590                 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
4591 }
4592
4593 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4594                                u32 *eir, u32 *eir_stuck)
4595 {
4596         u32 emr;
4597
4598         *eir = I915_READ(EIR);
4599
4600         I915_WRITE(EIR, *eir);
4601
4602         *eir_stuck = I915_READ(EIR);
4603         if (*eir_stuck == 0)
4604                 return;
4605
4606         /*
4607          * Toggle all EMR bits to make sure we get an edge
4608          * in the ISR master error bit if we don't clear
4609          * all the EIR bits. Otherwise the edge triggered
4610          * IIR on i965/g4x wouldn't notice that an interrupt
4611          * is still pending. Also some EIR bits can't be
4612          * cleared except by handling the underlying error
4613          * (or by a GPU reset) so we mask any bit that
4614          * remains set.
4615          */
4616         emr = I915_READ(EMR);
4617         I915_WRITE(EMR, 0xffffffff);
4618         I915_WRITE(EMR, emr | *eir_stuck);
4619 }
4620
4621 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4622                                    u32 eir, u32 eir_stuck)
4623 {
4624         DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
4625
4626         if (eir_stuck)
4627                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
4628 }
4629
4630 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4631 {
4632         struct drm_i915_private *dev_priv = arg;
4633         irqreturn_t ret = IRQ_NONE;
4634
4635         if (!intel_irqs_enabled(dev_priv))
4636                 return IRQ_NONE;
4637
4638         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4639         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4640
4641         do {
4642                 u32 pipe_stats[I915_MAX_PIPES] = {};
4643                 u16 eir = 0, eir_stuck = 0;
4644                 u16 iir;
4645
4646                 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4647                 if (iir == 0)
4648                         break;
4649
4650                 ret = IRQ_HANDLED;
4651
4652                 /* Call regardless, as some status bits might not be
4653                  * signalled in iir */
4654                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4655
4656                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4657                         i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4658
4659                 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4660
4661                 if (iir & I915_USER_INTERRUPT)
4662                         intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4663
4664                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4665                         i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4666
4667                 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4668         } while (0);
4669
4670         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4671
4672         return ret;
4673 }
4674
4675 static void i915_irq_reset(struct drm_i915_private *dev_priv)
4676 {
4677         struct intel_uncore *uncore = &dev_priv->uncore;
4678
4679         if (I915_HAS_HOTPLUG(dev_priv)) {
4680                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4681                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4682         }
4683
4684         i9xx_pipestat_irq_reset(dev_priv);
4685
4686         GEN3_IRQ_RESET(uncore, GEN2_);
4687 }
4688
4689 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4690 {
4691         struct intel_uncore *uncore = &dev_priv->uncore;
4692         u32 enable_mask;
4693
4694         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4695                           I915_ERROR_MEMORY_REFRESH));
4696
4697         /* Unmask the interrupts that we always want on. */
4698         dev_priv->irq_mask =
4699                 ~(I915_ASLE_INTERRUPT |
4700                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4701                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4702                   I915_MASTER_ERROR_INTERRUPT);
4703
4704         enable_mask =
4705                 I915_ASLE_INTERRUPT |
4706                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4707                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4708                 I915_MASTER_ERROR_INTERRUPT |
4709                 I915_USER_INTERRUPT;
4710
4711         if (I915_HAS_HOTPLUG(dev_priv)) {
4712                 /* Enable in IER... */
4713                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4714                 /* and unmask in IMR */
4715                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4716         }
4717
4718         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4719
4720         /* Interrupt setup is already guaranteed to be single-threaded; this is
4721          * just to make the assert_spin_locked check happy. */
4722         spin_lock_irq(&dev_priv->irq_lock);
4723         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4724         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4725         spin_unlock_irq(&dev_priv->irq_lock);
4726
4727         i915_enable_asle_pipestat(dev_priv);
4728 }
4729
4730 static irqreturn_t i915_irq_handler(int irq, void *arg)
4731 {
4732         struct drm_i915_private *dev_priv = arg;
4733         irqreturn_t ret = IRQ_NONE;
4734
4735         if (!intel_irqs_enabled(dev_priv))
4736                 return IRQ_NONE;
4737
4738         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4739         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4740
4741         do {
4742                 u32 pipe_stats[I915_MAX_PIPES] = {};
4743                 u32 eir = 0, eir_stuck = 0;
4744                 u32 hotplug_status = 0;
4745                 u32 iir;
4746
4747                 iir = I915_READ(GEN2_IIR);
4748                 if (iir == 0)
4749                         break;
4750
4751                 ret = IRQ_HANDLED;
4752
4753                 if (I915_HAS_HOTPLUG(dev_priv) &&
4754                     iir & I915_DISPLAY_PORT_INTERRUPT)
4755                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4756
4757                 /* Call regardless, as some status bits might not be
4758                  * signalled in iir */
4759                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4760
4761                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4762                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4763
4764                 I915_WRITE(GEN2_IIR, iir);
4765
4766                 if (iir & I915_USER_INTERRUPT)
4767                         intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4768
4769                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4770                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4771
4772                 if (hotplug_status)
4773                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4774
4775                 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4776         } while (0);
4777
4778         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4779
4780         return ret;
4781 }
4782
4783 static void i965_irq_reset(struct drm_i915_private *dev_priv)
4784 {
4785         struct intel_uncore *uncore = &dev_priv->uncore;
4786
4787         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4788         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4789
4790         i9xx_pipestat_irq_reset(dev_priv);
4791
4792         GEN3_IRQ_RESET(uncore, GEN2_);
4793 }
4794
4795 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4796 {
4797         struct intel_uncore *uncore = &dev_priv->uncore;
4798         u32 enable_mask;
4799         u32 error_mask;
4800
4801         /*
4802          * Enable some error detection. Note that the instruction error mask
4803          * bit is reserved, so we leave it masked.
4804          */
4805         if (IS_G4X(dev_priv)) {
4806                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4807                                GM45_ERROR_MEM_PRIV |
4808                                GM45_ERROR_CP_PRIV |
4809                                I915_ERROR_MEMORY_REFRESH);
4810         } else {
4811                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4812                                I915_ERROR_MEMORY_REFRESH);
4813         }
4814         I915_WRITE(EMR, error_mask);
4815
4816         /* Unmask the interrupts that we always want on. */
4817         dev_priv->irq_mask =
4818                 ~(I915_ASLE_INTERRUPT |
4819                   I915_DISPLAY_PORT_INTERRUPT |
4820                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4821                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4822                   I915_MASTER_ERROR_INTERRUPT);
4823
4824         enable_mask =
4825                 I915_ASLE_INTERRUPT |
4826                 I915_DISPLAY_PORT_INTERRUPT |
4827                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4828                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4829                 I915_MASTER_ERROR_INTERRUPT |
4830                 I915_USER_INTERRUPT;
4831
4832         if (IS_G4X(dev_priv))
4833                 enable_mask |= I915_BSD_USER_INTERRUPT;
4834
4835         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4836
4837         /* Interrupt setup is already guaranteed to be single-threaded; this is
4838          * just to make the assert_spin_locked check happy. */
4839         spin_lock_irq(&dev_priv->irq_lock);
4840         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4841         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4842         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4843         spin_unlock_irq(&dev_priv->irq_lock);
4844
4845         i915_enable_asle_pipestat(dev_priv);
4846 }
4847
4848 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4849 {
4850         u32 hotplug_en;
4851
4852         lockdep_assert_held(&dev_priv->irq_lock);
4853
4854         /* Note HDMI and DP share hotplug bits */
4855         /* enable bits are the same for all generations */
4856         hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4857         /*
4858          * Programming the CRT detection parameters tends to generate a spurious
4859          * hotplug event about three seconds later, so just do it once.
4860          */
4861         if (IS_G4X(dev_priv))
4862                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4863         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4864
4865         /* Ignore TV since it's buggy */
4866         i915_hotplug_interrupt_update_locked(dev_priv,
4867                                              HOTPLUG_INT_EN_MASK |
4868                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4869                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4870                                              hotplug_en);
4871 }
4872
4873 static irqreturn_t i965_irq_handler(int irq, void *arg)
4874 {
4875         struct drm_i915_private *dev_priv = arg;
4876         irqreturn_t ret = IRQ_NONE;
4877
4878         if (!intel_irqs_enabled(dev_priv))
4879                 return IRQ_NONE;
4880
4881         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4882         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4883
4884         do {
4885                 u32 pipe_stats[I915_MAX_PIPES] = {};
4886                 u32 eir = 0, eir_stuck = 0;
4887                 u32 hotplug_status = 0;
4888                 u32 iir;
4889
4890                 iir = I915_READ(GEN2_IIR);
4891                 if (iir == 0)
4892                         break;
4893
4894                 ret = IRQ_HANDLED;
4895
4896                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4897                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4898
4899                 /* Call regardless, as some status bits might not be
4900                  * signalled in iir */
4901                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4902
4903                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4904                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4905
4906                 I915_WRITE(GEN2_IIR, iir);
4907
4908                 if (iir & I915_USER_INTERRUPT)
4909                         intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4910
4911                 if (iir & I915_BSD_USER_INTERRUPT)
4912                         intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
4913
4914                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4915                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4916
4917                 if (hotplug_status)
4918                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4919
4920                 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4921         } while (0);
4922
4923         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4924
4925         return ret;
4926 }
4927
4928 /**
4929  * intel_irq_init - initializes irq support
4930  * @dev_priv: i915 device instance
4931  *
4932  * This function initializes all the irq support including work items, timers
4933  * and all the vtables. It does not set up the interrupt itself, though.
4934  */
4935 void intel_irq_init(struct drm_i915_private *dev_priv)
4936 {
4937         struct drm_device *dev = &dev_priv->drm;
4938         struct intel_rps *rps = &dev_priv->gt_pm.rps;
4939         int i;
4940
4941         if (IS_I945GM(dev_priv))
4942                 i945gm_vblank_work_init(dev_priv);
4943
4944         intel_hpd_init_work(dev_priv);
4945
4946         INIT_WORK(&rps->work, gen6_pm_rps_work);
4947
4948         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4949         for (i = 0; i < MAX_L3_SLICES; ++i)
4950                 dev_priv->l3_parity.remap_info[i] = NULL;
4951
4952         /* pre-gen11 the GuC irq bits are in the upper 16 bits of the PM reg */
4953         if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
4954                 dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
4955
4956         /* Let's track the enabled rps events */
4957         if (IS_VALLEYVIEW(dev_priv))
4958                 /* WaGsvRC0ResidencyMethod:vlv */
4959                 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4960         else
4961                 dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
4962                                            GEN6_PM_RP_DOWN_THRESHOLD |
4963                                            GEN6_PM_RP_DOWN_TIMEOUT);
4964
4965         /* We share the register with another engine */
4966         if (INTEL_GEN(dev_priv) > 9)
4967                 GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
4968
4969         rps->pm_intrmsk_mbz = 0;
4970
4971         /*
4972          * SNB, IVB and HSW can hard hang (and VLV, CHV may) on a looping
4973          * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4974          *
4975          * TODO: verify if this can be reproduced on VLV,CHV.
4976          */
4977         if (INTEL_GEN(dev_priv) <= 7)
4978                 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4979
4980         if (INTEL_GEN(dev_priv) >= 8)
4981                 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4982
4983         dev->vblank_disable_immediate = true;
4984
4985         /* Most platforms treat the display irq block as an always-on
4986          * power domain. vlv/chv can disable it at runtime and need
4987          * special care to avoid writing any of the display block registers
4988          * outside of the power domain. We defer setting up the display irqs
4989          * in this case to the runtime pm.
4990          */
4991         dev_priv->display_irqs_enabled = true;
4992         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4993                 dev_priv->display_irqs_enabled = false;
4994
4995         dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4996         /* If we have MST support, we want to avoid doing short HPD IRQ storm
4997          * detection, as short HPD storms will occur as a natural part of
4998          * sideband messaging with MST.
4999          * On older platforms however, IRQ storms can occur with both long and
5000          * short pulses, as seen on some G4x systems.
5001          */
5002         dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
5003
5004         if (HAS_GMCH(dev_priv)) {
5005                 if (I915_HAS_HOTPLUG(dev_priv))
5006                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
5007         } else {
5008                 if (HAS_PCH_MCC(dev_priv))
5009                         /* EHL doesn't need most of gen11_hpd_irq_setup */
5010                         dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
5011                 else if (INTEL_GEN(dev_priv) >= 11)
5012                         dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
5013                 else if (IS_GEN9_LP(dev_priv))
5014                         dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
5015                 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
5016                         dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
5017                 else
5018                         dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
5019         }
5020 }
5021
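/*
 * Editorial sketch, not part of the file: the hpd_irq_setup hook chosen
 * above is invoked later by the hotplug code, under irq_lock. A caller
 * along these lines (hypothetical name; the real one lives in
 * display/intel_hotplug.c) would be:
 */
static void __maybe_unused example_run_hpd_irq_setup(struct drm_i915_private *i915)
{
        spin_lock_irq(&i915->irq_lock);
        if (i915->display_irqs_enabled && i915->display.hpd_irq_setup)
                i915->display.hpd_irq_setup(i915);
        spin_unlock_irq(&i915->irq_lock);
}
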
5022 /**
5023  * intel_irq_fini - deinitializes IRQ support
5024  * @i915: i915 device instance
5025  *
5026  * This function deinitializes all the IRQ support.
5027  */
5028 void intel_irq_fini(struct drm_i915_private *i915)
5029 {
5030         int i;
5031
5032         if (IS_I945GM(i915))
5033                 i945gm_vblank_work_fini(i915);
5034
5035         for (i = 0; i < MAX_L3_SLICES; ++i)
5036                 kfree(i915->l3_parity.remap_info[i]);
5037 }
5038
5039 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
5040 {
5041         if (HAS_GMCH(dev_priv)) {
5042                 if (IS_CHERRYVIEW(dev_priv))
5043                         return cherryview_irq_handler;
5044                 else if (IS_VALLEYVIEW(dev_priv))
5045                         return valleyview_irq_handler;
5046                 else if (IS_GEN(dev_priv, 4))
5047                         return i965_irq_handler;
5048                 else if (IS_GEN(dev_priv, 3))
5049                         return i915_irq_handler;
5050                 else
5051                         return i8xx_irq_handler;
5052         } else {
5053                 if (INTEL_GEN(dev_priv) >= 11)
5054                         return gen11_irq_handler;
5055                 else if (INTEL_GEN(dev_priv) >= 8)
5056                         return gen8_irq_handler;
5057                 else
5058                         return ironlake_irq_handler;
5059         }
5060 }
5061
5062 static void intel_irq_reset(struct drm_i915_private *dev_priv)
5063 {
5064         if (HAS_GMCH(dev_priv)) {
5065                 if (IS_CHERRYVIEW(dev_priv))
5066                         cherryview_irq_reset(dev_priv);
5067                 else if (IS_VALLEYVIEW(dev_priv))
5068                         valleyview_irq_reset(dev_priv);
5069                 else if (IS_GEN(dev_priv, 4))
5070                         i965_irq_reset(dev_priv);
5071                 else if (IS_GEN(dev_priv, 3))
5072                         i915_irq_reset(dev_priv);
5073                 else
5074                         i8xx_irq_reset(dev_priv);
5075         } else {
5076                 if (INTEL_GEN(dev_priv) >= 11)
5077                         gen11_irq_reset(dev_priv);
5078                 else if (INTEL_GEN(dev_priv) >= 8)
5079                         gen8_irq_reset(dev_priv);
5080                 else
5081                         ironlake_irq_reset(dev_priv);
5082         }
5083 }
5084
5085 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
5086 {
5087         if (HAS_GMCH(dev_priv)) {
5088                 if (IS_CHERRYVIEW(dev_priv))
5089                         cherryview_irq_postinstall(dev_priv);
5090                 else if (IS_VALLEYVIEW(dev_priv))
5091                         valleyview_irq_postinstall(dev_priv);
5092                 else if (IS_GEN(dev_priv, 4))
5093                         i965_irq_postinstall(dev_priv);
5094                 else if (IS_GEN(dev_priv, 3))
5095                         i915_irq_postinstall(dev_priv);
5096                 else
5097                         i8xx_irq_postinstall(dev_priv);
5098         } else {
5099                 if (INTEL_GEN(dev_priv) >= 11)
5100                         gen11_irq_postinstall(dev_priv);
5101                 else if (INTEL_GEN(dev_priv) >= 8)
5102                         gen8_irq_postinstall(dev_priv);
5103                 else
5104                         ironlake_irq_postinstall(dev_priv);
5105         }
5106 }
5107
5108 /**
5109  * intel_irq_install - enables the hardware interrupt
5110  * @dev_priv: i915 device instance
5111  *
5112  * This function enables the hardware interrupt handling, but leaves hotplug
5113  * handling disabled. It is called after intel_irq_init().
5114  *
5115  * In the driver load and resume code we need working interrupts in a few places
5116  * but don't want to deal with the hassle of concurrent probe and hotplug
5117  * workers. Hence the split into this two-stage approach.
5118  */
5119 int intel_irq_install(struct drm_i915_private *dev_priv)
5120 {
5121         int irq = dev_priv->drm.pdev->irq;
5122         int ret;
5123
5124         /*
5125          * We enable some interrupt sources in our postinstall hooks, so mark
5126          * interrupts as enabled _before_ actually enabling them to avoid
5127          * special cases in our ordering checks.
5128          */
5129         dev_priv->runtime_pm.irqs_enabled = true;
5130
5131         dev_priv->drm.irq_enabled = true;
5132
5133         intel_irq_reset(dev_priv);
5134
5135         ret = request_irq(irq, intel_irq_handler(dev_priv),
5136                           IRQF_SHARED, DRIVER_NAME, dev_priv);
5137         if (ret < 0) {
5138                 dev_priv->drm.irq_enabled = false;
5139                 return ret;
5140         }
5141
5142         intel_irq_postinstall(dev_priv);
5143
5144         return ret;
5145 }
5146
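/*
 * Editorial sketch, not part of the file: the two-stage ordering the
 * kerneldoc above describes (intel_irq_init(), then intel_irq_install()),
 * as seen from driver load. Hypothetical wrapper; error handling is
 * simplified compared to the real probe code.
 */
static int __maybe_unused example_irq_probe(struct drm_i915_private *i915)
{
        int ret;

        intel_irq_init(i915);          /* vtables and work items, no HW access */

        ret = intel_irq_install(i915); /* reset HW, request_irq(), postinstall */
        if (ret)
                intel_irq_fini(i915);  /* undo intel_irq_init() on failure */

        return ret;
}
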
5147 /**
5148  * intel_irq_uninstall - finalizes all irq handling
5149  * @dev_priv: i915 device instance
5150  *
5151  * This stops interrupt and hotplug handling and unregisters and frees all
5152  * resources acquired in the init functions.
5153  */
5154 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
5155 {
5156         int irq = dev_priv->drm.pdev->irq;
5157
5158         /*
5159          * FIXME we can get called twice during driver load
5160          * error handling due to intel_modeset_cleanup()
5161          * calling us out of sequence. Would be nice if
5162          * it didn't do that...
5163          */
5164         if (!dev_priv->drm.irq_enabled)
5165                 return;
5166
5167         dev_priv->drm.irq_enabled = false;
5168
5169         intel_irq_reset(dev_priv);
5170
5171         free_irq(irq, dev_priv);
5172
5173         intel_hpd_cancel_work(dev_priv);
5174         dev_priv->runtime_pm.irqs_enabled = false;
5175 }
5176
5177 /**
5178  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
5179  * @dev_priv: i915 device instance
5180  *
5181  * This function is used to disable interrupts at runtime, both in the runtime
5182  * pm and the system suspend/resume code.
5183  */
5184 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
5185 {
5186         intel_irq_reset(dev_priv);
5187         dev_priv->runtime_pm.irqs_enabled = false;
5188         intel_synchronize_irq(dev_priv);
5189 }
5190
5191 /**
5192  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
5193  * @dev_priv: i915 device instance
5194  *
5195  * This function is used to enable interrupts at runtime, both in the runtime
5196  * pm and the system suspend/resume code.
5197  */
5198 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
5199 {
5200         dev_priv->runtime_pm.irqs_enabled = true;
5201         intel_irq_reset(dev_priv);
5202         intel_irq_postinstall(dev_priv);
5203 }
5204
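/*
 * Editorial sketch, not part of the file: how the two runtime-pm helpers
 * above pair up across a suspend/resume cycle. Hypothetical function.
 */
static void __maybe_unused example_runtime_pm_cycle(struct drm_i915_private *i915)
{
        intel_runtime_pm_disable_interrupts(i915);
        /* ... device is powered down and later powered back up ... */
        intel_runtime_pm_enable_interrupts(i915);
}
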
5205 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
5206 {
5207         /*
5208          * We only use drm_irq_uninstall() at unload and VT switch, so
5209          * this is the only thing we need to check.
5210          */
5211         return dev_priv->runtime_pm.irqs_enabled;
5212 }
5213
5214 void intel_synchronize_irq(struct drm_i915_private *i915)
5215 {
5216         synchronize_irq(i915->drm.pdev->irq);
5217 }