diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 18dfdd5c1b3b1ba5fc8b9c660b37c036d5ea87dd..728ca3ea74d2c85df8734ddaa9285e126bc2c82c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -114,7 +114,7 @@ static bool i915_error_injected(struct drm_i915_private *dev_priv)
                      fmt, ##__VA_ARGS__)
 
 
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
 {
        enum intel_pch ret = PCH_NOP;
 
@@ -125,16 +125,16 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
         * make an educated guess as to which PCH is really there.
         */
 
-       if (IS_GEN5(dev)) {
+       if (IS_GEN5(dev_priv)) {
                ret = PCH_IBX;
                DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
-       } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+       } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
                ret = PCH_CPT;
                DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
-       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                ret = PCH_LPT;
                DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
-       } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                ret = PCH_SPT;
                DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
        }
@@ -150,7 +150,7 @@ static void intel_detect_pch(struct drm_device *dev)
        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
-       if (INTEL_INFO(dev)->num_pipes == 0) {
+       if (INTEL_INFO(dev_priv)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }
@@ -174,40 +174,47 @@ static void intel_detect_pch(struct drm_device *dev)
                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-                               WARN_ON(!IS_GEN5(dev));
+                               WARN_ON(!IS_GEN5(dev_priv));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+                               WARN_ON(!(IS_GEN6(dev_priv) ||
+                                       IS_IVYBRIDGE(dev_priv)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+                               WARN_ON(!(IS_GEN6(dev_priv) ||
+                                       IS_IVYBRIDGE(dev_priv)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
-                               WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+                               WARN_ON(!IS_HASWELL(dev_priv) &&
+                                       !IS_BROADWELL(dev_priv));
+                               WARN_ON(IS_HSW_ULT(dev_priv) ||
+                                       IS_BDW_ULT(dev_priv));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
-                               WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+                               WARN_ON(!IS_HASWELL(dev_priv) &&
+                                       !IS_BROADWELL(dev_priv));
+                               WARN_ON(!IS_HSW_ULT(dev_priv) &&
+                                       !IS_BDW_ULT(dev_priv));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-                               WARN_ON(!IS_SKYLAKE(dev) &&
-                                       !IS_KABYLAKE(dev));
+                               WARN_ON(!IS_SKYLAKE(dev_priv) &&
+                                       !IS_KABYLAKE(dev_priv));
                        } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-                               WARN_ON(!IS_SKYLAKE(dev) &&
-                                       !IS_KABYLAKE(dev));
+                               WARN_ON(!IS_SKYLAKE(dev_priv) &&
+                                       !IS_KABYLAKE(dev_priv));
                        } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_KBP;
                                DRM_DEBUG_KMS("Found KabyPoint PCH\n");
-                               WARN_ON(!IS_KABYLAKE(dev));
+                               WARN_ON(!IS_SKYLAKE(dev_priv) &&
+                                       !IS_KABYLAKE(dev_priv));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
                                   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
                                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -215,7 +222,8 @@ static void intel_detect_pch(struct drm_device *dev)
                                            PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
                                    pch->subsystem_device ==
                                            PCI_SUBDEVICE_ID_QEMU)) {
-                               dev_priv->pch_type = intel_virt_detect_pch(dev);
+                               dev_priv->pch_type =
+                                       intel_virt_detect_pch(dev_priv);
                        } else
                                continue;
 
@@ -255,16 +263,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_BSD:
-               value = intel_engine_initialized(&dev_priv->engine[VCS]);
+               value = !!dev_priv->engine[VCS];
                break;
        case I915_PARAM_HAS_BLT:
-               value = intel_engine_initialized(&dev_priv->engine[BCS]);
+               value = !!dev_priv->engine[BCS];
                break;
        case I915_PARAM_HAS_VEBOX:
-               value = intel_engine_initialized(&dev_priv->engine[VECS]);
+               value = !!dev_priv->engine[VECS];
                break;
        case I915_PARAM_HAS_BSD2:
-               value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+               value = !!dev_priv->engine[VCS2];
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_GEN(dev_priv) >= 4;
@@ -316,6 +324,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
                 */
                value = i915_gem_mmap_gtt_version();
                break;
+       case I915_PARAM_HAS_SCHEDULER:
+               value = dev_priv->engine[RCS] &&
+                       dev_priv->engine[RCS]->schedule;
+               break;
        case I915_PARAM_MMAP_VERSION:
                /* Remember to bump this if the version changes! */
        case I915_PARAM_HAS_GEM:
@@ -367,12 +379,12 @@ static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+       int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;
 
-       if (INTEL_INFO(dev)->gen >= 4)
+       if (INTEL_GEN(dev_priv) >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -399,7 +411,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
                return ret;
        }
 
-       if (INTEL_INFO(dev)->gen >= 4)
+       if (INTEL_GEN(dev_priv) >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));
 
@@ -413,16 +425,16 @@ static void
 intel_setup_mchbar(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+       int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;
 
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return;
 
        dev_priv->mchbar_need_disable = false;
 
-       if (IS_I915G(dev) || IS_I915GM(dev)) {
+       if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
@@ -440,7 +452,7 @@ intel_setup_mchbar(struct drm_device *dev)
        dev_priv->mchbar_need_disable = true;
 
        /* Space is allocated or reserved, so enable it. */
-       if (IS_I915G(dev) || IS_I915GM(dev)) {
+       if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
@@ -453,10 +465,10 @@ static void
 intel_teardown_mchbar(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+       int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 
        if (dev_priv->mchbar_need_disable) {
-               if (IS_I915G(dev) || IS_I915GM(dev)) {
+               if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
                        u32 deven_val;
 
                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
@@ -484,7 +496,7 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
        struct drm_device *dev = cookie;
 
-       intel_modeset_vga_set_state(dev, state);
+       intel_modeset_vga_set_state(to_i915(dev), state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -530,40 +542,17 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .can_switch = i915_switcheroo_can_switch,
 };
 
-static void i915_gem_fini(struct drm_device *dev)
+static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       i915_gem_cleanup_engines(&dev_priv->drm);
+       i915_gem_context_fini(&dev_priv->drm);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       /*
-        * Neither the BIOS, ourselves or any other kernel
-        * expects the system to be in execlists mode on startup,
-        * so we need to reset the GPU back to legacy mode. And the only
-        * known way to disable logical contexts is through a GPU reset.
-        *
-        * So in order to leave the system in a known default configuration,
-        * always reset the GPU upon unload. Afterwards we then clean up the
-        * GEM state tracking, flushing off the requests and leaving the
-        * system in a known idle state.
-        *
-        * Note that is of the upmost importance that the GPU is idle and
-        * all stray writes are flushed *before* we dismantle the backing
-        * storage for the pinned objects.
-        *
-        * However, since we are uncertain that reseting the GPU on older
-        * machines is a good idea, we don't - just in case it leaves the
-        * machine in an unusable condition.
-        */
-       if (HAS_HW_CONTEXTS(dev)) {
-               int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
-               WARN_ON(reset && reset != -ENODEV);
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_engines(dev);
-       i915_gem_context_fini(dev);
-       mutex_unlock(&dev->struct_mutex);
+       rcu_barrier();
+       flush_work(&dev_priv->mm.free_work);
 
-       WARN_ON(!list_empty(&to_i915(dev)->context_list));
+       WARN_ON(!list_empty(&dev_priv->context_list));
 }
 
 static int i915_load_modeset_init(struct drm_device *dev)
@@ -611,7 +600,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
-       intel_modeset_init(dev);
+       ret = intel_modeset_init(dev);
+       if (ret)
+               goto cleanup_irq;
 
        intel_guc_init(dev);
 
@@ -621,7 +612,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_modeset_gem_init(dev);
 
-       if (INTEL_INFO(dev)->num_pipes == 0)
+       if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return 0;
 
        ret = intel_fbdev_init(dev);
@@ -636,7 +627,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
        return 0;
 
 cleanup_gem:
-       i915_gem_fini(dev);
+       if (i915_gem_suspend(dev))
+               DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+       i915_gem_fini(dev_priv);
 cleanup_irq:
        intel_guc_fini(dev);
        drm_irq_uninstall(dev);
@@ -771,6 +764,19 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
        destroy_workqueue(dev_priv->wq);
 }
 
+/*
+ * We don't keep the workarounds for pre-production hardware, so we expect our
+ * driver to fail on these machines in one way or another. A little warning on
+ * dmesg may help both the user and the bug triagers.
+ */
+static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+{
+       if (IS_HSW_EARLY_SDV(dev_priv) ||
+           IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+               DRM_ERROR("This is a pre-production stepping. "
+                         "It may not be fully functional.\n");
+}
+
 /**
  * i915_driver_init_early - setup state not requiring device access
  * @dev_priv: device private
@@ -829,25 +835,24 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        intel_init_dpio(dev_priv);
        intel_power_domains_init(dev_priv);
        intel_irq_init(dev_priv);
+       intel_hangcheck_init(dev_priv);
        intel_init_display_hooks(dev_priv);
        intel_init_clock_gating_hooks(dev_priv);
        intel_init_audio_hooks(dev_priv);
-       i915_gem_load_init(&dev_priv->drm);
+       ret = i915_gem_load_init(&dev_priv->drm);
+       if (ret < 0)
+               goto err_gvt;
 
        intel_display_crc_init(dev_priv);
 
        intel_device_info_dump(dev_priv);
 
-       /* Not all pre-production machines fall into this category, only the
-        * very first ones. Almost everything should work, except for maybe
-        * suspend/resume. And we don't implement workarounds that affect only
-        * pre-production machines. */
-       if (IS_HSW_EARLY_SDV(dev_priv))
-               DRM_INFO("This is an early pre-production Haswell machine. "
-                        "It may not be fully functional.\n");
+       intel_detect_preproduction_hw(dev_priv);
 
        return 0;
 
+err_gvt:
+       intel_gvt_cleanup(dev_priv);
 err_workqueues:
        i915_workqueues_cleanup(dev_priv);
        return ret;
@@ -870,7 +875,7 @@ static int i915_mmio_setup(struct drm_device *dev)
        int mmio_bar;
        int mmio_size;
 
-       mmio_bar = IS_GEN2(dev) ? 1 : 0;
+       mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
        /*
         * Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
@@ -879,7 +884,7 @@ static int i915_mmio_setup(struct drm_device *dev)
         * the register BAR remains the same size for all the earlier
         * generations up to Ironlake.
         */
-       if (INTEL_INFO(dev)->gen < 5)
+       if (INTEL_GEN(dev_priv) < 5)
                mmio_size = 512 * 1024;
        else
                mmio_size = 2 * 1024 * 1024;
@@ -982,7 +987,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
-       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        if (i915_inject_load_failure())
@@ -1023,7 +1027,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
        pci_set_master(pdev);
 
        /* overlay on gen2 is broken and can't address above 1G */
-       if (IS_GEN2(dev)) {
+       if (IS_GEN2(dev_priv)) {
                ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
                if (ret) {
                        DRM_ERROR("failed to set DMA mask\n");
@@ -1040,7 +1044,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
         * behaviour if any general state is accessed within a page above 4GB,
         * which also needs to be handled carefully.
         */
-       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+       if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
                ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
                if (ret) {
@@ -1070,7 +1074,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
         * be lost or delayed, but we use them anyways to avoid
         * stuck interrupts on some machines.
         */
-       if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+       if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
                if (pci_enable_msi(pdev) < 0)
                        DRM_DEBUG_DRIVER("can't enable MSI");
        }
@@ -1121,6 +1125,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
        /* Reveal our presence to userspace */
        if (drm_dev_register(dev, 0) == 0) {
                i915_debugfs_register(dev_priv);
+               i915_guc_register(dev_priv);
                i915_setup_sysfs(dev_priv);
        } else
                DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1159,6 +1164,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
        intel_opregion_unregister(dev_priv);
 
        i915_teardown_sysfs(dev_priv);
+       i915_guc_unregister(dev_priv);
        i915_debugfs_unregister(dev_priv);
        drm_dev_unregister(&dev_priv->drm);
 
@@ -1167,8 +1173,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 
 /**
  * i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * @pdev: PCI device
+ * @ent: matching PCI ID entry
  *
  * The driver load routine has to do several things:
  *   - drive output discovery via intel_modeset_init()
@@ -1242,6 +1248,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
        DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
                 driver.name, driver.major, driver.minor, driver.patchlevel,
                 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+               DRM_INFO("DRM_I915_DEBUG enabled\n");
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+               DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
 
        intel_runtime_pm_put(dev_priv);
 
@@ -1309,7 +1319,7 @@ void i915_driver_unload(struct drm_device *dev)
        drain_workqueue(dev_priv->wq);
 
        intel_guc_fini(dev);
-       i915_gem_fini(dev);
+       i915_gem_fini(dev_priv);
        intel_fbc_cleanup_cfb(dev_priv);
 
        intel_power_domains_fini(dev_priv);
@@ -1431,9 +1441,9 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        intel_suspend_encoders(dev_priv);
 
-       intel_suspend_hw(dev);
+       intel_suspend_hw(dev_priv);
 
-       i915_gem_suspend_gtt_mappings(dev);
+       i915_gem_suspend_gtt_mappings(dev_priv);
 
        i915_save_state(dev);
 
@@ -1507,7 +1517,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
         * Fujitsu FSC S7110
         * Acer Aspire 1830T
         */
-       if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+       if (!(hibernation && INTEL_GEN(dev_priv) < 6))
                pci_set_power_state(pdev, PCI_D3hot);
 
        dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
@@ -1595,6 +1605,8 @@ static int i915_drm_resume(struct drm_device *dev)
 
        intel_display_resume(dev);
 
+       drm_kms_helper_poll_enable(dev);
+
        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
@@ -1602,8 +1614,6 @@ static int i915_drm_resume(struct drm_device *dev)
         * notifications.
         * */
        intel_hpd_init(dev_priv);
-       /* Config may have changed between suspend and resume */
-       drm_helper_hpd_irq_event(dev);
 
        intel_opregion_register(dev_priv);
 
@@ -1616,7 +1626,6 @@ static int i915_drm_resume(struct drm_device *dev)
        intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
        intel_autoenable_gt_powersave(dev_priv);
-       drm_kms_helper_poll_enable(dev);
 
        enable_rpm_wakeref_asserts(dev_priv);
 
@@ -1721,6 +1730,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
        return i915_drm_resume(dev);
 }
 
+static void disable_engines_irq(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       /* Ensure the irq handler finishes, and is not run again. */
+       disable_irq(dev_priv->drm.irq);
+       for_each_engine(engine, dev_priv, id)
+               tasklet_kill(&engine->irq_tasklet);
+}
+
+static void enable_engines_irq(struct drm_i915_private *dev_priv)
+{
+       enable_irq(dev_priv->drm.irq);
+}
+
 /**
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
@@ -1754,7 +1779,11 @@ void i915_reset(struct drm_i915_private *dev_priv)
        error->reset_count++;
 
        pr_notice("drm/i915: Resetting chip after gpu hang\n");
+
+       disable_engines_irq(dev_priv);
        ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
+       enable_engines_irq(dev_priv);
+
        if (ret) {
                if (ret != -ENODEV)
                        DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -2240,7 +2269,6 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                                bool rpm_resume)
 {
-       struct drm_device *dev = &dev_priv->drm;
        int err;
        int ret;
 
@@ -2264,10 +2292,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 
        vlv_check_no_gt_access(dev_priv);
 
-       if (rpm_resume) {
-               intel_init_clock_gating(dev);
-               i915_gem_restore_fences(dev);
-       }
+       if (rpm_resume)
+               intel_init_clock_gating(dev_priv);
 
        return ret;
 }
@@ -2282,37 +2308,18 @@ static int intel_runtime_suspend(struct device *kdev)
        if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
                return -ENODEV;
 
-       if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+       if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
                return -ENODEV;
 
        DRM_DEBUG_KMS("Suspending device\n");
 
-       /*
-        * We could deadlock here in case another thread holding struct_mutex
-        * calls RPM suspend concurrently, since the RPM suspend will wait
-        * first for this RPM suspend to finish. In this case the concurrent
-        * RPM resume will be followed by its RPM suspend counterpart. Still
-        * for consistency return -EAGAIN, which will reschedule this suspend.
-        */
-       if (!mutex_trylock(&dev->struct_mutex)) {
-               DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
-               /*
-                * Bump the expiration timestamp, otherwise the suspend won't
-                * be rescheduled.
-                */
-               pm_runtime_mark_last_busy(kdev);
-
-               return -EAGAIN;
-       }
-
        disable_rpm_wakeref_asserts(dev_priv);
 
        /*
         * We are safe here against re-faults, since the fault handler takes
         * an RPM reference.
         */
-       i915_gem_release_all_mmaps(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
+       i915_gem_runtime_suspend(dev_priv);
 
        intel_guc_suspend(dev);
 
@@ -2372,7 +2379,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
        assert_forcewakes_inactive(dev_priv);
 
-       if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_poll_init(dev_priv);
 
        DRM_DEBUG_KMS("Device suspended\n");
@@ -2386,7 +2393,7 @@ static int intel_runtime_resume(struct device *kdev)
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
-       if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+       if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
                return -ENODEV;
 
        DRM_DEBUG_KMS("Resuming device\n");
@@ -2404,7 +2411,7 @@ static int intel_runtime_resume(struct device *kdev)
        if (IS_GEN6(dev_priv))
                intel_init_pch_refclk(dev);
 
-       if (IS_BROXTON(dev)) {
+       if (IS_BROXTON(dev_priv)) {
                bxt_disable_dc9(dev_priv);
                bxt_display_core_init(dev_priv, true);
                if (dev_priv->csr.dmc_payload &&
@@ -2420,7 +2427,8 @@ static int intel_runtime_resume(struct device *kdev)
         * No point of rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
-       i915_gem_init_swizzling(dev);
+       i915_gem_init_swizzling(dev_priv);
+       i915_gem_restore_fences(dev_priv);
 
        intel_runtime_pm_enable_interrupts(dev_priv);
 
@@ -2495,9 +2503,7 @@ static const struct file_operations i915_driver_fops = {
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
        .read = drm_read,
-#ifdef CONFIG_COMPAT
        .compat_ioctl = i915_compat_ioctl,
-#endif
        .llseek = noop_llseek,
 };
 
@@ -2577,7 +2583,7 @@ static struct drm_driver driver = {
        .set_busid = drm_pci_set_busid,
 
        .gem_close_object = i915_gem_close_object,
-       .gem_free_object = i915_gem_free_object,
+       .gem_free_object_unlocked = i915_gem_free_object,
        .gem_vm_ops = &i915_gem_vm_ops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
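
The I915_PARAM_HAS_SCHEDULER case added above is reported through the existing
i915_getparam() ioctl, so userspace can probe for the new request scheduler
before relying on it. A minimal sketch of such a probe, assuming a uapi
i915_drm.h recent enough to define the parameter and an i915 device node at
/dev/dri/card0 (both assumptions; error handling kept to a minimum):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <i915_drm.h>	/* from libdrm; defines drm_i915_getparam_t */

    int main(void)
    {
    	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed i915 node */
    	int has_scheduler = 0;
    	drm_i915_getparam_t gp = {
    		.param = I915_PARAM_HAS_SCHEDULER,
    		.value = &has_scheduler,
    	};

    	if (fd < 0)
    		return 1;
    	/* on success the kernel writes the parameter's value to *gp.value */
    	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
    		printf("scheduler: %s\n", has_scheduler ? "yes" : "no");
    	return 0;
    }

As with the kernel-side implementation, the value is nonzero only when the
render engine exists and exposes an engine->schedule callback.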