/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}
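
/*
 * Usage sketch (illustrative, not part of the driver): loading with the
 * module parameter i915.inject_load_failure=N, e.g.
 *
 *	modprobe i915 inject_load_failure=3
 *
 * makes the Nth i915_inject_load_failure() checkpoint on the load path
 * report a failure, so the error unwinding of each init stage can be
 * exercised in turn.
 */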

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

static void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *kdev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(kdev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				     \
	__i915_printk(dev_priv,						     \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev_priv)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	} else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		ret = PCH_CNP;
		DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
	}

	return ret;
}

static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for the VMM, which then
	 * only needs to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
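	/*
	 * Note on the loop below: pci_get_class() drops the reference on the
	 * device passed in as the cursor and takes a fresh reference on the
	 * next match, so iterating this way walks every ISA bridge without
	 * leaking references; only the final 'pch' (if any) is left for us
	 * to pci_dev_put().
	 */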
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev_priv));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!IS_GEN6(dev_priv) &&
					!IS_IVYBRIDGE(dev_priv));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!IS_GEN6(dev_priv) &&
					!IS_IVYBRIDGE(dev_priv));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
				/* WildcatPoint is LPT compatible */
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
				/* WildcatPoint is LPT compatible */
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CNP;
				DRM_DEBUG_KMS("Found CannonPoint PCH\n");
				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CNP;
				DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
					!IS_COFFEELAKE(dev_priv));
			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
				   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
				   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
				    pch->subsystem_vendor ==
					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
				    pch->subsystem_device ==
					    PCI_SUBDEVICE_ID_QEMU)) {
				dev_priv->pch_type =
					intel_virt_detect_pch(dev_priv);
			} else
				continue;

			/* intel_detect_pch() should only find one PCH. */
			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915.semaphores;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		if (value && intel_has_reset_engine(dev_priv))
			value = 2;
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_HUC_STATUS:
		intel_runtime_pm_get(dev_priv);
		value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
		intel_runtime_pm_put(dev_priv);
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
	case I915_PARAM_HAS_SCHEDULER:
		value = dev_priv->engine[RCS] &&
			dev_priv->engine[RCS]->schedule;
		break;
	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
	case I915_PARAM_HAS_EXEC_ASYNC:
	case I915_PARAM_HAS_EXEC_FENCE:
	case I915_PARAM_HAS_EXEC_CAPTURE:
	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	case I915_PARAM_SLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
		if (!value)
			return -EINVAL;
		break;
	case I915_PARAM_SUBSLICE_MASK:
		value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
		if (!value)
			return -EINVAL;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}
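
/*
 * Userspace view, for illustration only (a sketch assuming libdrm's
 * drmIoctl(); not part of this file): parameters are queried one at a
 * time through DRM_IOCTL_I915_GETPARAM, e.g.
 *
 *	int chipset_id = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &chipset_id,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("device id 0x%04x\n", chipset_id);
 *
 * and a parameter unknown to the kernel fails with EINVAL, which is how
 * userspace feature detection distinguishes older kernels.
 */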

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/*
 * Setup MCHBAR if possible; mchbar_need_disable records whether teardown
 * must disable it again.
 */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_i915_private *dev_priv = cookie;

	intel_modeset_vga_set_state(dev_priv, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_resume_switcheroo(struct drm_device *dev);
static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	flush_workqueue(dev_priv->wq);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	i915_gem_cleanup_engines(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_bios_init(dev_priv);

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev_priv);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_uc_init_fw(dev_priv);

	ret = i915_gem_init(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev_priv);
cleanup_uc:
	intel_uc_fini_fw(dev_priv);
cleanup_irq:
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev_priv);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
	vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
	vga_client_register(pdev, NULL, NULL, NULL);
out:
	return ret;
}

static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}
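
/*
 * Queueing sketch (illustrative; 'some_work' is a hypothetical
 * work_struct): because dev_priv->wq is ordered, items queued on it run
 * strictly one at a time, in queueing order:
 *
 *	queue_work(dev_priv->wq, &some_work);
 *
 * which is what makes the single-instance assumption above safe.
 */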

static void i915_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		kfree(engine);
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 * @ent: the matching pci_device_id
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);
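
	/*
	 * Worked example for the mask above: a gen9 part has
	 * device_info->gen == 9, so gen_mask becomes BIT(8) == 0x100;
	 * generation checks can then test a single bit instead of
	 * comparing the gen number.
	 */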

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);

	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);

	intel_uc_init_early(dev_priv);
	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto err_engines;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_hangcheck_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	ret = i915_gem_load_init(dev_priv);
	if (ret < 0)
		goto err_irq;

	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	i915_perf_init(dev_priv);

	return 0;

err_irq:
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
err_engines:
	i915_engines_cleanup(dev_priv);

	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_perf_fini(dev_priv);
	i915_gem_load_cleanup(dev_priv);
	intel_irq_fini(dev_priv);
	i915_workqueues_cleanup(dev_priv);
	i915_engines_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap so that it
	 * does not clobber the GTT, which we want to map with ioremap_wc
	 * instead. Fortunately, the register BAR remains the same size for
	 * all the earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	return 0;
}

static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_teardown_mchbar(dev_priv);
	pci_iounmap(pdev, dev_priv->regs);
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = i915_mmio_setup(dev_priv);
	if (ret < 0)
		goto put_bridge;

	intel_uncore_init(dev_priv);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_uncore_fini(dev_priv);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev_priv);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));

	intel_uc_sanitize_options(dev_priv);

	intel_gvt_sanitize_options(dev_priv);
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	pci_set_master(pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto out_ggtt;

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_guc_log_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_audio_deinit(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	i915_guc_log_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	/* Enable nuclear pageflip on ILK+ */
	if (!i915.nuclear_pageflip && match_info->gen < 5)
		driver.driver_features &= ~DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
		goto out_free;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	pci_set_drvdata(pdev, &dev_priv->drm);
	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	dev_priv->ipc_enabled = false;

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_fini(&dev_priv->drm);
out_free:
	kfree(dev_priv);
	return ret;
}

void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	intel_fbdev_fini(dev);

	if (i915_gem_suspend(dev_priv))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	drm_atomic_helper_shutdown(dev);

	intel_gvt_cleanup(dev_priv);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(pdev);
	vga_client_register(pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_reset_error_state(dev_priv);

	/* Flush any outstanding unpin_work. */
	drain_workqueue(dev_priv->wq);

	i915_gem_fini(dev_priv);
	intel_uc_fini_fw(dev_priv);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_driver_cleanup_early(dev_priv);
	drm_dev_fini(&dev_priv->drm);

	kfree(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	kfree(file_priv);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
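
/*
 * Example of the check in suspend_to_idle() (informational): for
 * suspend-to-idle ("s2idle") the ACPI target state is ACPI_STATE_S0,
 * which is < ACPI_STATE_S3, so the check returns true; a full
 * suspend-to-RAM targets S3 and returns false. Without CONFIG_ACPI_SLEEP
 * we conservatively report false.
 */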

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	error = i915_gem_suspend(dev_priv);
	if (error) {
		dev_err(&pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev_priv, opregion_target_state);

	intel_uncore_suspend(dev_priv);
	intel_opregion_unregister(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	intel_display_set_init_power(dev_priv, false);

	fw_csr = !IS_GEN9_LP(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = 0;
	if (IS_GEN9_LP(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 */
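	/*
	 * Reading the condition below: only when we are hibernating on a
	 * pre-gen6 platform is the D3hot transition skipped; every other
	 * case (all suspends, and hibernation on gen6+) still puts the
	 * device into D3hot as usual.
	 */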
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);
	intel_sanitize_gt_powersave(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	i915_gem_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);
	intel_opregion_setup(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * irqs.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev_priv)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		i915_gem_set_wedged(dev_priv);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev_priv);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_register(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);

	intel_autoenable_gt_powersave(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_resume_early(dev_priv);

	if (IS_GEN9_LP(dev_priv)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev_priv);

	if (IS_GEN9_LP(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

	i915_gem_sanitize(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

out:
	dev_priv->suspended_to_idle = false;

	return ret;
}

static int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

/**
 * i915_reset - reset chip after a hang
 * @dev_priv: device private to reset
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Caller must hold the struct_mutex.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void i915_reset(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));

	if (!test_bit(I915_RESET_HANDOFF, &error->flags))
		return;

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!i915_gem_unset_wedged(dev_priv))
		goto wakeup;

	error->reset_count++;

	pr_notice("drm/i915: Resetting chip after gpu hang\n");
	disable_irq(dev_priv->drm.irq);
	ret = i915_gem_reset_prepare(dev_priv);
	if (ret) {
		DRM_ERROR("GPU recovery failed\n");
		intel_gpu_reset(dev_priv, ALL_ENGINES);
		goto error;
	}

	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
	if (ret) {
		if (ret != -ENODEV)
			DRM_ERROR("Failed to reset chip: %i\n", ret);
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	i915_gem_reset(dev_priv);
	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		goto error;
	}

	i915_queue_hangcheck(dev_priv);

finish:
	i915_gem_reset_finish(dev_priv);
	enable_irq(dev_priv->drm.irq);

wakeup:
	clear_bit(I915_RESET_HANDOFF, &error->flags);
	wake_up_bit(&error->flags, I915_RESET_HANDOFF);
	return;

error:
	i915_gem_set_wedged(dev_priv);
	goto finish;
}
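
/*
 * Sketch of the handoff the wake-up above pairs with (illustrative, not
 * code from this driver): a waiter that needs the reset to complete can
 * sleep on the same flag bit, e.g.
 *
 *	wait_on_bit(&error->flags, I915_RESET_HANDOFF,
 *		    TASK_UNINTERRUPTIBLE);
 *
 * and the clear_bit() + wake_up_bit() pair in i915_reset() is what
 * releases such a waiter.
 */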

/**
 * i915_reset_engine - reset GPU engine to recover from a hang
 * @engine: engine to reset
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int i915_reset_engine(struct intel_engine_cs *engine)
{
	struct i915_gpu_error *error = &engine->i915->gpu_error;
	struct drm_i915_gem_request *active_request;
	int ret;

	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));

	DRM_DEBUG_DRIVER("resetting %s\n", engine->name);

	active_request = i915_gem_reset_prepare_engine(engine);
	if (IS_ERR(active_request)) {
		DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
		ret = PTR_ERR(active_request);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	i915_gem_reset_engine(engine, active_request);

	/* Finally, reset just this engine. */
	ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));

	i915_gem_reset_finish_engine(engine);

	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->init_hw(engine);
	if (ret)
		goto out;

	error->reset_engine_count[engine->id]++;
out:
	return ret;
}

static int i915_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (!dev) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(dev);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(dev, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(dev);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_device *dev = &kdev_to_i915(kdev)->drm;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(dev);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	int ret;

	ret = i915_pm_suspend(kdev);
	if (ret)
		return ret;

	ret = i915_gem_freeze(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	int ret;

	ret = i915_pm_suspend_late(kdev);
	if (ret)
		return ret;

	ret = i915_gem_freeze_late(kdev_to_i915(kdev));
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 */
}
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	/* The high word of ARB_MODE acts as a write mask for the mode bits. */
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);

	/*
	 * Preserve the GT allow wake and GFX force clock bits: they are not
	 * restored here, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
				  u32 mask, u32 val)
{
	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
			3);
}
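
/*
 * The driver's sleeping wait_for() helper returns 0 once the condition holds
 * and -ETIMEDOUT otherwise, so the callers below can treat any non-zero
 * result from vlv_wait_for_pw_status() as a timeout.
 */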
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
}
static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), so use 3 msec for safety.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	/* The error flag appears to be sticky; clear it by writing it back. */
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well-on flags as debug-only, so
	 * don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
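
/*
 * In short, s0ix entry above forces the GFX clock on, blocks GT wake
 * requests, snapshots the Gunit state (VLV only; the save/restore is skipped
 * on CHV), then stops forcing the clock; the err1/err2 paths unwind those
 * steps in reverse for safety.
 */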
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}
static int intel_runtime_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_guc_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = 0;
	if (IS_GEN9_LP(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_suspend(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the
	 * arguments used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
static int intel_runtime_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev_priv);

	if (IS_GEN9_LP(dev_priv)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev_priv);
	i915_gem_restore_fences(dev_priv);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
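
/*
 * Nothing in this file consumes i915_pm_ops directly; in this kernel's
 * layout the PCI glue in i915_pci.c is expected to reference it from its
 * struct pci_driver, roughly as .driver.pm = &i915_pm_ops.
 */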
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};
/* The legacy pin/unpin interface is no longer supported; reject it. */
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}
static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
};
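
/*
 * Note that the table is indexed by ioctl number, and I915_GEM_EXECBUFFER2_WR
 * shares its number with I915_GEM_EXECBUFFER2 (the _WR variant differs only
 * in the ioctl direction bits), so the single i915_gem_execbuffer2 entry
 * above serves both requests.
 */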
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
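
/*
 * This struct is consumed during probe; in this kernel version
 * i915_driver_load(), earlier in this file, passes it to drm_dev_init()
 * when setting up the drm_device.
 */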
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif