/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>
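
/*
 * GETPARAM lets userspace query driver and hardware capabilities with a
 * single ioctl: the caller passes a parameter id and a pointer through
 * which the selected value is copied back.  Illustrative userspace usage
 * (libdrm-style; the surrounding wrapper code is an assumption, not part
 * of this file):
 *
 *	int devid;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *				   .value = &devid };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */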
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck &&
			intel_has_gpu_reset(dev);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}
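
/*
 * SETPARAM is the write-side counterpart of GETPARAM.  Only the fence
 * register reservation is still honoured; the old UMS/DRI parameters
 * are rejected outright.
 */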
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		/* Reject all old ums/dri params. */
		return -ENODEV;

	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
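
/*
 * On Intel platforms the host bridge is always device 0, function 0 on
 * bus 0; its config space carries chipset-level registers (MCHBAR among
 * others) that the driver pokes below.
 */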
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
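
/*
 * MCHBAR is the memory controller hub's register window.  It lives at
 * different config-space offsets in the bridge device depending on
 * generation: 0x44 before gen4 and 0x48 from gen4 onwards.
 */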
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible, noting whether we must disable it on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
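
/*
 * vga_switcheroo drives hybrid-graphics laptops: when the i915 GPU is
 * switched away from, the whole device is suspended through the legacy
 * suspend/resume paths below, and resumed again when switched back.
 */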
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_legacy(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_legacy(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
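
/*
 * Bring up KMS: parse the VBT, register with the VGA arbiter and
 * vga_switcheroo, then initialise stolen memory, interrupts, GEM and the
 * fbdev emulation, in that order.  Every error path unwinds the steps
 * completed so far via the cleanup_* labels at the bottom.
 */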
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	/* intel_guc_ucode_init() needs the mutex to allocate GEM objects */
	mutex_lock(&dev->struct_mutex);
	intel_guc_ucode_init(dev);
	mutex_unlock(&dev->struct_mutex);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	async_schedule(intel_fbdev_initial_config, dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	mutex_lock(&dev->struct_mutex);
	intel_guc_ucode_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
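
/*
 * A firmware framebuffer (e.g. efifb/vesafb) may still be scanning out
 * of the aperture range we are about to take over; evict it before
 * touching the hardware.
 */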
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif
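
/*
 * Likewise kick out vgacon.  Three build variants follow: kernels
 * without a VGA console have nothing to do, kernels without a dummy
 * console to take over cannot proceed, and the full takeover path
 * swaps vgacon for dummycon under the console lock.
 */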
#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
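
/*
 * Slice/subslice/EU (SSEU) topology varies per part: fuses burned at
 * manufacturing disable individual subslices and EUs.  The helpers
 * below decode those fuse registers into the runtime device info.
 */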
static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}
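
/*
 * Gen9 exposes one EU-disable register per slice, one byte per subslice
 * (hence the ss * 8 shift below), while the subslice-disable field in
 * FUSE2 is shared by all slices.
 */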
static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	/*
	 * BXT has a single slice. BXT also has at most 6 EU per subslice,
	 * and therefore only the lowest 6 bits of the 8-bit EU disable
	 * fields are valid.
	 */
	if (IS_BROXTON(dev)) {
		s_max = 1;
		eu_max = 6;
		eu_mask = 0x3f;
	}

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) have 7 EUs.  We can
			 * tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   !IS_VALLEYVIEW(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		}
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->csr_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	/* Load CSR Firmware for SKL */
	intel_csr_ucode_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_freecsr;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_gtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
		ret = -ENOMEM;
		goto out_freedpwq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	i915_audio_component_init(dev_priv);

	return 0;
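
/*
 * Error unwind: each label below undoes one setup step from above and
 * falls through to the next, so a goto releases everything acquired
 * before the failure point, in reverse order.
 */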
out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
	destroy_workqueue(dev_priv->hotplug.dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_freecsr:
	intel_csr_ucode_fini(dev);
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->requests)
		kmem_cache_destroy(dev_priv->requests);
	if (dev_priv->vmas)
		kmem_cache_destroy(dev_priv->vmas);
	if (dev_priv->objects)
		kmem_cache_destroy(dev_priv->objects);
	kfree(dev_priv);
	return ret;
}
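
/*
 * Reverse of i915_driver_load(): quiesce the GPU, then tear everything
 * down in roughly the opposite order it was brought up.
 */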
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	mutex_lock(&dev->struct_mutex);
	intel_guc_ucode_fini(dev);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);
	i915_gem_cleanup_stolen(dev);

	intel_csr_ucode_fini(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->requests)
		kmem_cache_destroy(dev_priv->requests);
	if (dev_priv->vmas)
		kmem_cache_destroy(dev_priv->vmas);
	if (dev_priv->objects)
		kmem_cache_destroy(dev_priv->objects);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_preclose(dev, file);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (file_priv && file_priv->bsd_ring)
		file_priv->bsd_ring = NULL;
	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}
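
/*
 * The ioctl table maps each ioctl number to its handler and access
 * flags: DRM_AUTH requires an authenticated client, DRM_MASTER and
 * DRM_ROOT_ONLY restrict callers further, DRM_UNLOCKED skips the legacy
 * global DRM lock, and DRM_RENDER_ALLOW exposes the ioctl on render
 * nodes.  The dead UMS entries are wired to drm_noop.
 */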
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);