2 * Copyright © 2010 Daniel Vetter
3 * Copyright © 2011-2014 Intel Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 #include <linux/slab.h> /* fault-inject.h is not standalone! */
28 #include <linux/fault-inject.h>
29 #include <linux/log2.h>
30 #include <linux/random.h>
31 #include <linux/seq_file.h>
32 #include <linux/stop_machine.h>
34 #include <asm/set_memory.h>
37 #include <drm/i915_drm.h>
40 #include "i915_vgpu.h"
41 #include "i915_trace.h"
42 #include "intel_drv.h"
43 #include "intel_frontbuffer.h"
45 #define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
48 * DOC: Global GTT views
50 * Background and previous state
52 * Historically objects could exist (be bound) in global GTT space only as
53 * singular instances with a view representing all of the object's backing pages
54 * in a linear fashion. This view will be called a normal view.
56 * To support multiple views of the same object, where the number of mapped
57 * pages is not equal to the backing store, or where the layout of the pages
58 * is not linear, the concept of a GGTT view was added.
60 * One example of an alternative view is a stereo display driven by a single
61 * image. In this case we would have a framebuffer looking like this
67 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
68 * rendering. In contrast, fed to the display engine would be an alternative
69 * view which could look something like this:
74 * In this example both the size and layout of pages in the alternative view are
75 * different from the normal view.
77 * Implementation and usage
79 * GGTT views are implemented using VMAs and are distinguished via enum
80 * i915_ggtt_view_type and struct i915_ggtt_view.
82 * A new flavour of core GEM functions which work with GGTT bound objects was
83 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
84 * renaming in large amounts of code. They take the struct i915_ggtt_view
85 * parameter encapsulating all metadata required to implement a view.
87 * As a helper for callers which are only interested in the normal view,
88 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
89 * GEM API functions, the ones not taking the view parameter, operate on
90 * the normal GGTT view.
92 * Code wanting to add or use a new GGTT view needs to:
94 * 1. Add a new enum with a suitable name.
95 * 2. Extend the metadata in the i915_ggtt_view structure if required.
96 * 3. Add support to i915_get_ggtt_vma_pages().
98 * New views are required to build a scatter-gather table from within the
99 * i915_get_ggtt_vma_pages() function. This table is stored in the vma.ggtt_view and
100 * exists for the lifetime of a VMA.
102 * The core API is designed to have copy semantics, which means that the passed
103 * in struct i915_ggtt_view does not need to be persistent (left around after
104 * calling the core API functions).
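 *
 * As a purely illustrative sketch (not part of the driver), a caller that
 * wants a specific view can therefore keep the descriptor on the stack,
 * e.g. assuming the usual i915_gem_object_ggtt_pin() helper:
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL };
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_GLOBAL);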
109 i915_get_ggtt_vma_pages(struct i915_vma *vma);
111 static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
113 /* Note that as an uncached mmio write, this should flush the
114 * WCB of the writes into the GGTT before it triggers the invalidate.
116 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
119 static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
121 gen6_ggtt_invalidate(dev_priv);
122 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
125 static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
127 intel_gtt_chipset_flush();
130 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
132 i915->ggtt.invalidate(i915);
135 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
138 bool has_aliasing_ppgtt;
140 bool has_full_48bit_ppgtt;
142 has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
143 has_full_ppgtt = dev_priv->info.has_full_ppgtt;
144 has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
146 if (intel_vgpu_active(dev_priv)) {
147 /* emulation is too hard */
148 has_full_ppgtt = false;
149 has_full_48bit_ppgtt = false;
152 if (!has_aliasing_ppgtt)
156 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
157 * execlists, the sole mechanism available to submit work.
159 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
162 if (enable_ppgtt == 1)
165 if (enable_ppgtt == 2 && has_full_ppgtt)
168 if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
171 #ifdef CONFIG_INTEL_IOMMU
172 /* Disable ppgtt on SNB if VT-d is on. */
173 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
174 DRM_INFO("Disabling PPGTT because VT-d is on\n");
179 /* Early VLV doesn't have this */
180 if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
181 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
185 if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
186 return has_full_48bit_ppgtt ? 3 : 2;
188 return has_aliasing_ppgtt ? 1 : 0;
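/*
 * Roughly, the mode returned above follows the module parameter convention:
 * 0 disables PPGTT entirely, 1 selects aliasing PPGTT, 2 full 32b PPGTT and
 * 3 full 48b PPGTT, subject to the hardware/vgpu restrictions checked above.
 */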
191 static int ppgtt_bind_vma(struct i915_vma *vma,
192 enum i915_cache_level cache_level,
198 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
199 ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
205 vma->pages = vma->obj->mm.pages;
207 /* Currently applicable only to VLV */
210 pte_flags |= PTE_READ_ONLY;
212 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
213 cache_level, pte_flags);
218 static void ppgtt_unbind_vma(struct i915_vma *vma)
220 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
223 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
224 enum i915_cache_level level)
226 gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
230 case I915_CACHE_NONE:
231 pte |= PPAT_UNCACHED_INDEX;
234 pte |= PPAT_DISPLAY_ELLC_INDEX;
237 pte |= PPAT_CACHED_INDEX;
244 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
245 const enum i915_cache_level level)
247 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
249 if (level != I915_CACHE_NONE)
250 pde |= PPAT_CACHED_PDE_INDEX;
252 pde |= PPAT_UNCACHED_INDEX;
256 #define gen8_pdpe_encode gen8_pde_encode
257 #define gen8_pml4e_encode gen8_pde_encode
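/*
 * The PDPE and PML4E entries use the same layout as a PDE (address plus PPAT
 * bits), which is why the two encoders above can simply alias
 * gen8_pde_encode().
 */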
259 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
260 enum i915_cache_level level,
263 gen6_pte_t pte = GEN6_PTE_VALID;
264 pte |= GEN6_PTE_ADDR_ENCODE(addr);
267 case I915_CACHE_L3_LLC:
269 pte |= GEN6_PTE_CACHE_LLC;
271 case I915_CACHE_NONE:
272 pte |= GEN6_PTE_UNCACHED;
281 static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
282 enum i915_cache_level level,
285 gen6_pte_t pte = GEN6_PTE_VALID;
286 pte |= GEN6_PTE_ADDR_ENCODE(addr);
289 case I915_CACHE_L3_LLC:
290 pte |= GEN7_PTE_CACHE_L3_LLC;
293 pte |= GEN6_PTE_CACHE_LLC;
295 case I915_CACHE_NONE:
296 pte |= GEN6_PTE_UNCACHED;
305 static gen6_pte_t byt_pte_encode(dma_addr_t addr,
306 enum i915_cache_level level,
309 gen6_pte_t pte = GEN6_PTE_VALID;
310 pte |= GEN6_PTE_ADDR_ENCODE(addr);
312 if (!(flags & PTE_READ_ONLY))
313 pte |= BYT_PTE_WRITEABLE;
315 if (level != I915_CACHE_NONE)
316 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
321 static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
322 enum i915_cache_level level,
325 gen6_pte_t pte = GEN6_PTE_VALID;
326 pte |= HSW_PTE_ADDR_ENCODE(addr);
328 if (level != I915_CACHE_NONE)
329 pte |= HSW_WB_LLC_AGE3;
334 static gen6_pte_t iris_pte_encode(dma_addr_t addr,
335 enum i915_cache_level level,
338 gen6_pte_t pte = GEN6_PTE_VALID;
339 pte |= HSW_PTE_ADDR_ENCODE(addr);
342 case I915_CACHE_NONE:
345 pte |= HSW_WT_ELLC_LLC_AGE3;
348 pte |= HSW_WB_ELLC_LLC_AGE3;
355 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
359 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
360 i915_gem_shrink_all(vm->i915);
362 if (vm->free_pages.nr)
363 return vm->free_pages.pages[--vm->free_pages.nr];
365 page = alloc_page(gfp);
370 set_pages_array_wc(&page, 1);
375 static void vm_free_pages_release(struct i915_address_space *vm)
377 GEM_BUG_ON(!pagevec_count(&vm->free_pages));
380 set_pages_array_wb(vm->free_pages.pages,
381 pagevec_count(&vm->free_pages));
383 __pagevec_release(&vm->free_pages);
386 static void vm_free_page(struct i915_address_space *vm, struct page *page)
388 if (!pagevec_add(&vm->free_pages, page))
389 vm_free_pages_release(vm);
392 static int __setup_page_dma(struct i915_address_space *vm,
393 struct i915_page_dma *p,
396 p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
397 if (unlikely(!p->page))
400 p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
401 PCI_DMA_BIDIRECTIONAL);
402 if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
403 vm_free_page(vm, p->page);
410 static int setup_page_dma(struct i915_address_space *vm,
411 struct i915_page_dma *p)
413 return __setup_page_dma(vm, p, I915_GFP_DMA);
416 static void cleanup_page_dma(struct i915_address_space *vm,
417 struct i915_page_dma *p)
419 dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
420 vm_free_page(vm, p->page);
423 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
425 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
426 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
427 #define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
428 #define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
430 static void fill_page_dma(struct i915_address_space *vm,
431 struct i915_page_dma *p,
434 u64 * const vaddr = kmap_atomic(p->page);
437 for (i = 0; i < 512; i++)
440 kunmap_atomic(vaddr);
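/*
 * fill_page_dma_32() below replicates a 32b value into both halves of a 64b
 * word, so the 64b filler above can also be used to initialise a page of
 * gen6-style (32b) PTEs.
 */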
443 static void fill_page_dma_32(struct i915_address_space *vm,
444 struct i915_page_dma *p,
447 fill_page_dma(vm, p, (u64)v << 32 | v);
451 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
453 return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
456 static void cleanup_scratch_page(struct i915_address_space *vm)
458 cleanup_page_dma(vm, &vm->scratch_page);
461 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
463 struct i915_page_table *pt;
465 pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
467 return ERR_PTR(-ENOMEM);
469 if (unlikely(setup_px(vm, pt))) {
471 return ERR_PTR(-ENOMEM);
478 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
484 static void gen8_initialize_pt(struct i915_address_space *vm,
485 struct i915_page_table *pt)
488 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
491 static void gen6_initialize_pt(struct i915_address_space *vm,
492 struct i915_page_table *pt)
495 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
498 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
500 struct i915_page_directory *pd;
502 pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
504 return ERR_PTR(-ENOMEM);
506 if (unlikely(setup_px(vm, pd))) {
508 return ERR_PTR(-ENOMEM);
515 static void free_pd(struct i915_address_space *vm,
516 struct i915_page_directory *pd)
522 static void gen8_initialize_pd(struct i915_address_space *vm,
523 struct i915_page_directory *pd)
528 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
529 for (i = 0; i < I915_PDES; i++)
530 pd->page_table[i] = vm->scratch_pt;
533 static int __pdp_init(struct i915_address_space *vm,
534 struct i915_page_directory_pointer *pdp)
536 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
539 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
540 GFP_KERNEL | __GFP_NOWARN);
541 if (unlikely(!pdp->page_directory))
544 for (i = 0; i < pdpes; i++)
545 pdp->page_directory[i] = vm->scratch_pd;
550 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
552 kfree(pdp->page_directory);
553 pdp->page_directory = NULL;
556 static inline bool use_4lvl(const struct i915_address_space *vm)
558 return i915_vm_is_48bit(vm);
561 static struct i915_page_directory_pointer *
562 alloc_pdp(struct i915_address_space *vm)
564 struct i915_page_directory_pointer *pdp;
567 WARN_ON(!use_4lvl(vm));
569 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
571 return ERR_PTR(-ENOMEM);
573 ret = __pdp_init(vm, pdp);
577 ret = setup_px(vm, pdp);
591 static void free_pdp(struct i915_address_space *vm,
592 struct i915_page_directory_pointer *pdp)
603 static void gen8_initialize_pdp(struct i915_address_space *vm,
604 struct i915_page_directory_pointer *pdp)
606 gen8_ppgtt_pdpe_t scratch_pdpe;
608 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
610 fill_px(vm, pdp, scratch_pdpe);
613 static void gen8_initialize_pml4(struct i915_address_space *vm,
614 struct i915_pml4 *pml4)
619 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
620 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
621 pml4->pdps[i] = vm->scratch_pdp;
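/*
 * Note that the gen8_initialize_pt/pd/pdp/pml4 helpers chain the scratch
 * structures together (pml4e -> scratch_pdp -> scratch_pd -> scratch_pt ->
 * scratch_page), so a lookup of an unallocated address resolves to the
 * harmless scratch page rather than faulting.
 */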
624 /* Broadwell Page Directory Pointer Descriptors */
625 static int gen8_write_pdp(struct drm_i915_gem_request *req,
629 struct intel_engine_cs *engine = req->engine;
634 cs = intel_ring_begin(req, 6);
638 *cs++ = MI_LOAD_REGISTER_IMM(1);
639 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
640 *cs++ = upper_32_bits(addr);
641 *cs++ = MI_LOAD_REGISTER_IMM(1);
642 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
643 *cs++ = lower_32_bits(addr);
644 intel_ring_advance(req, cs);
649 static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
650 struct drm_i915_gem_request *req)
654 for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
655 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
657 ret = gen8_write_pdp(req, i, pd_daddr);
665 static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
666 struct drm_i915_gem_request *req)
668 return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
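/*
 * In 4-level mode only PDP register 0 needs to be loaded, and it points at
 * the PML4, whereas the 3-level path above programs all four PDP entries
 * individually.
 */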
671 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
672 * the page table structures, we mark them dirty so that
673 * context switching/execlist queuing code takes extra steps
674 * to ensure that tlbs are flushed.
676 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
678 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
681 /* Removes entries from a single page table, releasing it if it's empty.
682 * Caller can use the return value to update higher-level entries.
684 static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
685 struct i915_page_table *pt,
686 u64 start, u64 length)
688 unsigned int num_entries = gen8_pte_count(start, length);
689 unsigned int pte = gen8_pte_index(start);
690 unsigned int pte_end = pte + num_entries;
691 const gen8_pte_t scratch_pte =
692 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
695 GEM_BUG_ON(num_entries > pt->used_ptes);
697 pt->used_ptes -= num_entries;
701 vaddr = kmap_atomic_px(pt);
702 while (pte < pte_end)
703 vaddr[pte++] = scratch_pte;
704 kunmap_atomic(vaddr);
709 static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
710 struct i915_page_directory *pd,
711 struct i915_page_table *pt,
716 pd->page_table[pde] = pt;
718 vaddr = kmap_atomic_px(pd);
719 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
720 kunmap_atomic(vaddr);
723 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
724 struct i915_page_directory *pd,
725 u64 start, u64 length)
727 struct i915_page_table *pt;
730 gen8_for_each_pde(pt, pd, start, length, pde) {
731 GEM_BUG_ON(pt == vm->scratch_pt);
733 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
736 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
737 GEM_BUG_ON(!pd->used_pdes);
743 return !pd->used_pdes;
746 static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
747 struct i915_page_directory_pointer *pdp,
748 struct i915_page_directory *pd,
751 gen8_ppgtt_pdpe_t *vaddr;
753 pdp->page_directory[pdpe] = pd;
757 vaddr = kmap_atomic_px(pdp);
758 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
759 kunmap_atomic(vaddr);
762 /* Removes entries from a single page dir pointer, releasing it if it's empty.
763 * Caller can use the return value to update higher-level entries
765 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
766 struct i915_page_directory_pointer *pdp,
767 u64 start, u64 length)
769 struct i915_page_directory *pd;
772 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
773 GEM_BUG_ON(pd == vm->scratch_pd);
775 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
778 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
779 GEM_BUG_ON(!pdp->used_pdpes);
785 return !pdp->used_pdpes;
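/*
 * The clear helpers propagate emptiness upwards: once a page table is fully
 * cleared it is freed and replaced by the scratch entry at the next level,
 * and the same applies to directories and directory pointers above it.
 */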
788 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
789 u64 start, u64 length)
791 gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
794 static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
795 struct i915_page_directory_pointer *pdp,
798 gen8_ppgtt_pml4e_t *vaddr;
800 pml4->pdps[pml4e] = pdp;
802 vaddr = kmap_atomic_px(pml4);
803 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
804 kunmap_atomic(vaddr);
807 /* Removes entries from a single pml4.
808 * This is the top-level structure in 4-level page tables used on gen8+.
809 * Empty entries are always scratch pml4e.
811 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
812 u64 start, u64 length)
814 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
815 struct i915_pml4 *pml4 = &ppgtt->pml4;
816 struct i915_page_directory_pointer *pdp;
819 GEM_BUG_ON(!use_4lvl(vm));
821 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
822 GEM_BUG_ON(pdp == vm->scratch_pdp);
824 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
827 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
834 struct scatterlist *sg;
838 struct gen8_insert_pte {
845 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
847 return (struct gen8_insert_pte) {
848 gen8_pml4e_index(start),
849 gen8_pdpe_index(start),
850 gen8_pde_index(start),
851 gen8_pte_index(start),
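/*
 * As a rough worked example of the split above (assuming the usual gen8
 * 9/9/9/9/12 bit layout): pml4e is taken from bits [47:39] of the GTT
 * offset, pdpe from bits [38:30], pde from bits [29:21] and pte from
 * bits [20:12].
 */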
855 static __always_inline bool
856 gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
857 struct i915_page_directory_pointer *pdp,
858 struct sgt_dma *iter,
859 struct gen8_insert_pte *idx,
860 enum i915_cache_level cache_level)
862 struct i915_page_directory *pd;
863 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
867 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
868 pd = pdp->page_directory[idx->pdpe];
869 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
871 vaddr[idx->pte] = pte_encode | iter->dma;
873 iter->dma += PAGE_SIZE;
874 if (iter->dma >= iter->max) {
875 iter->sg = __sg_next(iter->sg);
881 iter->dma = sg_dma_address(iter->sg);
882 iter->max = iter->dma + iter->sg->length;
885 if (++idx->pte == GEN8_PTES) {
888 if (++idx->pde == I915_PDES) {
891 /* Limited by sg length for 3lvl */
892 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
898 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
899 pd = pdp->page_directory[idx->pdpe];
902 kunmap_atomic(vaddr);
903 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
906 kunmap_atomic(vaddr);
911 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
912 struct sg_table *pages,
914 enum i915_cache_level cache_level,
917 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
918 struct sgt_dma iter = {
920 .dma = sg_dma_address(iter.sg),
921 .max = iter.dma + iter.sg->length,
923 struct gen8_insert_pte idx = gen8_insert_pte(start);
925 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
929 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
930 struct sg_table *pages,
932 enum i915_cache_level cache_level,
935 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
936 struct sgt_dma iter = {
938 .dma = sg_dma_address(iter.sg),
939 .max = iter.dma + iter.sg->length,
941 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
942 struct gen8_insert_pte idx = gen8_insert_pte(start);
944 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
946 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
949 static void gen8_free_page_tables(struct i915_address_space *vm,
950 struct i915_page_directory *pd)
957 for (i = 0; i < I915_PDES; i++) {
958 if (pd->page_table[i] != vm->scratch_pt)
959 free_pt(vm, pd->page_table[i]);
963 static int gen8_init_scratch(struct i915_address_space *vm)
967 ret = setup_scratch_page(vm, I915_GFP_DMA);
971 vm->scratch_pt = alloc_pt(vm);
972 if (IS_ERR(vm->scratch_pt)) {
973 ret = PTR_ERR(vm->scratch_pt);
974 goto free_scratch_page;
977 vm->scratch_pd = alloc_pd(vm);
978 if (IS_ERR(vm->scratch_pd)) {
979 ret = PTR_ERR(vm->scratch_pd);
984 vm->scratch_pdp = alloc_pdp(vm);
985 if (IS_ERR(vm->scratch_pdp)) {
986 ret = PTR_ERR(vm->scratch_pdp);
991 gen8_initialize_pt(vm, vm->scratch_pt);
992 gen8_initialize_pd(vm, vm->scratch_pd);
994 gen8_initialize_pdp(vm, vm->scratch_pdp);
999 free_pd(vm, vm->scratch_pd);
1001 free_pt(vm, vm->scratch_pt);
1003 cleanup_scratch_page(vm);
1008 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1010 struct i915_address_space *vm = &ppgtt->base;
1011 struct drm_i915_private *dev_priv = vm->i915;
1012 enum vgt_g2v_type msg;
1016 const u64 daddr = px_dma(&ppgtt->pml4);
1018 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1019 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1021 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1022 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1024 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1025 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1027 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1028 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1031 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1032 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1035 I915_WRITE(vgtif_reg(g2v_notify), msg);
1040 static void gen8_free_scratch(struct i915_address_space *vm)
1043 free_pdp(vm, vm->scratch_pdp);
1044 free_pd(vm, vm->scratch_pd);
1045 free_pt(vm, vm->scratch_pt);
1046 cleanup_scratch_page(vm);
1049 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1050 struct i915_page_directory_pointer *pdp)
1052 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1055 for (i = 0; i < pdpes; i++) {
1056 if (pdp->page_directory[i] == vm->scratch_pd)
1059 gen8_free_page_tables(vm, pdp->page_directory[i]);
1060 free_pd(vm, pdp->page_directory[i]);
1066 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1070 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1071 if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
1074 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
1077 cleanup_px(&ppgtt->base, &ppgtt->pml4);
1080 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1082 struct drm_i915_private *dev_priv = vm->i915;
1083 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1085 if (intel_vgpu_active(dev_priv))
1086 gen8_ppgtt_notify_vgt(ppgtt, false);
1089 gen8_ppgtt_cleanup_4lvl(ppgtt);
1091 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
1093 gen8_free_scratch(vm);
1096 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1097 struct i915_page_directory *pd,
1098 u64 start, u64 length)
1100 struct i915_page_table *pt;
1104 gen8_for_each_pde(pt, pd, start, length, pde) {
1105 if (pt == vm->scratch_pt) {
1110 gen8_initialize_pt(vm, pt);
1112 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1114 GEM_BUG_ON(pd->used_pdes > I915_PDES);
1117 pt->used_ptes += gen8_pte_count(start, length);
1122 gen8_ppgtt_clear_pd(vm, pd, from, start - from);
1126 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1127 struct i915_page_directory_pointer *pdp,
1128 u64 start, u64 length)
1130 struct i915_page_directory *pd;
1135 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1136 if (pd == vm->scratch_pd) {
1141 gen8_initialize_pd(vm, pd);
1142 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1144 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1146 mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
1149 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1157 if (!pd->used_pdes) {
1158 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1159 GEM_BUG_ON(!pdp->used_pdpes);
1164 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1168 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1169 u64 start, u64 length)
1171 return gen8_ppgtt_alloc_pdp(vm,
1172 &i915_vm_to_ppgtt(vm)->pdp, start, length);
1175 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1176 u64 start, u64 length)
1178 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1179 struct i915_pml4 *pml4 = &ppgtt->pml4;
1180 struct i915_page_directory_pointer *pdp;
1185 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1186 if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1187 pdp = alloc_pdp(vm);
1191 gen8_initialize_pdp(vm, pdp);
1192 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1195 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1203 if (!pdp->used_pdpes) {
1204 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1208 gen8_ppgtt_clear_4lvl(vm, from, start - from);
1212 static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1213 struct i915_page_directory_pointer *pdp,
1214 u64 start, u64 length,
1215 gen8_pte_t scratch_pte,
1218 struct i915_address_space *vm = &ppgtt->base;
1219 struct i915_page_directory *pd;
1222 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1223 struct i915_page_table *pt;
1224 u64 pd_len = length;
1225 u64 pd_start = start;
1228 if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
1231 seq_printf(m, "\tPDPE #%d\n", pdpe);
1232 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1234 gen8_pte_t *pt_vaddr;
1236 if (pd->page_table[pde] == ppgtt->base.scratch_pt)
1239 pt_vaddr = kmap_atomic_px(pt);
1240 for (pte = 0; pte < GEN8_PTES; pte += 4) {
1241 u64 va = (pdpe << GEN8_PDPE_SHIFT |
1242 pde << GEN8_PDE_SHIFT |
1243 pte << GEN8_PTE_SHIFT);
1247 for (i = 0; i < 4; i++)
1248 if (pt_vaddr[pte + i] != scratch_pte)
1253 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1254 for (i = 0; i < 4; i++) {
1255 if (pt_vaddr[pte + i] != scratch_pte)
1256 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1258 seq_puts(m, " SCRATCH ");
1262 kunmap_atomic(pt_vaddr);
1267 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1269 struct i915_address_space *vm = &ppgtt->base;
1270 const gen8_pte_t scratch_pte =
1271 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
1272 u64 start = 0, length = ppgtt->base.total;
1276 struct i915_pml4 *pml4 = &ppgtt->pml4;
1277 struct i915_page_directory_pointer *pdp;
1279 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1280 if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
1283 seq_printf(m, " PML4E #%llu\n", pml4e);
1284 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
1287 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
1291 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1293 struct i915_address_space *vm = &ppgtt->base;
1294 struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1295 struct i915_page_directory *pd;
1296 u64 start = 0, length = ppgtt->base.total;
1300 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1305 gen8_initialize_pd(vm, pd);
1306 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1310 pdp->used_pdpes++; /* never remove */
1315 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1316 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1319 pdp->used_pdpes = 0;
1324 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
1325 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1326 * PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of legacy 32b address space.
1330 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1332 struct i915_address_space *vm = &ppgtt->base;
1333 struct drm_i915_private *dev_priv = vm->i915;
1336 ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1340 ret = gen8_init_scratch(&ppgtt->base);
1342 ppgtt->base.total = 0;
1346 /* There are only a few exceptions for gen >= 6: chv and bxt.
1347 * And we are not sure about the latter, so play safe for now.
1349 if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1350 ppgtt->base.pt_kmap_wc = true;
1353 ret = setup_px(&ppgtt->base, &ppgtt->pml4);
1357 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1359 ppgtt->switch_mm = gen8_mm_switch_4lvl;
1360 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1361 ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
1362 ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
1364 ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
1368 if (intel_vgpu_active(dev_priv)) {
1369 ret = gen8_preallocate_top_level_pdp(ppgtt);
1371 __pdp_fini(&ppgtt->pdp);
1376 ppgtt->switch_mm = gen8_mm_switch_3lvl;
1377 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1378 ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
1379 ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
1382 if (intel_vgpu_active(dev_priv))
1383 gen8_ppgtt_notify_vgt(ppgtt, true);
1385 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1386 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1387 ppgtt->base.bind_vma = ppgtt_bind_vma;
1388 ppgtt->debug_dump = gen8_dump_ppgtt;
1393 gen8_free_scratch(&ppgtt->base);
1397 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1399 struct i915_address_space *vm = &ppgtt->base;
1400 struct i915_page_table *unused;
1401 gen6_pte_t scratch_pte;
1402 u32 pd_entry, pte, pde;
1403 u32 start = 0, length = ppgtt->base.total;
1405 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1408 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
1410 gen6_pte_t *pt_vaddr;
1411 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1412 pd_entry = readl(ppgtt->pd_addr + pde);
1413 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1415 if (pd_entry != expected)
1416 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1420 seq_printf(m, "\tPDE: %x\n", pd_entry);
1422 pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
1424 for (pte = 0; pte < GEN6_PTES; pte += 4) {
1426 (pde * PAGE_SIZE * GEN6_PTES) +
1430 for (i = 0; i < 4; i++)
1431 if (pt_vaddr[pte + i] != scratch_pte)
1436 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1437 for (i = 0; i < 4; i++) {
1438 if (pt_vaddr[pte + i] != scratch_pte)
1439 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1441 seq_puts(m, " SCRATCH ");
1445 kunmap_atomic(pt_vaddr);
1449 /* Write pde (index) from the page directory @pd to the page table @pt */
1450 static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1451 const unsigned int pde,
1452 const struct i915_page_table *pt)
1454 /* Caller needs to make sure the write completes if necessary */
1455 writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1456 ppgtt->pd_addr + pde);
1459 /* Write all the page tables found in the ppgtt structure to incrementing page directories. */
1461 static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
1462 u32 start, u32 length)
1464 struct i915_page_table *pt;
1467 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1468 gen6_write_pde(ppgtt, pde, pt);
1470 mark_tlbs_dirty(ppgtt);
1474 static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
1476 GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1477 return ppgtt->pd.base.ggtt_offset << 10;
1480 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1481 struct drm_i915_gem_request *req)
1483 struct intel_engine_cs *engine = req->engine;
1486 /* NB: TLBs must be flushed and invalidated before a switch */
1487 cs = intel_ring_begin(req, 6);
1491 *cs++ = MI_LOAD_REGISTER_IMM(2);
1492 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1493 *cs++ = PP_DIR_DCLV_2G;
1494 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1495 *cs++ = get_pd_offset(ppgtt);
1497 intel_ring_advance(req, cs);
1502 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1503 struct drm_i915_gem_request *req)
1505 struct intel_engine_cs *engine = req->engine;
1508 /* NB: TLBs must be flushed and invalidated before a switch */
1509 cs = intel_ring_begin(req, 6);
1513 *cs++ = MI_LOAD_REGISTER_IMM(2);
1514 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1515 *cs++ = PP_DIR_DCLV_2G;
1516 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1517 *cs++ = get_pd_offset(ppgtt);
1519 intel_ring_advance(req, cs);
1524 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1525 struct drm_i915_gem_request *req)
1527 struct intel_engine_cs *engine = req->engine;
1528 struct drm_i915_private *dev_priv = req->i915;
1530 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1531 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1535 static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1537 struct intel_engine_cs *engine;
1538 enum intel_engine_id id;
1540 for_each_engine(engine, dev_priv, id) {
1541 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1542 GEN8_GFX_PPGTT_48B : 0;
1543 I915_WRITE(RING_MODE_GEN7(engine),
1544 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1548 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1550 struct intel_engine_cs *engine;
1551 u32 ecochk, ecobits;
1552 enum intel_engine_id id;
1554 ecobits = I915_READ(GAC_ECO_BITS);
1555 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1557 ecochk = I915_READ(GAM_ECOCHK);
1558 if (IS_HASWELL(dev_priv)) {
1559 ecochk |= ECOCHK_PPGTT_WB_HSW;
1561 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1562 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1564 I915_WRITE(GAM_ECOCHK, ecochk);
1566 for_each_engine(engine, dev_priv, id) {
1567 /* GFX_MODE is per-ring on gen7+ */
1568 I915_WRITE(RING_MODE_GEN7(engine),
1569 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1573 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1575 u32 ecochk, gab_ctl, ecobits;
1577 ecobits = I915_READ(GAC_ECO_BITS);
1578 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1579 ECOBITS_PPGTT_CACHE64B);
1581 gab_ctl = I915_READ(GAB_CTL);
1582 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1584 ecochk = I915_READ(GAM_ECOCHK);
1585 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1587 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1590 /* PPGTT support for Sandybridge/Gen6 and later */
1591 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1592 u64 start, u64 length)
1594 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1595 unsigned int first_entry = start >> PAGE_SHIFT;
1596 unsigned int pde = first_entry / GEN6_PTES;
1597 unsigned int pte = first_entry % GEN6_PTES;
1598 unsigned int num_entries = length >> PAGE_SHIFT;
1599 gen6_pte_t scratch_pte =
1600 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1602 while (num_entries) {
1603 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1604 unsigned int end = min(pte + num_entries, GEN6_PTES);
1607 num_entries -= end - pte;
1609 /* Note that the hw doesn't support removing PDE on the fly
1610 * (they are cached inside the context with no means to
1611 * invalidate the cache), so we can only reset the PTE
1612 * entries back to scratch.
1615 vaddr = kmap_atomic_px(pt);
1617 vaddr[pte++] = scratch_pte;
1618 } while (pte < end);
1619 kunmap_atomic(vaddr);
1625 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1626 struct sg_table *pages,
1628 enum i915_cache_level cache_level,
1631 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1632 unsigned first_entry = start >> PAGE_SHIFT;
1633 unsigned act_pt = first_entry / GEN6_PTES;
1634 unsigned act_pte = first_entry % GEN6_PTES;
1635 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1636 struct sgt_dma iter;
1639 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1640 iter.sg = pages->sgl;
1641 iter.dma = sg_dma_address(iter.sg);
1642 iter.max = iter.dma + iter.sg->length;
1644 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1646 iter.dma += PAGE_SIZE;
1647 if (iter.dma == iter.max) {
1648 iter.sg = __sg_next(iter.sg);
1652 iter.dma = sg_dma_address(iter.sg);
1653 iter.max = iter.dma + iter.sg->length;
1656 if (++act_pte == GEN6_PTES) {
1657 kunmap_atomic(vaddr);
1658 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1662 kunmap_atomic(vaddr);
1665 static int gen6_alloc_va_range(struct i915_address_space *vm,
1666 u64 start, u64 length)
1668 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1669 struct i915_page_table *pt;
1674 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
1675 if (pt == vm->scratch_pt) {
1680 gen6_initialize_pt(vm, pt);
1681 ppgtt->pd.page_table[pde] = pt;
1682 gen6_write_pde(ppgtt, pde, pt);
1688 mark_tlbs_dirty(ppgtt);
1695 gen6_ppgtt_clear_range(vm, from, start);
1699 static int gen6_init_scratch(struct i915_address_space *vm)
1703 ret = setup_scratch_page(vm, I915_GFP_DMA);
1707 vm->scratch_pt = alloc_pt(vm);
1708 if (IS_ERR(vm->scratch_pt)) {
1709 cleanup_scratch_page(vm);
1710 return PTR_ERR(vm->scratch_pt);
1713 gen6_initialize_pt(vm, vm->scratch_pt);
1718 static void gen6_free_scratch(struct i915_address_space *vm)
1720 free_pt(vm, vm->scratch_pt);
1721 cleanup_scratch_page(vm);
1724 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1726 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1727 struct i915_page_directory *pd = &ppgtt->pd;
1728 struct i915_page_table *pt;
1731 drm_mm_remove_node(&ppgtt->node);
1733 gen6_for_all_pdes(pt, pd, pde)
1734 if (pt != vm->scratch_pt)
1737 gen6_free_scratch(vm);
1740 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
1742 struct i915_address_space *vm = &ppgtt->base;
1743 struct drm_i915_private *dev_priv = ppgtt->base.i915;
1744 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1747 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1748 * allocator works in address space sizes, so it's multiplied by page
1749 * size. We allocate at the top of the GTT to avoid fragmentation.
1751 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
1753 ret = gen6_init_scratch(vm);
1757 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1758 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1759 I915_COLOR_UNEVICTABLE,
1760 0, ggtt->base.total,
1765 if (ppgtt->node.start < ggtt->mappable_end)
1766 DRM_DEBUG("Forced to use aperture for PDEs\n");
1768 ppgtt->pd.base.ggtt_offset =
1769 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1771 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
1772 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
1777 gen6_free_scratch(vm);
1781 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1783 return gen6_ppgtt_allocate_page_directories(ppgtt);
1786 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
1787 u64 start, u64 length)
1789 struct i915_page_table *unused;
1792 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
1793 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
1796 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1798 struct drm_i915_private *dev_priv = ppgtt->base.i915;
1799 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1802 ppgtt->base.pte_encode = ggtt->base.pte_encode;
1803 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
1804 ppgtt->switch_mm = gen6_mm_switch;
1805 else if (IS_HASWELL(dev_priv))
1806 ppgtt->switch_mm = hsw_mm_switch;
1807 else if (IS_GEN7(dev_priv))
1808 ppgtt->switch_mm = gen7_mm_switch;
1812 ret = gen6_ppgtt_alloc(ppgtt);
1816 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
1818 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
1819 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
1821 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
1823 gen6_ppgtt_cleanup(&ppgtt->base);
1827 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1828 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1829 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1830 ppgtt->base.bind_vma = ppgtt_bind_vma;
1831 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1832 ppgtt->debug_dump = gen6_dump_ppgtt;
1834 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
1835 ppgtt->node.size >> 20,
1836 ppgtt->node.start / PAGE_SIZE);
1838 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
1839 ppgtt->pd.base.ggtt_offset << 10);
1844 static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
1845 struct drm_i915_private *dev_priv)
1847 ppgtt->base.i915 = dev_priv;
1848 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
1850 if (INTEL_INFO(dev_priv)->gen < 8)
1851 return gen6_ppgtt_init(ppgtt);
1853 return gen8_ppgtt_init(ppgtt);
1856 static void i915_address_space_init(struct i915_address_space *vm,
1857 struct drm_i915_private *dev_priv,
1860 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
1862 drm_mm_init(&vm->mm, 0, vm->total);
1863 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
1865 INIT_LIST_HEAD(&vm->active_list);
1866 INIT_LIST_HEAD(&vm->inactive_list);
1867 INIT_LIST_HEAD(&vm->unbound_list);
1869 list_add_tail(&vm->global_link, &dev_priv->vm_list);
1870 pagevec_init(&vm->free_pages, false);
1873 static void i915_address_space_fini(struct i915_address_space *vm)
1875 if (pagevec_count(&vm->free_pages))
1876 vm_free_pages_release(vm);
1878 i915_gem_timeline_fini(&vm->timeline);
1879 drm_mm_takedown(&vm->mm);
1880 list_del(&vm->global_link);
1883 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
1885 /* This function is for gtt related workarounds. This function is
1886 * called on driver load and after a GPU reset, so you can place
1887 * workarounds here even if they get overwritten by GPU reset.
1889 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
1890 if (IS_BROADWELL(dev_priv))
1891 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
1892 else if (IS_CHERRYVIEW(dev_priv))
1893 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
1894 else if (IS_GEN9_BC(dev_priv))
1895 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
1896 else if (IS_GEN9_LP(dev_priv))
1897 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
1900 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
1902 gtt_write_workarounds(dev_priv);
1904 /* In the case of execlists, PPGTT is enabled by the context descriptor
1905 * and the PDPs are contained within the context itself. We don't
1906 * need to do anything here. */
1907 if (i915.enable_execlists)
1910 if (!USES_PPGTT(dev_priv))
1913 if (IS_GEN6(dev_priv))
1914 gen6_ppgtt_enable(dev_priv);
1915 else if (IS_GEN7(dev_priv))
1916 gen7_ppgtt_enable(dev_priv);
1917 else if (INTEL_GEN(dev_priv) >= 8)
1918 gen8_ppgtt_enable(dev_priv);
1920 MISSING_CASE(INTEL_GEN(dev_priv));
1925 struct i915_hw_ppgtt *
1926 i915_ppgtt_create(struct drm_i915_private *dev_priv,
1927 struct drm_i915_file_private *fpriv,
1930 struct i915_hw_ppgtt *ppgtt;
1933 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1935 return ERR_PTR(-ENOMEM);
1937 ret = __hw_ppgtt_init(ppgtt, dev_priv);
1940 return ERR_PTR(ret);
1943 kref_init(&ppgtt->ref);
1944 i915_address_space_init(&ppgtt->base, dev_priv, name);
1945 ppgtt->base.file = fpriv;
1947 trace_i915_ppgtt_create(&ppgtt->base);
1952 void i915_ppgtt_close(struct i915_address_space *vm)
1954 struct list_head *phases[] = {
1961 GEM_BUG_ON(vm->closed);
1964 for (phase = phases; *phase; phase++) {
1965 struct i915_vma *vma, *vn;
1967 list_for_each_entry_safe(vma, vn, *phase, vm_link)
1968 if (!i915_vma_is_closed(vma))
1969 i915_vma_close(vma);
1973 void i915_ppgtt_release(struct kref *kref)
1975 struct i915_hw_ppgtt *ppgtt =
1976 container_of(kref, struct i915_hw_ppgtt, ref);
1978 trace_i915_ppgtt_release(&ppgtt->base);
1980 /* vmas should already be unbound and destroyed */
1981 WARN_ON(!list_empty(&ppgtt->base.active_list));
1982 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
1983 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
1985 ppgtt->base.cleanup(&ppgtt->base);
1986 i915_address_space_fini(&ppgtt->base);
1990 /* Certain Gen5 chipsets require idling the GPU before
1991 * unmapping anything from the GTT when VT-d is enabled.
1993 static bool needs_idle_maps(struct drm_i915_private *dev_priv)
1995 #ifdef CONFIG_INTEL_IOMMU
1996 /* Query intel_iommu to see if we need the workaround. Presumably that was loaded first. */
1999 if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
2005 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2007 struct intel_engine_cs *engine;
2008 enum intel_engine_id id;
2010 if (INTEL_INFO(dev_priv)->gen < 6)
2013 for_each_engine(engine, dev_priv, id) {
2015 fault_reg = I915_READ(RING_FAULT_REG(engine));
2016 if (fault_reg & RING_FAULT_VALID) {
2017 DRM_DEBUG_DRIVER("Unexpected fault\n"
2019 "\tAddress space: %s\n"
2022 fault_reg & PAGE_MASK,
2023 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2024 RING_FAULT_SRCID(fault_reg),
2025 RING_FAULT_FAULT_TYPE(fault_reg));
2026 I915_WRITE(RING_FAULT_REG(engine),
2027 fault_reg & ~RING_FAULT_VALID);
2031 /* Engine specific init may not have been done till this point. */
2032 if (dev_priv->engine[RCS])
2033 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
2036 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2038 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2040 /* Don't bother messing with faults pre GEN6 as we have little
2041 * documentation supporting that it's a good idea.
2043 if (INTEL_GEN(dev_priv) < 6)
2046 i915_check_and_clear_faults(dev_priv);
2048 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
2050 i915_ggtt_invalidate(dev_priv);
2053 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2054 struct sg_table *pages)
2057 if (dma_map_sg(&obj->base.dev->pdev->dev,
2058 pages->sgl, pages->nents,
2059 PCI_DMA_BIDIRECTIONAL))
2062 /* If the DMA remap fails, one cause can be that we have
2063 * too many objects pinned in a small remapping table,
2064 * such as swiotlb. Incrementally purge all other objects and
2065 * try again - if there are no more pages to remove from
2066 * the DMA remapper, i915_gem_shrink will return 0.
2068 GEM_BUG_ON(obj->mm.pages == pages);
2069 } while (i915_gem_shrink(to_i915(obj->base.dev),
2070 obj->base.size >> PAGE_SHIFT,
2072 I915_SHRINK_UNBOUND |
2073 I915_SHRINK_ACTIVE));
2078 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2083 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2086 enum i915_cache_level level,
2089 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2090 gen8_pte_t __iomem *pte =
2091 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
2093 gen8_set_pte(pte, gen8_pte_encode(addr, level));
2095 ggtt->invalidate(vm->i915);
2098 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2099 struct sg_table *st,
2101 enum i915_cache_level level,
2104 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2105 struct sgt_iter sgt_iter;
2106 gen8_pte_t __iomem *gtt_entries;
2107 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
2110 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2111 gtt_entries += start >> PAGE_SHIFT;
2112 for_each_sgt_dma(addr, sgt_iter, st)
2113 gen8_set_pte(gtt_entries++, pte_encode | addr);
2117 /* This next bit makes the above posting read even more important. We
2118 * want to flush the TLBs only after we're certain all the PTE updates are finished. */
2121 ggtt->invalidate(vm->i915);
2124 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2127 enum i915_cache_level level,
2130 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2131 gen6_pte_t __iomem *pte =
2132 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
2134 iowrite32(vm->pte_encode(addr, level, flags), pte);
2136 ggtt->invalidate(vm->i915);
2140 * Binds an object into the global gtt with the specified cache level. The object
2141 * will be accessible to the GPU via commands whose operands reference offsets
2142 * within the global GTT as well as accessible by the GPU through the GMADR
2143 * mapped BAR (dev_priv->mm.gtt->gtt).
2145 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2146 struct sg_table *st,
2148 enum i915_cache_level level,
2151 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2152 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2153 unsigned int i = start >> PAGE_SHIFT;
2154 struct sgt_iter iter;
2156 for_each_sgt_dma(addr, iter, st)
2157 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2160 /* This next bit makes the above posting read even more important. We
2161 * want to flush the TLBs only after we're certain all the PTE updates are finished. */
2164 ggtt->invalidate(vm->i915);
2167 static void nop_clear_range(struct i915_address_space *vm,
2168 u64 start, u64 length)
2172 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2173 u64 start, u64 length)
2175 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2176 unsigned first_entry = start >> PAGE_SHIFT;
2177 unsigned num_entries = length >> PAGE_SHIFT;
2178 const gen8_pte_t scratch_pte =
2179 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2180 gen8_pte_t __iomem *gtt_base =
2181 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2182 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2185 if (WARN(num_entries > max_entries,
2186 "First entry = %d; Num entries = %d (max=%d)\n",
2187 first_entry, num_entries, max_entries))
2188 num_entries = max_entries;
2190 for (i = 0; i < num_entries; i++)
2191 gen8_set_pte(&gtt_base[i], scratch_pte);
2194 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2195 u64 start, u64 length)
2197 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2198 unsigned first_entry = start >> PAGE_SHIFT;
2199 unsigned num_entries = length >> PAGE_SHIFT;
2200 gen6_pte_t scratch_pte, __iomem *gtt_base =
2201 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2202 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2205 if (WARN(num_entries > max_entries,
2206 "First entry = %d; Num entries = %d (max=%d)\n",
2207 first_entry, num_entries, max_entries))
2208 num_entries = max_entries;
2210 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
2213 for (i = 0; i < num_entries; i++)
2214 iowrite32(scratch_pte, &gtt_base[i]);
2217 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2220 enum i915_cache_level cache_level,
2223 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2224 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2226 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2229 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2230 struct sg_table *pages,
2232 enum i915_cache_level cache_level,
2235 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2236 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2238 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
2241 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2242 u64 start, u64 length)
2244 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
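/*
 * The i915_ggtt_* helpers above simply forward to the old intel-gtt (GMCH)
 * driver, which owns GTT programming on the pre-gen6 platforms that use
 * these paths.
 */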
2247 static int ggtt_bind_vma(struct i915_vma *vma,
2248 enum i915_cache_level cache_level,
2251 struct drm_i915_private *i915 = vma->vm->i915;
2252 struct drm_i915_gem_object *obj = vma->obj;
2255 if (unlikely(!vma->pages)) {
2256 int ret = i915_get_ggtt_vma_pages(vma);
2261 /* Currently applicable only to VLV */
2264 pte_flags |= PTE_READ_ONLY;
2266 intel_runtime_pm_get(i915);
2267 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
2268 cache_level, pte_flags);
2269 intel_runtime_pm_put(i915);
2272 * Without aliasing PPGTT there's no difference between
2273 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2274 * upgrade to both bound if we bind either to avoid double-binding.
2276 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2281 static void ggtt_unbind_vma(struct i915_vma *vma)
2283 struct drm_i915_private *i915 = vma->vm->i915;
2285 intel_runtime_pm_get(i915);
2286 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2287 intel_runtime_pm_put(i915);
2290 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2291 enum i915_cache_level cache_level,
2294 struct drm_i915_private *i915 = vma->vm->i915;
2298 if (unlikely(!vma->pages)) {
2299 ret = i915_get_ggtt_vma_pages(vma);
2304 /* Currently applicable only to VLV */
2306 if (vma->obj->gt_ro)
2307 pte_flags |= PTE_READ_ONLY;
2309 if (flags & I915_VMA_LOCAL_BIND) {
2310 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2312 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2313 appgtt->base.allocate_va_range) {
2314 ret = appgtt->base.allocate_va_range(&appgtt->base,
2321 appgtt->base.insert_entries(&appgtt->base,
2322 vma->pages, vma->node.start,
2323 cache_level, pte_flags);
2326 if (flags & I915_VMA_GLOBAL_BIND) {
2327 intel_runtime_pm_get(i915);
2328 vma->vm->insert_entries(vma->vm,
2329 vma->pages, vma->node.start,
2330 cache_level, pte_flags);
2331 intel_runtime_pm_put(i915);
2337 if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
2338 if (vma->pages != vma->obj->mm.pages) {
2339 GEM_BUG_ON(!vma->pages);
2340 sg_free_table(vma->pages);
2348 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2350 struct drm_i915_private *i915 = vma->vm->i915;
2352 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2353 intel_runtime_pm_get(i915);
2354 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2355 intel_runtime_pm_put(i915);
2358 if (vma->flags & I915_VMA_LOCAL_BIND) {
2359 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2361 vm->clear_range(vm, vma->node.start, vma->size);
2365 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2366 struct sg_table *pages)
2368 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2369 struct device *kdev = &dev_priv->drm.pdev->dev;
2370 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2372 if (unlikely(ggtt->do_idle_maps)) {
2373 if (i915_gem_wait_for_idle(dev_priv, 0)) {
2374 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2375 /* Wait a bit, in hopes it avoids the hang */
2380 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2383 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2384 unsigned long color,
2388 if (node->allocated && node->color != color)
2389 *start += I915_GTT_PAGE_SIZE;
2391 /* Also leave a space between the unallocated reserved node after the
2392 * GTT and any objects within the GTT, i.e. we use the color adjustment
2393 * to insert a guard page to prevent prefetches crossing over the boundary. */
2396 node = list_next_entry(node, node_list);
2397 if (node->color != color)
2398 *end -= I915_GTT_PAGE_SIZE;
2401 int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2403 struct i915_ggtt *ggtt = &i915->ggtt;
2404 struct i915_hw_ppgtt *ppgtt;
2407 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
2409 return PTR_ERR(ppgtt);
2411 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2416 if (ppgtt->base.allocate_va_range) {
2417 /* Note we only pre-allocate as far as the end of the global
2418 * GTT. On 48b / 4-level page-tables, the difference is very,
2419 * very significant! We have to preallocate as GVT/vgpu does
2420 * not like the page directory disappearing.
2422 err = ppgtt->base.allocate_va_range(&ppgtt->base,
2423 0, ggtt->base.total);
2428 i915->mm.aliasing_ppgtt = ppgtt;
2430 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2431 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2433 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2434 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2439 i915_ppgtt_put(ppgtt);
2443 void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2445 struct i915_ggtt *ggtt = &i915->ggtt;
2446 struct i915_hw_ppgtt *ppgtt;
2448 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2452 i915_ppgtt_put(ppgtt);
2454 ggtt->base.bind_vma = ggtt_bind_vma;
2455 ggtt->base.unbind_vma = ggtt_unbind_vma;
2458 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2460 /* Let GEM manage all of the aperture.
2462 * However, leave one page at the end still bound to the scratch page.
2463 * There are a number of places where the hardware apparently prefetches
2464 * past the end of the object, and we've seen multiple hangs with the
2465 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2466 * aperture. One page should be enough to keep any prefetching inside of the aperture. */
2469 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2470 unsigned long hole_start, hole_end;
2471 struct drm_mm_node *entry;
2474 ret = intel_vgt_balloon(dev_priv);
2478 /* Reserve a mappable slot for our lockless error capture */
2479 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2480 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2481 0, ggtt->mappable_end,
2486 /* Clear any non-preallocated blocks */
2487 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
2488 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2489 hole_start, hole_end);
2490 ggtt->base.clear_range(&ggtt->base, hole_start,
2491 hole_end - hole_start);
2494 /* And finally clear the reserved guard page */
2495 ggtt->base.clear_range(&ggtt->base,
2496 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
2498 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
2499 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2507 drm_mm_remove_node(&ggtt->error_capture);
2512 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2513 * @dev_priv: i915 device
2515 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2517 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2518 struct i915_vma *vma, *vn;
2520 ggtt->base.closed = true;
2522 mutex_lock(&dev_priv->drm.struct_mutex);
2523 WARN_ON(!list_empty(&ggtt->base.active_list));
2524 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2525 WARN_ON(i915_vma_unbind(vma));
2526 mutex_unlock(&dev_priv->drm.struct_mutex);
2528 i915_gem_cleanup_stolen(&dev_priv->drm);
2530 mutex_lock(&dev_priv->drm.struct_mutex);
2531 i915_gem_fini_aliasing_ppgtt(dev_priv);
2533 if (drm_mm_node_allocated(&ggtt->error_capture))
2534 drm_mm_remove_node(&ggtt->error_capture);
2536 if (drm_mm_initialized(&ggtt->base.mm)) {
2537 intel_vgt_deballoon(dev_priv);
2538 i915_address_space_fini(&ggtt->base);
2541 ggtt->base.cleanup(&ggtt->base);
2542 mutex_unlock(&dev_priv->drm.struct_mutex);
2544 arch_phys_wc_del(ggtt->mtrr);
2545 io_mapping_fini(&ggtt->mappable);
2548 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2550 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2551 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2552 return snb_gmch_ctl << 20;
2555 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2557 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2558 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2560 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2562 #ifdef CONFIG_X86_32
2563 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2564 if (bdw_gmch_ctl > 4)
2568 return bdw_gmch_ctl << 20;
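/*
 * Worked decode (illustrative): a GGMS field of 0x3 on BDW gives
 * 1 << 3 = 8, i.e. an 8 MiB GSM. gen8_gmch_probe() below then converts
 * that into the GGTT size: 8 MiB / sizeof(gen8_pte_t) PTEs * 4 KiB per
 * PTE = 4 GiB of global GTT address space.
 */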
2571 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2573 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2574 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2577 return 1 << (20 + gmch_ctrl);
2582 static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
2584 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2585 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2586 return snb_gmch_ctl << 25; /* 32 MB units */
2589 static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
2591 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2592 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2593 return bdw_gmch_ctl << 25; /* 32 MB units */
2596 static size_t chv_get_stolen_size(u16 gmch_ctrl)
2598 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2599 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2602 * 0x0 to 0x10: 32MB increments starting at 0MB
2603 * 0x11 to 0x16: 4MB increments starting at 8MB
2604 * 0x17 to 0x1d: 4MB increments starting at 36MB
2606 if (gmch_ctrl < 0x11)
2607 return gmch_ctrl << 25;
2608 else if (gmch_ctrl < 0x17)
2609 return (gmch_ctrl - 0x11 + 2) << 22;
2611 return (gmch_ctrl - 0x17 + 9) << 22;
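/*
 * Worked decode for the CHV table above (illustrative): 0x10 -> 0x10 << 25
 * = 512 MiB (32 MiB steps), 0x11 -> (0x11 - 0x11 + 2) << 22 = 8 MiB, and
 * 0x17 -> (0x17 - 0x17 + 9) << 22 = 36 MiB, matching the 4 MiB-increment
 * ranges documented in the comment.
 */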
2614 static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2616 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2617 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2619 if (gen9_gmch_ctl < 0xf0)
2620 return gen9_gmch_ctl << 25; /* 32 MB units */
2622 /* 4MB increments from 0xf0 upwards, i.e. 0xf0 itself selects 4MB */
2623 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2626 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2628 struct drm_i915_private *dev_priv = ggtt->base.i915;
2629 struct pci_dev *pdev = dev_priv->drm.pdev;
2630 phys_addr_t phys_addr;
2633 /* For Modern GENs the PTEs and register space are split in the BAR */
2634 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2637 * On BXT, writes larger than 64 bits to the GTT pagetable range will be
2638 * dropped. For WC mappings in general we have 64 byte burst writes
2639 * when the WC buffer is flushed, so we can't use it, but have to
2640 * resort to an uncached mapping. The WC issue is easily caught by the
2641 * readback check when writing GTT PTE entries.
2643 if (IS_GEN9_LP(dev_priv))
2644 ggtt->gsm = ioremap_nocache(phys_addr, size);
2646 ggtt->gsm = ioremap_wc(phys_addr, size);
2648 DRM_ERROR("Failed to map the ggtt page table\n");
2652 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
2654 DRM_ERROR("Scratch setup failed\n");
2655 /* iounmap will also get called at remove, but meh */
2663 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2664 * bits. When using advanced contexts each context stores its own PAT, but
2665 * writing this data shouldn't be harmful even in those cases. */
2666 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2670 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2671 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2672 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2673 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2674 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2675 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2676 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2677 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2679 if (!USES_PPGTT(dev_priv))
2680 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2681 * so RTL will always use the value corresponding to
2683 * So let's disable cache for GGTT to avoid screen corruptions.
2684 * MOCS still can be used though.
2685 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2686 * before this patch, i.e. the same uncached + snooping access
2687 * like on gen6/7 seems to be in effect.
2688 * - So this just fixes blitter/render access. Again it looks
2689 * like it's not just uncached access, but uncached + snooping.
2690 * So we can still hold onto all our assumptions wrt cpu
2691 * clflushing on LLC machines.
2693 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2695 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2696 * write would work. */
2697 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2698 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
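/*
 * Note on the register split (summary, not authoritative): each GEN8_PPAT()
 * entry occupies 8 bits of one 64-bit PAT value (entry n at bits [8n+7:8n]),
 * while the hardware exposes it as two 32-bit registers; hence the low half
 * goes to GEN8_PRIVATE_PAT_LO and pat >> 32 to GEN8_PRIVATE_PAT_HI above.
 */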
2701 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2706 * Map WB on BDW to snooped on CHV.
2708 * Only the snoop bit has meaning for CHV, the rest is
2711 * The hardware will never snoop for certain types of accesses:
2712 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2713 * - PPGTT page tables
2714 * - some other special cycles
2716 * As with BDW, we also need to consider the following for GT accesses:
2717 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2718 * so RTL will always use the value corresponding to
2720 * Which means we must set the snoop bit in PAT entry 0
2721 * in order to keep the global status page working.
2723 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2727 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2728 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2729 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2730 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2732 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2733 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
2736 static void gen6_gmch_remove(struct i915_address_space *vm)
2738 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2741 cleanup_scratch_page(vm);
2744 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2746 struct drm_i915_private *dev_priv = ggtt->base.i915;
2747 struct pci_dev *pdev = dev_priv->drm.pdev;
2751 /* TODO: We're not aware of mappable constraints on gen8 yet */
2752 ggtt->mappable_base = pci_resource_start(pdev, 2);
2753 ggtt->mappable_end = pci_resource_len(pdev, 2);
2755 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
2756 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2758 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
2760 if (INTEL_GEN(dev_priv) >= 9) {
2761 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
2762 size = gen8_get_total_gtt_size(snb_gmch_ctl);
2763 } else if (IS_CHERRYVIEW(dev_priv)) {
2764 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
2765 size = chv_get_total_gtt_size(snb_gmch_ctl);
2767 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
2768 size = gen8_get_total_gtt_size(snb_gmch_ctl);
2771 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
2773 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
2774 chv_setup_private_ppat(dev_priv);
2776 bdw_setup_private_ppat(dev_priv);
2778 ggtt->base.cleanup = gen6_gmch_remove;
2779 ggtt->base.bind_vma = ggtt_bind_vma;
2780 ggtt->base.unbind_vma = ggtt_unbind_vma;
2781 ggtt->base.insert_page = gen8_ggtt_insert_page;
2782 ggtt->base.clear_range = nop_clear_range;
2783 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
2784 ggtt->base.clear_range = gen8_ggtt_clear_range;
2786 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
2788 ggtt->invalidate = gen6_ggtt_invalidate;
2790 return ggtt_probe_common(ggtt, size);
2793 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
2795 struct drm_i915_private *dev_priv = ggtt->base.i915;
2796 struct pci_dev *pdev = dev_priv->drm.pdev;
2800 ggtt->mappable_base = pci_resource_start(pdev, 2);
2801 ggtt->mappable_end = pci_resource_len(pdev, 2);
2803 /* 64/512MB is the current min/max we actually know of, but this is just
2804 * a coarse sanity check.
2806 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
2807 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
2811 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
2812 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2813 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
2815 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
2817 size = gen6_get_total_gtt_size(snb_gmch_ctl);
2818 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
2820 ggtt->base.clear_range = gen6_ggtt_clear_range;
2821 ggtt->base.insert_page = gen6_ggtt_insert_page;
2822 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
2823 ggtt->base.bind_vma = ggtt_bind_vma;
2824 ggtt->base.unbind_vma = ggtt_unbind_vma;
2825 ggtt->base.cleanup = gen6_gmch_remove;
2827 ggtt->invalidate = gen6_ggtt_invalidate;
2829 if (HAS_EDRAM(dev_priv))
2830 ggtt->base.pte_encode = iris_pte_encode;
2831 else if (IS_HASWELL(dev_priv))
2832 ggtt->base.pte_encode = hsw_pte_encode;
2833 else if (IS_VALLEYVIEW(dev_priv))
2834 ggtt->base.pte_encode = byt_pte_encode;
2835 else if (INTEL_GEN(dev_priv) >= 7)
2836 ggtt->base.pte_encode = ivb_pte_encode;
2838 ggtt->base.pte_encode = snb_pte_encode;
2840 return ggtt_probe_common(ggtt, size);
2843 static void i915_gmch_remove(struct i915_address_space *vm)
2845 intel_gmch_remove();
2848 static int i915_gmch_probe(struct i915_ggtt *ggtt)
2850 struct drm_i915_private *dev_priv = ggtt->base.i915;
2853 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
2855 DRM_ERROR("failed to set up gmch\n");
2859 intel_gtt_get(&ggtt->base.total,
2861 &ggtt->mappable_base,
2862 &ggtt->mappable_end);
2864 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
2865 ggtt->base.insert_page = i915_ggtt_insert_page;
2866 ggtt->base.insert_entries = i915_ggtt_insert_entries;
2867 ggtt->base.clear_range = i915_ggtt_clear_range;
2868 ggtt->base.bind_vma = ggtt_bind_vma;
2869 ggtt->base.unbind_vma = ggtt_unbind_vma;
2870 ggtt->base.cleanup = i915_gmch_remove;
2872 ggtt->invalidate = gmch_ggtt_invalidate;
2874 if (unlikely(ggtt->do_idle_maps))
2875 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2881 * i915_ggtt_probe_hw - Probe GGTT hardware location
2882 * @dev_priv: i915 device
2884 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
2886 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2889 ggtt->base.i915 = dev_priv;
2890 ggtt->base.dma = &dev_priv->drm.pdev->dev;
2892 if (INTEL_GEN(dev_priv) <= 5)
2893 ret = i915_gmch_probe(ggtt);
2894 else if (INTEL_GEN(dev_priv) < 8)
2895 ret = gen6_gmch_probe(ggtt);
2897 ret = gen8_gmch_probe(ggtt);
2901 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
2902 * This is easier than doing range restriction on the fly, as we
2903 * currently don't have any bits spare to pass in this upper
2906 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
2907 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
2908 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
2911 if ((ggtt->base.total - 1) >> 32) {
2912 DRM_ERROR("We never expected a Global GTT with more than 32bits"
2913 " of address space! Found %lldM!\n",
2914 ggtt->base.total >> 20);
2915 ggtt->base.total = 1ULL << 32;
2916 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
2919 if (ggtt->mappable_end > ggtt->base.total) {
2920 DRM_ERROR("mappable aperture extends past end of GGTT,"
2921 " aperture=%llx, total=%llx\n",
2922 ggtt->mappable_end, ggtt->base.total);
2923 ggtt->mappable_end = ggtt->base.total;
2926 /* GMADR is the PCI mmio aperture into the global GTT. */
2927 DRM_INFO("Memory usable by graphics device = %lluM\n",
2928 ggtt->base.total >> 20);
2929 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
2930 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
2931 #ifdef CONFIG_INTEL_IOMMU
2932 if (intel_iommu_gfx_mapped)
2933 DRM_INFO("VT-d active for gfx access\n");
2940 * i915_ggtt_init_hw - Initialize GGTT hardware
2941 * @dev_priv: i915 device
2943 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
2945 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2948 INIT_LIST_HEAD(&dev_priv->vm_list);
2950 /* Note that we use page colouring to enforce a guard page at the
2951 * end of the address space. This is required as the CS may prefetch
2952 * beyond the end of the batch buffer, across the page boundary,
2953 * and beyond the end of the GTT if we do not provide a guard.
2955 mutex_lock(&dev_priv->drm.struct_mutex);
2956 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
2957 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
2958 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
2959 mutex_unlock(&dev_priv->drm.struct_mutex);
2961 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
2962 dev_priv->ggtt.mappable_base,
2963 dev_priv->ggtt.mappable_end)) {
2965 goto out_gtt_cleanup;
2968 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
2971 * Initialise stolen early so that we may reserve preallocated
2972 * objects for the BIOS to KMS transition.
2974 ret = i915_gem_init_stolen(dev_priv);
2976 goto out_gtt_cleanup;
2981 ggtt->base.cleanup(&ggtt->base);
2985 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
2987 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
2993 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
2995 i915->ggtt.invalidate = guc_ggtt_invalidate;
2998 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3000 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3003 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3005 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3006 struct drm_i915_gem_object *obj, *on;
3008 i915_check_and_clear_faults(dev_priv);
3010 /* First fill our portion of the GTT with scratch pages */
3011 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
3013 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3015 /* clflush objects bound into the GGTT and rebind them. */
3016 list_for_each_entry_safe(obj, on,
3017 &dev_priv->mm.bound_list, global_link) {
3018 bool ggtt_bound = false;
3019 struct i915_vma *vma;
3021 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3022 if (vma->vm != &ggtt->base)
3025 if (!i915_vma_unbind(vma))
3028 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3034 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3037 ggtt->base.closed = false;
3039 if (INTEL_GEN(dev_priv) >= 8) {
3040 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3041 chv_setup_private_ppat(dev_priv);
3043 bdw_setup_private_ppat(dev_priv);
3048 if (USES_PPGTT(dev_priv)) {
3049 struct i915_address_space *vm;
3051 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3052 struct i915_hw_ppgtt *ppgtt;
3054 if (i915_is_ggtt(vm))
3055 ppgtt = dev_priv->mm.aliasing_ppgtt;
3057 ppgtt = i915_vm_to_ppgtt(vm);
3059 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
3063 i915_ggtt_invalidate(dev_priv);
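/*
 * Illustrative traversal of rotate_pages() below (example values only): for
 * a 2x2 plane with stride 2, the source pages are visited column by column,
 * bottom row first, so the output sg entries reference pages in the order
 * 2, 0, 3, 1 relative to @offset, producing a rotated page layout that the
 * display engine can scan out linearly.
 */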
3066 static struct scatterlist *
3067 rotate_pages(const dma_addr_t *in, unsigned int offset,
3068 unsigned int width, unsigned int height,
3069 unsigned int stride,
3070 struct sg_table *st, struct scatterlist *sg)
3072 unsigned int column, row;
3073 unsigned int src_idx;
3075 for (column = 0; column < width; column++) {
3076 src_idx = stride * (height - 1) + column;
3077 for (row = 0; row < height; row++) {
3079 /* We don't need the pages, but need to initialize
3080 * the entries so the sg list can be happily traversed.
3081 * All we need are the DMA addresses.
3083 sg_set_page(sg, NULL, PAGE_SIZE, 0);
3084 sg_dma_address(sg) = in[offset + src_idx];
3085 sg_dma_len(sg) = PAGE_SIZE;
3094 static noinline struct sg_table *
3095 intel_rotate_pages(struct intel_rotation_info *rot_info,
3096 struct drm_i915_gem_object *obj)
3098 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
3099 unsigned int size = intel_rotation_info_size(rot_info);
3100 struct sgt_iter sgt_iter;
3101 dma_addr_t dma_addr;
3103 dma_addr_t *page_addr_list;
3104 struct sg_table *st;
3105 struct scatterlist *sg;
3108 /* Allocate a temporary list of source pages for random access. */
3109 page_addr_list = drm_malloc_gfp(n_pages,
3112 if (!page_addr_list)
3113 return ERR_PTR(ret);
3115 /* Allocate target SG list. */
3116 st = kmalloc(sizeof(*st), GFP_KERNEL);
3120 ret = sg_alloc_table(st, size, GFP_KERNEL);
3124 /* Populate source page list from the object. */
3126 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3127 page_addr_list[i++] = dma_addr;
3129 GEM_BUG_ON(i != n_pages);
3133 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3134 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3135 rot_info->plane[i].width, rot_info->plane[i].height,
3136 rot_info->plane[i].stride, st, sg);
3139 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3140 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3142 drm_free_large(page_addr_list);
3149 drm_free_large(page_addr_list);
3151 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3152 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3154 return ERR_PTR(ret);
3157 static noinline struct sg_table *
3158 intel_partial_pages(const struct i915_ggtt_view *view,
3159 struct drm_i915_gem_object *obj)
3161 struct sg_table *st;
3162 struct scatterlist *sg, *iter;
3163 unsigned int count = view->partial.size;
3164 unsigned int offset;
3167 st = kmalloc(sizeof(*st), GFP_KERNEL);
3171 ret = sg_alloc_table(st, count, GFP_KERNEL);
3175 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3183 len = min(iter->length - (offset << PAGE_SHIFT),
3184 count << PAGE_SHIFT);
3185 sg_set_page(sg, NULL, len, 0);
3186 sg_dma_address(sg) =
3187 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3188 sg_dma_len(sg) = len;
3191 count -= len >> PAGE_SHIFT;
3198 iter = __sg_next(iter);
3205 return ERR_PTR(ret);
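/*
 * Hedged summary of intel_partial_pages() above: a partial view maps only
 * view->partial.size pages starting at view->partial.offset within the
 * object, copying DMA addresses from the object's sg_table and clipping the
 * source entries to the requested range, so that only the accessed sub-range
 * of a large object needs to be bound (e.g. for a partial GTT mmap fault).
 */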
3209 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3213 /* The vma->pages are only valid within the lifespan of the borrowed
3214 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, the
3215 * vma->pages must be rebuilt as well. A simple rule is that vma->pages must only
3216 * be accessed when the obj->mm.pages are pinned.
3218 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3220 switch (vma->ggtt_view.type) {
3221 case I915_GGTT_VIEW_NORMAL:
3222 vma->pages = vma->obj->mm.pages;
3225 case I915_GGTT_VIEW_ROTATED:
3227 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3230 case I915_GGTT_VIEW_PARTIAL:
3231 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3235 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3236 vma->ggtt_view.type);
3241 if (unlikely(IS_ERR(vma->pages))) {
3242 ret = PTR_ERR(vma->pages);
3244 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3245 vma->ggtt_view.type, ret);
3251 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3252 * @vm: the &struct i915_address_space
3253 * @node: the &struct drm_mm_node (typically i915_vma.node)
3254 * @size: how much space to allocate inside the GTT,
3255 * must be #I915_GTT_PAGE_SIZE aligned
3256 * @offset: where to insert inside the GTT,
3257 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3258 * (@offset + @size) must fit within the address space
3259 * @color: color to apply to node, if this node is not from a VMA,
3260 * color must be #I915_COLOR_UNEVICTABLE
3261 * @flags: control search and eviction behaviour
3263 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3264 * the address space (using @size and @color). If the @node does not fit, it
3265 * tries to evict any overlapping nodes from the GTT, including any
3266 * neighbouring nodes if the colors do not match (to ensure guard pages between
3267 * differing domains). See i915_gem_evict_for_node() for the gory details
3268 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3269 * evicting active overlapping objects, and any overlapping node that is pinned
3270 * or marked as unevictable will also result in failure.
3272 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3273 * asked to wait for eviction and interrupted.
3275 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3276 struct drm_mm_node *node,
3277 u64 size, u64 offset, unsigned long color,
3283 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3284 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3285 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3286 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
3287 GEM_BUG_ON(drm_mm_node_allocated(node));
3290 node->start = offset;
3291 node->color = color;
3293 err = drm_mm_reserve_node(&vm->mm, node);
3297 err = i915_gem_evict_for_node(vm, node, flags);
3299 err = drm_mm_reserve_node(&vm->mm, node);
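/*
 * Hedged usage sketch (illustrative only, not part of the driver): how a
 * caller might use i915_gem_gtt_reserve() to claim a fixed GGTT slot.
 * example_reserve_slot() is a hypothetical helper; the size and offset are
 * invented for illustration.
 */
static int __maybe_unused example_reserve_slot(struct i915_ggtt *ggtt,
					       struct drm_mm_node *node)
{
	lockdep_assert_held(&ggtt->base.i915->drm.struct_mutex);

	/* One GTT page at offset 1 MiB; don't wait on active overlapping nodes. */
	return i915_gem_gtt_reserve(&ggtt->base, node,
				    I915_GTT_PAGE_SIZE, 1024 * 1024,
				    I915_COLOR_UNEVICTABLE, PIN_NONBLOCK);
}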
3304 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3308 GEM_BUG_ON(range_overflows(start, len, end));
3309 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3311 range = round_down(end - len, align) - round_up(start, align);
3313 if (sizeof(unsigned long) == sizeof(u64)) {
3314 addr = get_random_long();
3316 addr = get_random_int();
3317 if (range > U32_MAX) {
3319 addr |= get_random_int();
3322 div64_u64_rem(addr, range, &addr);
3326 return round_up(start, align);
3330 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
3331 * @vm: the &struct i915_address_space
3332 * @node: the &struct drm_mm_node (typically i915_vma.node)
3333 * @size: how much space to allocate inside the GTT,
3334 * must be #I915_GTT_PAGE_SIZE aligned
3335 * @alignment: required alignment of starting offset, may be 0 but
3336 * if specified, this must be a power-of-two and at least
3337 * #I915_GTT_MIN_ALIGNMENT
3338 * @color: color to apply to node
3339 * @start: start of any range restriction inside GTT (0 for all),
3340 * must be #I915_GTT_PAGE_SIZE aligned
3341 * @end: end of any range restriction inside GTT (U64_MAX for all),
3342 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3343 * @flags: control search and eviction behaviour
3345 * i915_gem_gtt_insert() first searches for an available hole into which
3346 * it can insert the node. The hole address is aligned to @alignment and
3347 * its @size must then fit entirely within the [@start, @end] bounds. The
3348 * nodes on either side of the hole must match @color, or else a guard page
3349 * will be inserted between the two nodes (or the node evicted). If no
3350 * suitable hole is found, first a victim is selected at random and tested
3351 * for eviction; failing that, the LRU list of objects within the GTT
3352 * is scanned to find the first set of replacement nodes to create the hole.
3353 * Those old overlapping nodes are evicted from the GTT (and so must be
3354 * rebound before any future use). Any node that is currently pinned cannot
3355 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3356 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3357 * searching for an eviction candidate. See i915_gem_evict_something() for
3358 * the gory details on the eviction algorithm.
3360 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3361 * asked to wait for eviction and interrupted.
3363 int i915_gem_gtt_insert(struct i915_address_space *vm,
3364 struct drm_mm_node *node,
3365 u64 size, u64 alignment, unsigned long color,
3366 u64 start, u64 end, unsigned int flags)
3368 enum drm_mm_insert_mode mode;
3372 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3374 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3375 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3376 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3377 GEM_BUG_ON(start >= end);
3378 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3379 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3380 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
3381 GEM_BUG_ON(drm_mm_node_allocated(node));
3383 if (unlikely(range_overflows(start, size, end)))
3386 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3389 mode = DRM_MM_INSERT_BEST;
3390 if (flags & PIN_HIGH)
3391 mode = DRM_MM_INSERT_HIGH;
3392 if (flags & PIN_MAPPABLE)
3393 mode = DRM_MM_INSERT_LOW;
3395 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3396 * so we know that we always have a minimum alignment of 4096.
3397 * The drm_mm range manager is optimised to return results
3398 * with zero alignment, so where possible use the optimal
3401 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3402 if (alignment <= I915_GTT_MIN_ALIGNMENT)
3405 err = drm_mm_insert_node_in_range(&vm->mm, node,
3406 size, alignment, color,
3411 /* No free space, pick a slot at random.
3413 * There is a pathological case here using a GTT shared between
3414 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3416 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3417 * (64k objects) (448k objects)
3419 * Now imagine that the eviction LRU is ordered top-down (just because
3420 * pathology meets real life), and that we need to evict an object to
3421 * make room inside the aperture. The eviction scan then has to walk
3422 * the 448k list before it finds one within range. And now imagine that
3423 * it has to search for a new hole between every byte inside the memcpy,
3424 * for several simultaneous clients.
3426 * On a full-ppgtt system, if we have run out of available space, there
3427 * will be lots and lots of objects in the eviction list! Again,
3428 * searching that LRU list may be slow if we are also applying any
3429 * range restrictions (e.g. restriction to low 4GiB) and so, for
3430 * simplicity and similarity between the different GTTs, try the single
3431 * random replacement first.
3433 offset = random_offset(start, end,
3434 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3435 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3439 /* Randomly selected placement is pinned, do a search */
3440 err = i915_gem_evict_something(vm, size, alignment, color,
3445 return drm_mm_insert_node_in_range(&vm->mm, node,
3446 size, alignment, color,
3447 start, end, DRM_MM_INSERT_EVICT);
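/*
 * Hedged usage sketch (illustrative only, not part of the driver): how a
 * caller might use i915_gem_gtt_insert() to find space anywhere inside the
 * CPU-mappable aperture. example_insert_mappable() is a hypothetical helper;
 * the size is invented for illustration.
 */
static int __maybe_unused example_insert_mappable(struct i915_ggtt *ggtt,
						  struct drm_mm_node *node)
{
	lockdep_assert_held(&ggtt->base.i915->drm.struct_mutex);

	/* 64 KiB, default alignment, biased low so the node stays mappable. */
	return i915_gem_gtt_insert(&ggtt->base, node,
				   64 * 1024, 0, I915_COLOR_UNEVICTABLE,
				   0, ggtt->mappable_end, PIN_MAPPABLE);
}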
3450 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3451 #include "selftests/mock_gtt.c"
3452 #include "selftests/i915_gem_gtt.c"