/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	uint32_t *pt_vaddr, scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	/* Point every PTE in the range at the scratch page. */
	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		/* Continue at the start of the next page table. */
		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}
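
/*
 * Set up the aliasing ppgtt: allocate one page of PTEs per page-directory
 * entry, DMA-map the page tables when the chipset requires it, and point
 * the entire address space at the scratch page. The PDEs themselves live
 * in entries stolen from the top of the global GTT (see pd_offset below).
 */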
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end
	 * for now. */
	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
					     *ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;

		for (i = 0; i < ppgtt->num_pd_entries; i++) {
			dma_addr_t pt_addr;

			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;
			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		}
	}

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(uint32_t);
	dev_priv->mm.aliasing_ppgtt = ppgtt;
	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);
	return ret;
}
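
/* Tear down the aliasing ppgtt: unmap the page tables (if they were
 * DMA-mapped) and free every page allocated above. */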
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}
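
/*
 * Write one PTE per 4 KiB page of the object, walking its scatter/gather
 * list and hopping to the next page table whenever an I915_PPGTT_PT_ENTRIES
 * boundary is crossed. pte_flags carries the validity and cache bits
 * chosen by the caller.
 */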
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 const struct sg_table *pages,
					 unsigned first_entry,
					 uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = pages->sgl;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;

	while (i < pages->nents) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[j] = pte | pte_flags;

			/* grab the next page */
			if (++m == segment_len) {
				if (++i == pages->nents)
					break;

				sg = sg_next(sg);
				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pd++;
	}
}
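
/*
 * Translate the object's cache level into gen6+ PPGTT PTE cache-control
 * bits. Haswell encodes both the LLC+MLC and the uncached case differently
 * from earlier gens, hence the IS_HASWELL() special cases below.
 */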
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	uint32_t pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		/* Haswell doesn't set L3 this way */
		if (IS_HASWELL(obj->base.dev))
			pte_flags |= GEN6_PTE_CACHE_LLC;
		else
			pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(obj->base.dev))
			pte_flags |= HSW_PTE_UNCACHED;
		else
			pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	i915_ppgtt_insert_sg_entries(ppgtt,
				     obj->pages,
				     obj->gtt_space->start >> PAGE_SHIFT,
				     pte_flags);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fallthrough and request the PTE simply
		 * as cached.
		 */
		if (INTEL_INFO(dev)->gen >= 6 && !IS_HASWELL(dev))
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}
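
/*
 * On chipsets that set do_idle_maps (certain VT-d configurations) the GPU
 * must be idled before GTT mappings are torn down, or the machine may
 * hang. do_idling() quiesces the GPU and returns the previous
 * mm.interruptible state; undo_idling() restores it.
 */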
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}
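
/*
 * Ensure the object's backing pages are DMA-mapped before any GTT PTEs
 * point at them. Objects that already own a DMA mapping (has_dma_mapping)
 * are left alone.
 */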
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	intel_gtt_insert_sg_entries(obj->pages,
				    obj->gtt_space->start >> PAGE_SHIFT,
				    agp_type);
	obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}
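
/*
 * drm_mm color callback: keep objects of different cache levels out of
 * adjacent GTT pages by shrinking a candidate hole one page at either end
 * whenever a neighbouring node's color (here: cache level) differs.
 */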
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
void i915_gem_init_global_gtt(struct drm_device *dev,
			      unsigned long start,
			      unsigned long mappable_end,
			      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}