/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

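/*
 * The GGTT is idle only when no engine still has a request outstanding on
 * its GGTT timeline.
 */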
static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}

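/*
 * Add a vma to the eviction scan, keeping it on the unwind list so the
 * scan can be rolled back if no suitable hole is found.
 */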
static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

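/*
 * Illustrative caller pattern for i915_gem_evict_something() (a sketch, not
 * a verbatim copy of the binding code): attempt the allocation first, fall
 * back to eviction on -ENOSPC, then retry the insertion into the hole just
 * created:
 *
 *	err = drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
 *					  colour, start, end, mode);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_something(vm, size, alignment, colour,
 *					       start, end, flags);
 *		if (err == 0)
 *			err = drm_mm_insert_node_in_range(&vm->mm, node,
 *							  size, alignment,
 *							  colour, start, end,
 *							  DRM_MM_INSERT_EVICT);
 *	}
 */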
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it
 * ignores only pinned vmas, and not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
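	/* Pick the scan direction: PIN_MAPPABLE allocations must lie within
	 * the CPU-visible aperture at the bottom of the GGTT, so search
	 * bottom-up; PIN_HIGH packs from the top to keep the mappable range
	 * free.
	 */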
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end, mode);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(dev_priv);
	else
		phases[1] = NULL; /* cannot stall, skip the active list */

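	/* Walk the inactive then the active list, oldest first, feeding
	 * candidates into the drm_mm scan until it reports a hole big enough
	 * for the request.
	 */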
search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(&scan, vma, flags, &eviction_list))
				goto found;
	} while (*++phase);

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);

		INIT_LIST_HEAD(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
	}

	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev_priv);
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

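	/* The scan may also have pulled in neighbouring nodes to satisfy
	 * colouring (guard page) constraints; unbind those overlaps as well.
	 */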
	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}

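/*
 * Illustrative caller pattern for i915_gem_evict_for_node() (a sketch, not a
 * verbatim copy): reserving a fixed range, e.g. for a softpinned
 * (EXEC_OBJECT_PINNED) buffer, clears any overlapping vmas before retrying
 * the reservation:
 *
 *	err = drm_mm_reserve_node(&vm->mm, node);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_for_node(vm, node, flags);
 *		if (err == 0)
 *			err = drm_mm_reserve_node(&vm->mm, node);
 *	}
 */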
/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_node(vm, target, flags);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_gem_retire_requests(vm->i915);

	check_color = vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start > vm->start)
			start -= I915_GTT_PAGE_SIZE;
		if (end < vm->start + vm->total)
			end += I915_GTT_PAGE_SIZE;
	}

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (vma->node.start + vma->node.size == target->start) {
				if (vma->node.color == target->color)
					continue;
			}
			if (vma->node.start == target->start + target->size) {
				if (vma->node.color == target->color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
			ret = -ENOSPC;
			if (vma->exec_entry &&
			    vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->exec_list, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle must be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * vm.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = vm->i915;

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

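	/* Having idled (if requested), every unpinned vma now rests on the
	 * inactive list and can be unbound without stalling.
	 */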
	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}