/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size,
			     unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
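Both eviction entry points (declared in the i915_gem_evict.c block of i915_drv.h) gain a purgeable_only flag: when set, only buffers that userspace has already marked purgeable through the I915_GEM_MADVISE ioctl (obj->madv != I915_MADV_WILLNEED, as the i915_gem_evict_inactive() hunk below tests) are unbound, so the first eviction pass can reclaim space without discarding anything a client still needs.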
	int ret, i, retry;

	/* attempt to pin all of the buffers into the GTT */
-	for (retry = 0; retry < 2; retry++) {
+	retry = 0;
+	do {
		ret = 0;
		for (i = 0; i < count; i++) {
			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
...
			while (i--)
				i915_gem_object_unpin(object_list[i]);

-		if (ret == 0)
-			break;
-
-		if (ret != -ENOSPC || retry)
+		if (ret != -ENOSPC || retry > 1)
			return ret;

-		ret = i915_gem_evict_everything(dev);
+		/* First attempt, just clear anything that is purgeable.
+		 * Second attempt, clear the entire GTT.
+		 */
+		ret = i915_gem_evict_everything(dev, retry == 0);
		if (ret)
			return ret;

-	}
-	return 0;
+		retry++;
+	} while (1);
}
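Note that the rewritten loop has no explicit success test: if every buffer pins, ret is 0, 0 != -ENOSPC, and the first conditional returns it. Only -ENOSPC falls through to eviction, purgeable-only on pass 0 and a full clear on pass 1; a third -ENOSPC (retry > 1) gives up. A minimal standalone sketch of that control flow, as a userspace analogue with hypothetical try_pin_all() and evict() stand-ins (not the driver code itself):

#include <errno.h>
#include <stdio.h>

static int space;				/* free "GTT" slots */

/* Hypothetical stand-in: pinning succeeds once 4 slots are free. */
static int try_pin_all(void)
{
	return space >= 4 ? 0 : -ENOSPC;
}

/* Hypothetical stand-in: a purgeable-only pass frees less space. */
static int evict(int purgeable_only)
{
	space += purgeable_only ? 2 : 8;
	return 0;
}

static int pin_with_retry(void)
{
	int retry = 0;

	do {
		int ret = try_pin_all();
		if (ret != -ENOSPC || retry > 1)
			return ret;	/* 0 (success) or a hard error */

		/* Pass 0: evict only purgeable objects.
		 * Pass 1: clear the whole aperture.
		 */
		ret = evict(retry == 0);
		if (ret)
			return ret;

		retry++;
	} while (1);
}

int main(void)
{
	printf("pin_with_retry() = %d\n", pin_with_retry());
	return 0;
}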
	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_gem_evict_inactive(dev);
+		ret = i915_gem_evict_inactive(dev, false);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, bool mappable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
...
	/* Re-check for free space after retiring requests */
	if (mappable) {
		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						min_size, alignment, 0,
						dev_priv->mm.gtt_mappable_end,
						0))
			return 0;
	}
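For mappable buffers the free-space re-check is restricted to the bottom of the GTT: the search range runs from 0 to dev_priv->mm.gtt_mappable_end, so only addresses the CPU can reach through the aperture count as usable, and the trailing 0 is drm_mm's best_match flag, asking for any fit rather than the tightest one. The non-mappable branch (not shown in this excerpt) presumably performs the same check over the whole GTT.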
int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
...
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

-	ret = i915_gem_evict_inactive(dev);
-	if (ret)
-		return ret;
-
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
-	BUG_ON(!lists_empty);
-
-	return 0;
+	return i915_gem_evict_inactive(dev, purgeable_only);
}
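The function body collapses to a single call. The old sequence evicted everything and then asserted that the inactive, flushing and all three ring active lists had drained, an invariant that no longer holds when only purgeable buffers are released, so that post-eviction BUG_ON goes away. The BUG_ON on the flushing list above is kept, since it checks state before eviction rather than after.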
/** Unbinds all inactive objects. */
int
-i915_gem_evict_inactive(struct drm_device *dev)
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	while (!list_empty(&dev_priv->mm.inactive_list)) {
-		struct drm_gem_object *obj;
-		int ret;
-
-		obj = &list_first_entry(&dev_priv->mm.inactive_list,
-					struct drm_i915_gem_object,
-					mm_list)->base;
-
-		ret = i915_gem_object_unbind(obj);
-		if (ret != 0) {
-			DRM_ERROR("Error unbinding object: %d\n", ret);
-			return ret;
+	struct drm_i915_gem_object *obj, *next;
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list, mm_list) {
+		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+			int ret = i915_gem_object_unbind(&obj->base);
+			if (ret)
+				return ret;
		}
	}
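Two details in the replacement loop: it walks the list with list_for_each_entry_safe(), because i915_gem_object_unbind() takes the object off mm.inactive_list and a plain iterator would chase a stale link; and "purgeable" is expressed as obj->madv != I915_MADV_WILLNEED, i.e. anything userspace has madvised it can live without. A userspace analogue of the safe-removal pattern (hypothetical struct and helper, not the kernel list API, which caches the same next pointer via its second cursor argument):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int willneed;		/* analogue of madv == I915_MADV_WILLNEED */
	struct obj *next;
};

/* Unlink and free every purgeable (non-willneed) object. */
static struct obj *evict_purgeable(struct obj *head)
{
	struct obj **link = &head;
	struct obj *n, *next;

	for (n = head; n; n = next) {
		next = n->next;		/* cache before n may be freed */
		if (!n->willneed) {
			*link = next;	/* unlink n ... */
			free(n);	/* ... then release it */
		} else {
			link = &n->next;
		}
	}
	return head;
}

int main(void)
{
	struct obj *head = NULL, *n;
	int i;

	for (i = 0; i < 4; i++) {	/* four objects, evens purgeable */
		n = malloc(sizeof(*n));
		n->willneed = i & 1;
		n->next = head;
		head = n;
	}

	head = evict_purgeable(head);
	for (n = head; n; n = n->next)
		printf("kept willneed=%d\n", n->willneed);
	return 0;
}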