uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;
switch (list) {
case ACTIVE_LIST:
seq_puts(m, "Active:\n");
- head = &dev_priv->mm.active_list;
+ head = &vm->active_list;
break;
case INACTIVE_LIST:
seq_puts(m, "Inactive:\n");
- head = &dev_priv->mm.inactive_list;
+ head = &vm->inactive_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
u32 count, mappable_count, purgeable_count;
size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
int ret;
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.active_list, mm_list);
+ count_objects(&vm->active_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.inactive_list, mm_list);
+ count_objects(&vm->inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
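
For context, count_objects() is a helper macro local to i915_debugfs.c: it walks the given list through the named link member and accumulates the count/size totals printed above. A sketch of its shape (illustrative paraphrase; the in-tree body may differ in detail):

	#define count_objects(list, member) do { \
		list_for_each_entry(obj, list, member) { \
			size += i915_gem_obj_ggtt_size(obj); \
			++count; \
			if (obj->map_and_fenceable) { \
				mappable_size += i915_gem_obj_ggtt_size(obj); \
				++mappable_count; \
			} \
		} \
	} while (0)
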
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
int ret;
DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
- list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+ list_for_each_entry_safe(obj, next, &vm->inactive_list,
+ mm_list)
if (obj->pin_count == 0) {
ret = i915_gem_object_unbind(obj);
if (ret)
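
The _safe iterator is load-bearing here: i915_gem_object_unbind() removes obj from vm->inactive_list, so a plain list_for_each_entry() would follow a link the loop body just invalidated. The _safe variant caches the next entry before running the body; roughly, from include/linux/list.h of this era (exact form varies by kernel version):

	#define list_for_each_entry_safe(pos, n, head, member) \
		for (pos = list_entry((head)->next, typeof(*pos), member), \
		     n = list_entry(pos->member.next, typeof(*pos), member); \
		     &pos->member != (head); \
		     pos = n, n = list_entry(n->member.next, typeof(*n), member))
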
struct page *page;
} scratch;
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level);
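
With this hunk, each address space owns its LRUs next to its drm_mm allocator, so code operating on a VM no longer reaches back into dev_priv->mm for them. The resulting shape of the struct is approximately the following (unrelated members elided with /* ... */; see i915_drv.h for the full definition):

	struct i915_address_space {
		struct drm_mm mm;	/* VA allocator, scanned during eviction */
		/* ... */
		struct {
			/* ... */
			struct page *page;
		} scratch;

		struct list_head active_list;	/* moved from struct i915_gem_mm */
		struct list_head inactive_list;	/* moved from struct i915_gem_mm */

		/* FIXME: Need a more generic return type */
		gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
					     enum i915_cache_level level);
	};
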
struct shrinker inactive_shrinker;
bool shrinker_no_lock_stealing;
- /**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
long count = 0;
list_for_each_entry_safe(obj, next,
}
}
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
+ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_unbind(obj) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj->mm_list, &vm->active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
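
This move, like the fault-time LRU bump further down, relies on list_move_tail() being an O(1) splice: the lists stay ordered oldest-first at the head, and LRU maintenance never has to walk them. As defined in include/linux/list.h (modulo kernel version):

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del_entry(list);		/* unlink from whatever list it is on */
		list_add_tail(list, head);	/* relink as the newest entry */
	}
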
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj->mm_list, &vm->inactive_list);
list_del_init(&obj->ring_list);
obj->ring = NULL;
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
int i;
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj,
- &dev_priv->mm.inactive_list,
- mm_list)
- {
+ list_for_each_entry(obj, &vm->inactive_list, mm_list)
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
- }
/* The fence registers are invalidated so clear them out */
i915_gem_reset_fences(dev);
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
size_t gtt_max = map_and_fenceable ?
}
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_add_tail(&obj->mm_list, &vm->inactive_list);
fenceable =
i915_gem_obj_ggtt_size(obj) == fence_size &&
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj->mm_list,
+ &dev_priv->gtt.base.inactive_list);
return 0;
}
return ret;
}
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
SLAB_HWCACHE_ALIGN,
NULL);
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
+ INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
+ list_for_each_entry(obj, &vm->inactive_list, mm_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
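
Two independent pin counts gate the count here: obj->pin_count pins the GTT binding (the object cannot be unbound), while obj->pages_pin_count pins the backing pages (they cannot be dropped). A hypothetical helper, not in the tree, restating the two loops above just to make the reclaim criteria explicit:

	static long count_reclaimable(struct drm_i915_private *dev_priv,
				      struct i915_address_space *vm)
	{
		struct drm_i915_gem_object *obj;
		long cnt = 0;

		/* unbound objects: reclaimable once their pages are unpinned */
		list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
			if (obj->pages_pin_count == 0)
				cnt += obj->base.size >> PAGE_SHIFT;

		/* bound-but-idle objects: must also be unpinned from the GTT */
		list_for_each_entry(obj, &vm->inactive_list, mm_list)
			if (obj->pin_count == 0 && obj->pages_pin_count == 0)
				cnt += obj->base.size >> PAGE_SHIFT;

		return cnt;
	}
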
}
}
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+ list_for_each_entry(obj, &dev_priv->gtt.base.inactive_list, list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed inactive %p\n", obj);
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj;
int ret = 0;
INIT_LIST_HEAD(&unwind_list);
if (mappable)
- drm_mm_init_scan_with_range(&dev_priv->gtt.base.mm, min_size,
+ drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level, 0,
dev_priv->gtt.mappable_end);
else
- drm_mm_init_scan(&dev_priv->gtt.base.mm, min_size, alignment,
- cache_level);
+ drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ list_for_each_entry(obj, &vm->inactive_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
goto none;
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ list_for_each_entry(obj, &vm->active_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
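
These two loops drive drm_mm's roll-back eviction scan: after drm_mm_init_scan() or drm_mm_init_scan_with_range(), candidates are fed to the scanner oldest-first (inactive before active) until it reports that the blocks seen so far would coalesce into a large-enough hole; every block added must later be handed back via drm_mm_scan_remove_block() on the unwind list, and only the blocks the scanner retained are actually evicted. mark_free() is the real helper in i915_gem_evict.c; its shape is roughly (sketch, details may differ):

	static bool mark_free(struct drm_i915_gem_object *obj,
			      struct list_head *unwind)
	{
		if (obj->pin_count)
			return false;	/* pinned objects cannot be evicted */

		list_add(&obj->exec_list, unwind);
		/* true once the scanned blocks would form a suitable hole */
		return drm_mm_scan_add_block(obj->gtt_space);
	}
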
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj, *next;
bool lists_empty;
int ret;
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.active_list));
+ lists_empty = (list_empty(&vm->inactive_list) &&
+ list_empty(&vm->active_list));
if (lists_empty)
return -ENOSPC;
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list, mm_list)
+ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
if (obj->pin_count == 0)
WARN_ON(i915_gem_object_unbind(obj));
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_add_tail(&obj->mm_list, &vm->inactive_list);
return obj;
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
u32 seqno;
}
seqno = ring->get_seqno(ring, false);
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ list_for_each_entry(obj, &vm->active_list, mm_list) {
if (obj->ring != ring)
continue;
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
int i;
i = 0;
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+ list_for_each_entry(obj, &vm->active_list, mm_list)
i++;
error->active_bo_count = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
error->active_bo_count =
capture_active_bo(error->active_bo,
error->active_bo_count,
- &dev_priv->mm.active_list);
+ &vm->active_list);
if (error->pinned_bo)
error->pinned_bo_count =