int (*enable)(struct drm_device *dev);
};
+/* To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
+};
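For context only, not part of the patch: a minimal sketch of what the lifetime rule above implies at object-free time, assuming struct_mutex is held and every remaining VMA has already been unbound (its drm_mm node removed). example_release_vmas() is a hypothetical helper, not something this series adds.

static void example_release_vmas(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;

	/* A VMA never outlives its object, so tearing down whatever is
	 * left on the list is enough; no refcounting needed. */
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		list_del(&vma->vma_link);
		i915_gem_vma_destroy(vma);
	}
}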
+
struct i915_ctx_hang_stats {
/* This context had batch pending when hang was declared */
unsigned batch_pending;
const struct drm_i915_gem_object_ops *ops;
- /** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node gtt_space;
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head global_list;
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-/* Offset of the first PTE pointing to this object */
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+/* This is a temporary helper to ease the transition to real VMAs. If you see
+ * this, you're either reviewing code or bisecting it. */
+static inline struct i915_vma *
+__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
{
- return o->gtt_space.start;
+ if (list_empty(&obj->vma_list))
+ return NULL;
+ return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
}
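Once objects can be bound into more than one address space, the helper above would grow a vm argument. A hedged sketch of that future lookup, walking the object's vma_list under struct_mutex; the name i915_gem_obj_to_vma is an assumption here, not introduced by this patch.

static inline struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm)
{
	struct i915_vma *vma;

	/* Caller holds struct_mutex, which protects obj->vma_list. */
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;
}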
/* Whether or not this object is currently mapped by the translation tables */
static inline bool
i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
{
- return drm_mm_node_allocated(&o->gtt_space);
+ struct i915_vma *vma = __i915_gem_obj_to_vma(o);
+ if (vma == NULL)
+ return false;
+ return drm_mm_node_allocated(&vma->node);
+}
+
+/* Offset of the first PTE pointing to this object */
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+ BUG_ON(list_empty(&o->vma_list));
+ return __i915_gem_obj_to_vma(o)->node.start;
}
/* The size used in the translation tables may be larger than the actual size of
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
- return o->gtt_space.size;
+ BUG_ON(list_empty(&o->vma_list));
+ return __i915_gem_obj_to_vma(o)->node.size;
}
static inline void
i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
enum i915_cache_level color)
{
- o->gtt_space.color = color;
+ __i915_gem_obj_to_vma(o)->node.color = color;
}
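Note the differing contracts of the helpers above: i915_gem_obj_ggtt_bound() is safe on any object, while the offset/size helpers BUG_ON() an empty vma_list and set_color() would dereference a NULL VMA. A hypothetical caller sketch (example_dump_binding() is illustrative only):

static void example_dump_binding(struct drm_i915_gem_object *obj)
{
	/* Test for a binding first; the offset/size helpers BUG on
	 * unbound objects. */
	if (!i915_gem_obj_ggtt_bound(obj))
		return;

	DRM_DEBUG("GGTT binding: offset=0x%lx, size=0x%lx\n",
		  i915_gem_obj_ggtt_offset(obj),
		  i915_gem_obj_ggtt_size(obj));
}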
/**
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *vma;
int ret;
if (!i915_gem_obj_ggtt_bound(obj))
i915_gem_object_unpin_pages(obj);
list_del(&obj->mm_list);
- list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- drm_mm_remove_node(&obj->gtt_space);
+ vma = __i915_gem_obj_to_vma(obj);
+ list_del(&vma->vma_link);
+ drm_mm_remove_node(&vma->node);
+ i915_gem_vma_destroy(vma);
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist.
+ * NB: Until we have real VMAs there will only ever be one. */
+ WARN_ON(!list_empty(&obj->vma_list));
+ if (list_empty(&obj->vma_list))
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
return 0;
}
bool mappable, fenceable;
size_t gtt_max = map_and_fenceable ?
dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+ struct i915_vma *vma;
int ret;
+ if (WARN_ON(!list_empty(&obj->vma_list)))
+ return -EBUSY;
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
i915_gem_object_pin_pages(obj);
+ vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+ if (IS_ERR(vma)) {
+ i915_gem_object_unpin_pages(obj);
+ return PTR_ERR(vma);
+ }
+
search_free:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
- &obj->gtt_space,
+ &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max);
if (ret) {
if (ret == 0)
goto search_free;
- i915_gem_object_unpin_pages(obj);
- return ret;
+ goto err_out;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
obj->cache_level))) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_remove_node(&obj->gtt_space);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_out;
}
ret = i915_gem_gtt_prepare_object(obj);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_remove_node(&obj->gtt_space);
- return ret;
- }
+ if (ret)
+ goto err_out;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &vm->inactive_list);
+ list_add(&vma->vma_link, &obj->vma_list);
fenceable =
i915_gem_obj_ggtt_size(obj) == fence_size &&
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
+
+err_out:
+ /* The node may never have been inserted, and it must be removed
+ * before the VMA backing it is freed. */
+ if (drm_mm_node_allocated(&vma->node))
+ drm_mm_remove_node(&vma->node);
+ i915_gem_vma_destroy(vma);
+ i915_gem_object_unpin_pages(obj);
+ return ret;
}
void
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
int ret;
if (obj->cache_level == cache_level)
return 0;
}
- if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
+ if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
+ INIT_LIST_HEAD(&obj->vma_list);
obj->ops = ops;
i915_gem_object_free(obj);
}
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ vma->vm = vm;
+ vma->obj = obj;
+
+ return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+ WARN_ON(vma->node.allocated);
+ kfree(vma);
+}
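Taken together, i915_gem_vma_create()/i915_gem_vma_destroy() pair with drm_mm node insertion and removal. A minimal sketch of the full lifecycle the bind path above follows, under struct_mutex; example_bind() and its error ordering are illustrative, not part of the patch.

static int example_bind(struct drm_i915_gem_object *obj,
			struct i915_address_space *vm)
{
	struct i915_vma *vma = i915_gem_vma_create(obj, vm);
	int ret;

	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
						  obj->base.size, 0,
						  obj->cache_level,
						  0, vm->total);
	if (ret) {
		i915_gem_vma_destroy(vma); /* node never allocated */
		return ret;
	}

	/* Only link the VMA into the object once it owns GTT space. */
	list_add(&vma->vma_link, &obj->vma_list);
	return 0;
}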
+
int
i915_gem_idle(struct drm_device *dev)
{
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+
if (obj->pin_count)
return false;
list_add(&obj->exec_list, unwind);
- return drm_mm_scan_add_block(&obj->gtt_space);
+ return drm_mm_scan_add_block(&vma->node);
}
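For reference, the drm_mm scan protocol the eviction hunks below rely on: every block added with drm_mm_scan_add_block() must later be removed with drm_mm_scan_remove_block(), in LIFO order, and a true return from the remove marks a block whose eviction would produce a usable hole. A sketch under those assumptions (example_scan() is hypothetical):

static bool example_scan(struct drm_mm *mm, struct drm_mm_node *a,
			 struct drm_mm_node *b, unsigned long size)
{
	bool evict_a, evict_b;

	drm_mm_init_scan(mm, size, 0, 0); /* size, alignment, color */
	drm_mm_scan_add_block(a);
	drm_mm_scan_add_block(b);

	/* Removal must be LIFO with respect to the adds above. */
	evict_b = drm_mm_scan_remove_block(b);
	evict_a = drm_mm_scan_remove_block(a);

	return evict_a || evict_b;
}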
int
drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct list_head eviction_list, unwind_list;
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
int ret = 0;
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
-
- ret = drm_mm_scan_remove_block(&obj->gtt_space);
+ vma = __i915_gem_obj_to_vma(obj);
+ ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
list_del_init(&obj->exec_list);
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- if (drm_mm_scan_remove_block(&obj->gtt_space)) {
+ vma = __i915_gem_obj_to_vma(obj);
+ if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
continue;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
int ret;
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
- ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm,
- &obj->gtt_space);
+ ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
+ list_add(&vma->vma_link, &obj->vma_list);
}
dev_priv->gtt.base.start = start;
struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ struct i915_vma *vma;
int ret;
if (dev_priv->mm.stolen_base == 0)
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_out;
+ }
+
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
* later.
*/
- obj->gtt_space.start = gtt_offset;
- obj->gtt_space.size = size;
+ vma->node.start = gtt_offset;
+ vma->node.size = size;
if (drm_mm_initialized(&dev_priv->gtt.base.mm)) {
- ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm,
- &obj->gtt_space);
+ ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+ i915_gem_vma_destroy(vma);
goto err_out;
}
}
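The stolen path works because drm_mm_reserve_node() takes a caller-filled node: start and size are set beforehand, and the call claims exactly that range, failing if it is already occupied. A sketch of that contract (example_reserve() is hypothetical):

static int example_reserve(struct drm_mm *mm, struct drm_mm_node *node,
			   unsigned long start, unsigned long size)
{
	/* drm_mm_reserve_node() consumes a pre-filled node and claims
	 * exactly [start, start + size), or fails if the range is taken. */
	node->start = start;
	node->size = size;
	return drm_mm_reserve_node(mm, node);
}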