*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
- if (obj->ring != NULL)
- seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->last_read_req != NULL)
+ seq_printf(m, " (%s)",
+ i915_gem_request_get_ring(obj->last_read_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
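
Every conversion in this patch leans on i915_gem_request_get_ring() being NULL-safe, which is why the obj->last_read_req != NULL check above is all the guarding needed before dereferencing ->name. For reference, a minimal sketch of the helper as it reads in i915_drv.h at this point in the series:

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	/* NULL-tolerant so callers can pass last_read_req unconditionally */
	return req ? req->ring : NULL;
}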
if (ppgtt->file_priv != stats->file_priv)
continue;
- if (obj->ring) /* XXX per-vma statistic */
+ if (obj->active) /* XXX per-vma statistic */
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
} else {
if (i915_gem_obj_ggtt_bound(obj)) {
stats->global += obj->base.size;
- if (obj->ring)
+ if (obj->active)
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
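
Both accounting paths now key off obj->active instead of obj->ring. After this series the two are equivalent: move_to_active() sets the flag while last_read_req is assigned, and move_to_inactive() clears both together. A hypothetical assertion (not part of the patch) states the invariant the substitution relies on:

	/* Hypothetical: active <=> an outstanding read request exists */
	WARN_ON(!obj->active != (obj->last_read_req == NULL));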
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
- struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+ struct drm_i915_gem_request *req;
+ struct intel_engine_cs *old_ring;
BUG_ON(ring == NULL);
- if (obj->ring != ring && obj->last_write_req) {
+
+ req = intel_ring_get_request(ring);
+ old_ring = i915_gem_request_get_ring(obj->last_read_req);
+
+ if (old_ring != ring && obj->last_write_req) {
/* Keep the request relative to the current ring */
i915_gem_request_assign(&obj->last_write_req, req);
}
- obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
if (!obj->active) {
intel_fb_obj_flush(obj, true);
list_del_init(&obj->ring_list);
- obj->ring = NULL;
i915_gem_request_assign(&obj->last_read_req, NULL);
i915_gem_request_assign(&obj->last_write_req, NULL);
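
With obj->ring gone, the i915_gem_request_assign() calls carry all of the bookkeeping; the helper (introduced earlier in the series, sketched here from i915_drv.h) takes the new reference before dropping the old one, so the pointer never dangles:

static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
			struct drm_i915_gem_request *src)
{
	/* Reference the new request first, then release the old one */
	if (src)
		i915_gem_request_reference(src);

	if (*pdst)
		i915_gem_request_unreference(*pdst);

	*pdst = src;
}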
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
- struct intel_engine_cs *ring = obj->ring;
-
- if (ring == NULL)
+ if (obj->last_read_req == NULL)
return;
if (i915_gem_request_completed(obj->last_read_req, true))
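
The early-out now keys off the request itself, and the completion check needs no cached ring either, since the request carries one. Roughly how the test reads in this era (a sketch from i915_drv.h):

static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
					      bool lazy_coherency)
{
	u32 seqno;

	BUG_ON(req == NULL);

	/* The ring is reachable through the request, not the object */
	seqno = req->ring->get_seqno(req->ring, lazy_coherency);

	return i915_seqno_passed(seqno, req->seqno);
}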
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
+ struct intel_engine_cs *ring;
int ret;
if (obj->active) {
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+
ret = i915_gem_check_olr(obj->last_read_req);
if (ret)
return ret;
- i915_gem_retire_requests_ring(obj->ring);
+ i915_gem_retire_requests_ring(ring);
}
return 0;
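
The ring is still needed here for i915_gem_retire_requests_ring(), so it is recomputed from last_read_req instead of being cached on the object. i915_gem_check_olr() already takes the request directly; a sketch of its shape at this point, assuming the outstanding-lazy-request conversion earlier in the series:

static int
i915_gem_check_olr(struct drm_i915_gem_request *req)
{
	int ret;

	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));

	/* Flush the request to hardware if it is still the lazy one */
	ret = 0;
	if (req == req->ring->outstanding_lazy_request)
		ret = i915_add_request(req->ring);

	return ret;
}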
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to)
{
- struct intel_engine_cs *from = obj->ring;
+ struct intel_engine_cs *from;
u32 seqno;
int ret, idx;
+ from = i915_gem_request_get_ring(obj->last_read_req);
+
if (from == NULL || to == from)
return 0;
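
The seqno a few lines below gets the same treatment: both the ring and the seqno are derived from last_read_req through NULL-safe accessors rather than cached fields. The companion helper, sketched from i915_drv.h:

static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	/* 0 is never handed out as a seqno, so it doubles as "no request" */
	return req ? req->seqno : 0;
}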
bool was_pin_display;
int ret;
- if (pipelined != obj->ring) {
+ if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
return ret;
ret = i915_gem_object_flush_active(obj);
args->busy = obj->active;
- if (obj->ring) {
+ if (obj->last_read_req) {
+ struct intel_engine_cs *ring;
BUILD_BUG_ON(I915_NUM_RINGS > 16);
- args->busy |= intel_ring_flag(obj->ring) << 16;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
+ args->busy |= intel_ring_flag(ring) << 16;
}
drm_gem_object_unreference(&obj->base);
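
The busy ioctl packs the flag of the last-read ring into the upper half of the result, which is what the BUILD_BUG_ON guards: intel_ring_flag() is one bit per ring id (sketch from intel_ringbuffer.h), so sixteen rings is the most that fit above bit 15:

static inline u32 intel_ring_flag(struct intel_engine_cs *ring)
{
	/* One bit per ring; sixteen of these fit in busy's top half */
	return 1 << ring->id;
}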
else if (i915.enable_execlists)
return true;
else
- return ring != obj->ring;
+ return ring != i915_gem_request_get_ring(obj->last_read_req);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
} else if (IS_IVYBRIDGE(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = obj->ring;
+ ring = i915_gem_request_get_ring(obj->last_read_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
} else {
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
- work->flip_queued_ring = obj->ring;
+ work->flip_queued_ring =
+ i915_gem_request_get_ring(obj->last_write_req);
} else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
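
Note the flip path derives its ring from last_write_req rather than last_read_req: a page flip only has to wait for the last writer. Downstream the stall checker can then ask the request itself whether that render finished; roughly (a sketch of the consumer, assuming the i915_gem_request_completed() conversion from the same series):

	/* e.g. in __intel_pageflip_stall_check() */
	if (work->flip_queued_req &&
	    !i915_gem_request_completed(work->flip_queued_req, true))
		return false;	/* render for the queued flip still pending */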