struct page *page;
uint64_t *seqno;
- page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+ page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
for_each_engine_id(engine, dev_priv, id) {
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
- struct drm_i915_error_object *semaphore_obj;
+ struct drm_i915_error_object *semaphore;
struct drm_i915_error_engine {
int engine_id;
struct pci_dev *bridge_dev;
struct i915_gem_context *kernel_context;
struct intel_engine_cs engine[I915_NUM_ENGINES];
- struct drm_i915_gem_object *semaphore_obj;
+ struct i915_vma *semaphore;
u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
}
}
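The two struct changes above are the crux of the patch: both the error state and dev_priv now track the semaphore page by its VMA rather than by the backing object. A rough sketch of what that buys, assuming the i915_vma API of this series:

	/* Before: the GGTT address had to be looked up from the object. */
	u64 old_addr = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);

	/* After: the VMA records where it is bound, and still reaches the
	 * backing storage when the pages themselves are needed. */
	u64 new_addr = dev_priv->semaphore->node.start;
	struct drm_i915_gem_object *obj = dev_priv->semaphore->obj;

This is why the GEN8_SIGNAL_OFFSET/GEN8_WAIT_OFFSET macros at the end of the patch shrink to a plain node.start read.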
- if ((obj = error->semaphore_obj)) {
+ if ((obj = error->semaphore)) {
err_printf(m, "Semaphore page = 0x%08x\n",
lower_32_bits(obj->gtt_offset));
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
kfree(ee->waiters);
}
- i915_error_object_free(error->semaphore_obj);
+ i915_error_object_free(error->semaphore);
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
struct intel_engine_cs *to;
enum intel_engine_id id;
- if (!error->semaphore_obj)
+ if (!error->semaphore)
return;
for_each_engine_id(to, dev_priv, id) {
signal_offset =
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
- tmp = error->semaphore_obj->pages[0];
+ tmp = error->semaphore->pages[0];
idx = intel_engine_sync_index(engine, to);
ee->semaphore_mboxes[idx] = tmp[signal_offset];
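For readers checking the arithmetic in gen8_record_semaphore_state(): masking with PAGE_SIZE - 1 strips the GGTT base from the mailbox address, and the division by 4 converts bytes to words because error objects store copied pages as u32 arrays. A worked example under the engine numbering of this era (RCS = 0, VCS = 2, I915_NUM_ENGINES = 5, gen8_semaphore_seqno_size = sizeof(u64)):

	/* VCS signalling RCS:
	 *   GEN8_SIGNAL_OFFSET(vcs, RCS) = base + (2 * 5 + 0) * 8 = base + 80
	 * so signal_offset = 80 / 4 = 20, and tmp[20] is the low dword of
	 * the last seqno VCS wrote into RCS's mailbox. */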
struct drm_i915_gem_request *request;
int i, count;
- if (dev_priv->semaphore_obj) {
- error->semaphore_obj =
- i915_error_ggtt_object_create(dev_priv,
- dev_priv->semaphore_obj);
- }
+ if (dev_priv->semaphore)
+     error->semaphore =
+         i915_error_ggtt_object_create(dev_priv,
+                                       dev_priv->semaphore->obj);
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
if (HAS_VEBOX(dev_priv))
I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
}
- if (dev_priv->semaphore_obj) {
- struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
- struct page *page = i915_gem_object_get_dirty_page(obj, 0);
- void *semaphores = kmap(page);
+ if (dev_priv->semaphore) {
+ struct page *page = i915_vma_first_page(dev_priv->semaphore);
+ void *semaphores;
+
+ /* Semaphores are in noncoherent memory, flush to be safe */
+ semaphores = kmap(page);
memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+ drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+ I915_NUM_ENGINES * gen8_semaphore_seqno_size);
kunmap(page);
}
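i915_vma_first_page() is new in this series; a minimal sketch of what it resolves to, assuming the vma tracks its backing store as an sg_table at this point:

	static inline struct page *i915_vma_first_page(struct i915_vma *vma)
	{
		return sg_page(vma->pages->sgl);
	}

One page suffices because the semaphore object is a single 4096-byte allocation. The clflush is the price of the VMA conversion: the object is now left in the GTT domain instead of being set to I915_CACHE_LLC, so CPU writes through the kmap() must be flushed before the GPU can be relied on to see them.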
memset(engine->semaphore.sync_seqno, 0,
       sizeof(engine->semaphore.sync_seqno));
static void render_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ struct i915_vma *vma;
- if (dev_priv->semaphore_obj) {
- i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
- i915_gem_object_put(dev_priv->semaphore_obj);
- dev_priv->semaphore_obj = NULL;
- }
+ vma = fetch_and_zero(&dev_priv->semaphore);
+ if (!vma)
+ return;
+
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
}
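fetch_and_zero() is the i915 helper that loads a pointer and clears it in a single expression; roughly, as defined in i915_drv.h of this period:

	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})

It is not atomic, which is fine here: engine cleanup runs single-threaded during driver teardown.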
static int gen8_rcs_signal(struct drm_i915_gem_request *req)
if (!i915.semaphores)
return;
- if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+ if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
+ struct i915_vma *vma;
+
obj = i915_gem_object_create(&dev_priv->drm, 4096);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
- i915.semaphores = 0;
- } else {
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_ggtt_pin(obj, NULL,
- 0, 0, PIN_HIGH);
- if (ret != 0) {
- i915_gem_object_put(obj);
- DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
- i915.semaphores = 0;
- } else {
- dev_priv->semaphore_obj = obj;
- }
- }
- }
+ if (IS_ERR(obj))
+ goto err;
- if (!i915.semaphores)
- return;
+ vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ goto err_obj;
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_obj;
+
+ dev_priv->semaphore = vma;
+ }
if (INTEL_GEN(dev_priv) >= 8) {
- u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+ u64 offset = dev_priv->semaphore->node.start;
engine->semaphore.sync_to = gen8_ring_sync_to;
engine->semaphore.signal = gen8_xcs_signal;
engine->semaphore.mbox.signal[i] = mbox_reg;
}
}
+
+ return;
+
+err_obj:
+ i915_gem_object_put(obj);
+err:
+ DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
+ i915.semaphores = 0;
}
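Note the unwind order: every failure after the object exists funnels through err_obj, which only puts the object. A sketch of why that suffices, on the assumption that a vma made by i915_vma_create() is owned by, and reaped with, its object:

	vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); /* owned by obj */
	...
	err_obj:
	i915_gem_object_put(obj); /* also disposes of any vma created above */

Demoting DRM_ERROR to DRM_DEBUG_DRIVER fits the new flow: running without semaphores is a supported fallback, not a driver error.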
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
- (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ (dev_priv->semaphore->node.start + \
GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
- (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ (dev_priv->semaphore->node.start + \
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
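The two macros are mirror images; a quick sanity check, assuming five engines and u64 slots as above:

	/* Both reduce to GEN8_SEMAPHORE_OFFSET(a->id, b->id):
	 *   GEN8_SIGNAL_OFFSET(a, b->id) == GEN8_WAIT_OFFSET(b, a->id)
	 * i.e. the slot one engine signals is exactly the slot the other
	 * waits on. The full 5 x 5 matrix of u64 mailboxes occupies
	 * 5 * 5 * 8 = 200 bytes of the 4096-byte semaphore page. */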
enum intel_engine_hangcheck_action {