drm/i915/guc: Add GuC ADS - MMIO reg state
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 036b42bae827c02049649a9a747d456d462dcaaf..9c244247c13e879bb3a5c0918a4319c03f4bdc36 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -27,7 +27,7 @@
 #include "intel_guc.h"
 
 /**
- * DOC: GuC Client
+ * DOC: GuC-based command submission
  *
  * i915_guc_client:
  * We use the term client to avoid confusion with contexts. A i915_guc_client is
@@ -86,7 +86,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
                return -EINVAL;
 
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-       spin_lock(&dev_priv->guc.host2guc_lock);
 
        dev_priv->guc.action_count += 1;
        dev_priv->guc.action_cmd = data[0];
@@ -119,7 +118,6 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
        }
        dev_priv->guc.action_status = status;
 
-       spin_unlock(&dev_priv->guc.host2guc_lock);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
        return ret;
@@ -160,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
 
        data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,bxt */
-       if (!intel_enable_rc6(dev_priv->dev) ||
-           (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-           (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
-           (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+       if (!intel_enable_rc6(dev) ||
+           NEEDS_WaRsDisableCoarsePowerGating(dev))
                data[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
@@ -248,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
                        db_exc.cookie = 1;
        }
 
+       /* Finally, update the cached copy of the GuC's WQ head */
+       gc->wq_head = desc->head;
+
        kunmap_atomic(base);
        return ret;
 }
@@ -258,7 +257,7 @@ static void guc_disable_doorbell(struct intel_guc *guc,
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct guc_doorbell_info *doorbell;
        void *base;
-       int drbreg = GEN8_DRBREGL(client->doorbell_id);
+       i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
        int value;
 
        base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
@@ -292,16 +291,12 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
        const uint32_t cacheline_size = cache_line_size();
        uint32_t offset;
 
-       spin_lock(&guc->host2guc_lock);
-
        /* Doorbell uses a single cache line within a page */
        offset = offset_in_page(guc->db_cacheline);
 
        /* Moving to next cache line to reduce contention */
        guc->db_cacheline += cacheline_size;
 
-       spin_unlock(&guc->host2guc_lock);
-
        DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
                        offset, guc->db_cacheline, cacheline_size);
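select_doorbell_cacheline() above is just page-offset arithmetic: each new client is handed the next cache line within the doorbell page so that concurrent doorbell writes do not contend on the same line. The standalone sketch below is a userspace model of that stepping, not driver code; PAGE_SIZE_BYTES, CACHELINE_BYTES and page_offset() are assumed stand-ins for the kernel's page size, cache_line_size() and offset_in_page().

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE_BYTES	4096u	/* assumed page size */
	#define CACHELINE_BYTES	64u	/* assumed cache line size */

	/* Mirrors offset_in_page(): keep only the offset within one page. */
	static uint32_t page_offset(uint32_t v)
	{
		return v & (PAGE_SIZE_BYTES - 1);
	}

	int main(void)
	{
		uint32_t db_cacheline = 0;	/* stand-in for guc->db_cacheline */
		int i;

		for (i = 0; i < 4; i++) {
			uint32_t offset = page_offset(db_cacheline);

			/* Advance so the next client lands on the next cache line. */
			db_cacheline += CACHELINE_BYTES;
			printf("doorbell %d at page offset 0x%x\n", i, offset);
		}
		return 0;
	}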
 
@@ -322,13 +317,11 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
        const uint16_t end = start + half;
        uint16_t id;
 
-       spin_lock(&guc->host2guc_lock);
        id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
        if (id == end)
                id = GUC_INVALID_DOORBELL_ID;
        else
                bitmap_set(guc->doorbell_bitmap, id, 1);
-       spin_unlock(&guc->host2guc_lock);
 
        DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
                        hi_pri ? "high" : "normal", id);
@@ -338,9 +331,7 @@ static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
 
 static void release_doorbell(struct intel_guc *guc, uint16_t id)
 {
-       spin_lock(&guc->host2guc_lock);
        bitmap_clear(guc->doorbell_bitmap, id, 1);
-       spin_unlock(&guc->host2guc_lock);
 }
 
 /*
@@ -481,31 +472,34 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
                             sizeof(desc) * client->ctx_index);
 }
 
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
        struct guc_process_desc *desc;
        void *base;
        u32 size = sizeof(struct guc_wq_item);
-       int ret = 0, timeout_counter = 200;
+       int ret = -ETIMEDOUT, timeout_counter = 200;
+
+       if (!gc)
+               return 0;
+
+       /* Quickly return if wq space is available since last time we cache the
+        * head position. */
+       if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+               return 0;
 
        base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
        desc = base + gc->proc_desc_offset;
 
        while (timeout_counter-- > 0) {
-               ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
-                               gc->wq_size) >= size, 1);
-
-               if (!ret) {
-                       *offset = gc->wq_tail;
+               gc->wq_head = desc->head;
 
-                       /* advance the tail for next workqueue item */
-                       gc->wq_tail += size;
-                       gc->wq_tail &= gc->wq_size - 1;
-
-                       /* this will break the loop */
-                       timeout_counter = 0;
+               if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
+                       ret = 0;
+                       break;
                }
+
+               if (timeout_counter)
+                       usleep_range(1000, 2000);
        };
 
        kunmap_atomic(base);
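The rework above replaces a blocking wait with a cached copy of the GuC-owned work-queue head: both guc_ring_doorbell() and i915_guc_wq_check_space() refresh gc->wq_head from the process descriptor, so most space checks are satisfied without re-reading shared memory. The sketch below is a minimal userspace model of that fast path, assuming a power-of-two queue size; it reimplements the CIRC_SPACE() arithmetic from <linux/circ_buf.h>, and wq_has_space() is a hypothetical helper, not a driver function.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Reimplementation of the <linux/circ_buf.h> arithmetic (size must be a
	 * power of two): free space between producer and consumer indices,
	 * always leaving one slot empty so full and empty are distinguishable. */
	#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

	struct wq_model {
		uint32_t tail;		/* producer offset (like gc->wq_tail) */
		uint32_t cached_head;	/* cached consumer offset (like gc->wq_head) */
		uint32_t size;		/* power-of-two buffer size */
	};

	/* Hypothetical helper: test against the cached head first; only if that
	 * fails would the real driver re-read the head from shared memory. */
	static bool wq_has_space(struct wq_model *wq, uint32_t item_size,
				 uint32_t shared_head)
	{
		if (CIRC_SPACE(wq->tail, wq->cached_head, wq->size) >= item_size)
			return true;		/* fast path: no shared-memory read */

		wq->cached_head = shared_head;	/* refresh the cache, then retry */
		return CIRC_SPACE(wq->tail, wq->cached_head, wq->size) >= item_size;
	}

	int main(void)
	{
		struct wq_model wq = { .tail = 0, .cached_head = 0, .size = 8192 };

		printf("space for a 16-byte item: %s\n",
		       wq_has_space(&wq, 16, 0) ? "yes" : "no");
		return 0;
	}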
@@ -519,12 +513,16 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
        enum intel_ring_id ring_id = rq->ring->id;
        struct guc_wq_item *wqi;
        void *base;
-       u32 tail, wq_len, wq_off = 0;
-       int ret;
+       u32 tail, wq_len, wq_off, space;
+
+       space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+       if (WARN_ON(space < sizeof(struct guc_wq_item)))
+               return -ENOSPC; /* shouldn't happen */
 
-       ret = guc_get_workqueue_space(gc, &wq_off);
-       if (ret)
-               return ret;
+       /* postincrement WQ tail for next time */
+       wq_off = gc->wq_tail;
+       gc->wq_tail += sizeof(struct guc_wq_item);
+       gc->wq_tail &= gc->wq_size - 1;
 
        /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
         * should not have the case where structure wqi is across page, neither
@@ -577,7 +575,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq)
        WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
        WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
-       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+       page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
        reg_state = kmap_atomic(page);
 
        reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
@@ -588,8 +586,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq)
 /**
  * i915_guc_submit() - Submit commands through GuC
  * @client:    the guc client where commands will go through
- * @ctx:       LRC where commands come from
- * @ring:      HW engine that will excute the commands
+ * @rq:                request associated with the commands
  *
  * Return:     0 if succeed
  */
@@ -598,15 +595,12 @@ int i915_guc_submit(struct i915_guc_client *client,
 {
        struct intel_guc *guc = client->guc;
        enum intel_ring_id ring_id = rq->ring->id;
-       unsigned long flags;
        int q_ret, b_ret;
 
        /* Need this because of the deferred pin ctx and ring */
        /* Shall we move this right after ring is pinned? */
        lr_context_update(rq);
 
-       spin_lock_irqsave(&client->wq_lock, flags);
-
        q_ret = guc_add_workqueue_item(client, rq);
        if (q_ret == 0)
                b_ret = guc_ring_doorbell(client);
@@ -621,12 +615,8 @@ int i915_guc_submit(struct i915_guc_client *client,
        } else {
                client->retcode = 0;
        }
-       spin_unlock_irqrestore(&client->wq_lock, flags);
-
-       spin_lock(&guc->host2guc_lock);
        guc->submissions[ring_id] += 1;
        guc->last_seqno[ring_id] = rq->seqno;
-       spin_unlock(&guc->host2guc_lock);
 
        return q_ret;
 }
@@ -678,7 +668,7 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
 /**
  * gem_release_guc_obj() - Release gem object allocated for GuC usage
  * @obj:       gem obj to be released
-*/
+ */
 static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
 {
        if (!obj)
@@ -731,7 +721,8 @@ static void guc_client_free(struct drm_device *dev,
  *             The kernel client to replace ExecList submission is created with
  *             NORMAL priority. Priority of a client for scheduler can be HIGH,
  *             while a preemption context can use CRITICAL.
- * @ctx                the context to own the client (we use the default render context)
+ * @ctx:       the context that owns the client (we use the default render
+ *             context)
  *
  * Return:     An i915_guc_client object if success.
  */
@@ -768,7 +759,6 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
        client->client_obj = obj;
        client->wq_offset = GUC_DB_SIZE;
        client->wq_size = GUC_WQ_SIZE;
-       spin_lock_init(&client->wq_lock);
 
        client->doorbell_offset = select_doorbell_cacheline(guc);
 
@@ -849,6 +839,96 @@ static void guc_create_log(struct intel_guc *guc)
        guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
+static void init_guc_policies(struct guc_policies *policies)
+{
+       struct guc_policy *policy;
+       u32 p, i;
+
+       policies->dpc_promote_time = 500000;
+       policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+       for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+               for (i = 0; i < I915_NUM_RINGS; i++) {
+                       policy = &policies->policy[p][i];
+
+                       policy->execution_quantum = 1000000;
+                       policy->preemption_time = 500000;
+                       policy->fault_time = 250000;
+                       policy->policy_flags = 0;
+               }
+       }
+
+       policies->is_valid = 1;
+}
+
+static void guc_create_ads(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct drm_i915_gem_object *obj;
+       struct guc_ads *ads;
+       struct guc_policies *policies;
+       struct guc_mmio_reg_state *reg_state;
+       struct intel_engine_cs *ring;
+       struct page *page;
+       u32 size, i;
+
+       /* The ads obj includes the struct itself and buffers passed to GuC */
+       size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
+                       sizeof(struct guc_mmio_reg_state) +
+                       GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+
+       obj = guc->ads_obj;
+       if (!obj) {
+               obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+               if (!obj)
+                       return;
+
+               guc->ads_obj = obj;
+       }
+
+       page = i915_gem_object_get_page(obj, 0);
+       ads = kmap(page);
+
+       /*
+        * The GuC requires a "Golden Context" when it reinitialises
+        * engines after a reset. Here we use the Render ring default
+        * context, which must already exist and be pinned in the GGTT,
+        * so its address won't change after we've told the GuC where
+        * to find it.
+        */
+       ring = &dev_priv->ring[RCS];
+       ads->golden_context_lrca = ring->status_page.gfx_addr;
+
+       for_each_ring(ring, dev_priv, i)
+               ads->eng_state_size[i] = intel_lr_context_size(ring);
+
+       /* GuC scheduling policies */
+       policies = (void *)ads + sizeof(struct guc_ads);
+       init_guc_policies(policies);
+
+       ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+                       sizeof(struct guc_ads);
+
+       /* MMIO reg state */
+       reg_state = (void *)policies + sizeof(struct guc_policies);
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               reg_state->mmio_white_list[i].mmio_start =
+                       dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+               /* Nothing to be saved or restored for now. */
+               reg_state->mmio_white_list[i].count = 0;
+       }
+
+       ads->reg_state_addr = ads->scheduler_policies +
+                       sizeof(struct guc_policies);
+
+       ads->reg_state_buffer = ads->reg_state_addr +
+                       sizeof(struct guc_mmio_reg_state);
+
+       kunmap(page);
+}
+
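+
The ADS object packs several GuC interface structures back to back in a single GEM allocation: the guc_ads header, the scheduling policies, the MMIO reg-state white-lists, and a save/restore area of GUC_S3_SAVE_SPACE_PAGES pages. The standalone sketch below models only that offset arithmetic, mirroring guc_create_ads(); the struct sizes and page count are made-up placeholders, not the real values from the GuC firmware interface.

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE_BYTES	4096u
	#define S3_SAVE_PAGES	10u	/* placeholder for GUC_S3_SAVE_SPACE_PAGES */

	/* Placeholder sizes standing in for the real GuC interface structs. */
	#define ADS_SIZE	256u	/* sizeof(struct guc_ads) */
	#define POLICIES_SIZE	1024u	/* sizeof(struct guc_policies) */
	#define REG_STATE_SIZE	2048u	/* sizeof(struct guc_mmio_reg_state) */

	int main(void)
	{
		uint32_t base = 0;	/* GGTT offset of the ADS object */

		/* Layout mirrors guc_create_ads(): the ADS struct first, then the
		 * policies, then the MMIO reg state, then the scratch pages. */
		uint32_t policies   = base + ADS_SIZE;
		uint32_t reg_state  = policies + POLICIES_SIZE;
		uint32_t reg_buffer = reg_state + REG_STATE_SIZE;
		uint32_t total      = reg_buffer + S3_SAVE_PAGES * PAGE_SIZE_BYTES;

		printf("policies @ 0x%x, reg_state @ 0x%x, reg_buffer @ 0x%x, total 0x%x\n",
		       policies, reg_state, reg_buffer, total);
		return 0;
	}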
 /*
  * Set up the memory resources to be shared with the GuC.  At this point,
  * we require just one object that can be mapped through the GGTT.
@@ -871,12 +951,12 @@ int i915_guc_submission_init(struct drm_device *dev)
        if (!guc->ctx_pool_obj)
                return -ENOMEM;
 
-       spin_lock_init(&dev_priv->guc.host2guc_lock);
-
        ida_init(&guc->ctx_ids);
 
        guc_create_log(guc);
 
+       guc_create_ads(guc);
+
        return 0;
 }
 
@@ -915,6 +995,9 @@ void i915_guc_submission_fini(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
 
+       gem_release_guc_obj(dev_priv->guc.ads_obj);
+       guc->ads_obj = NULL;
+
        gem_release_guc_obj(dev_priv->guc.log_obj);
        guc->log_obj = NULL;