[CELL] spufs: make sure contexts are scheduled again after spu_acquire_saved
[mv-sheeva.git] / arch / powerpc / platforms / cell / spufs / context.c
index c5ec7cfc24b5a42f6bb840e569bcc7e1b7a8ed67..6b091ea1d1923c7e27f3b348657055c95c0b7c36 100644
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <asm/atomic.h>
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
 #include "spufs.h"
 
+
+atomic_t nr_spu_contexts = ATOMIC_INIT(0);
+
 struct spu_context *alloc_spu_context(struct spu_gang *gang)
 {
        struct spu_context *ctx;
@@ -53,19 +57,11 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
        INIT_LIST_HEAD(&ctx->rq);
        if (gang)
                spu_gang_add_ctx(gang, ctx);
-
-       /*
-        * We do our own priority calculations, so we normally want
-        * ->static_prio to start with. Unfortunately thies field
-        * contains junk for threads with a realtime scheduling
-        * policy so we have to look at ->prio in this case.
-        */
-       if (rt_prio(current->prio))
-               ctx->prio = current->prio;
-       else
-               ctx->prio = current->static_prio;
-       ctx->policy = current->policy;
+       ctx->cpus_allowed = current->cpus_allowed;
        spu_set_timeslice(ctx);
+       ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
+
+       atomic_inc(&nr_spu_contexts);
        goto out;
 out_free:
        kfree(ctx);
@@ -85,6 +81,7 @@ void destroy_spu_context(struct kref *kref)
        if (ctx->gang)
                spu_gang_remove_ctx(ctx->gang, ctx);
        BUG_ON(!list_empty(&ctx->rq));
+       atomic_dec(&nr_spu_contexts);
        kfree(ctx);
 }
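
Editorial note (not part of the diff): the new nr_spu_contexts counter is incremented on every successful alloc_spu_context() and decremented in destroy_spu_context(), so any other spufs code can take a lock-free snapshot of the number of live contexts with atomic_read(). A minimal sketch, assuming a hypothetical helper name not present in this patch:

	/* Hypothetical helper, for illustration only: report the number of
	 * live SPU contexts. atomic_read() returns a consistent snapshot
	 * without taking any spufs locks. */
	static inline int spufs_context_count(void)
	{
		return atomic_read(&nr_spu_contexts);
	}
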
 
@@ -168,6 +165,22 @@ int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
 void spu_acquire_saved(struct spu_context *ctx)
 {
        spu_acquire(ctx);
-       if (ctx->state != SPU_STATE_SAVED)
+       if (ctx->state != SPU_STATE_SAVED) {
+               set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
                spu_deactivate(ctx);
+       }
+}
+
+/**
+ * spu_release_saved - unlock spu context and return it to the runqueue
+ * @ctx:       context to unlock
+ */
+void spu_release_saved(struct spu_context *ctx)
+{
+       BUG_ON(ctx->state != SPU_STATE_SAVED);
+
+       if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
+               spu_activate(ctx, 0);
+
+       spu_release(ctx);
 }
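
Editorial note (not part of the diff): after this patch the intended pairing is that a caller which needs the saved state takes the context with spu_acquire_saved(), which deactivates a running context and records that fact in SPU_SCHED_WAS_ACTIVE, and drops it with spu_release_saved(), which re-activates the context if it had been running. A minimal caller sketch, assuming a hypothetical spufs_dump_regs() that only reads the context save area:

	/* Hypothetical caller, for illustration only. */
	static void spufs_dump_regs(struct spu_context *ctx)
	{
		spu_acquire_saved(ctx);		/* deactivate if running, remember it was active */
		/* ... read register state from ctx->csa here ... */
		spu_release_saved(ctx);		/* re-queue the context if it was active before */
	}
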