include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 002b40a..0b04662 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -27,6 +27,7 @@
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/completion.h>
 #include <linux/vmalloc.h>
 #include <linux/smp.h>
@@ -36,6 +37,9 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/pid_namespace.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/spu_csa.h>
 #include <asm/spu_priv1.h>
 #include "spufs.h"
+#define CREATE_TRACE_POINTS
+#include "sputrace.h"
 
 struct spu_prio_array {
        DECLARE_BITMAP(bitmap, MAX_PRIO);
        struct list_head runq[MAX_PRIO];
        spinlock_t runq_lock;
-       struct list_head active_list[MAX_NUMNODES];
-       struct mutex active_mutex[MAX_NUMNODES];
+       int nr_waiting;
 };
 
+static unsigned long spu_avenrun[3];
 static struct spu_prio_array *spu_prio;
 static struct task_struct *spusched_task;
 static struct timer_list spusched_timer;
+static struct timer_list spuloadavg_timer;
 
 /*
  * Priority of a normal, non-rt, non-niced process (aka nice level 0).
@@ -96,63 +103,116 @@ void spu_set_timeslice(struct spu_context *ctx)
                ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
 }
 
-static inline int node_allowed(int node)
+/*
+ * Update scheduling information from the owning thread.
+ */
+void __spu_update_sched_info(struct spu_context *ctx)
 {
-       cpumask_t mask;
+       /*
+        * assert that the context is not on the runqueue, so it is safe
+        * to change its scheduling parameters.
+        */
+       BUG_ON(!list_empty(&ctx->rq));
 
-       if (!nr_cpus_node(node))
-               return 0;
-       mask = node_to_cpumask(node);
-       if (!cpus_intersects(mask, current->cpus_allowed))
-               return 0;
-       return 1;
-}
+       /*
+        * 32-bit assignments are atomic on powerpc, and we don't care about
+        * memory ordering here because retrieving the controlling thread is
+        * by definition racy.
+        */
+       ctx->tid = current->pid;
 
-/**
- * spu_add_to_active_list - add spu to active list
- * @spu:       spu to add to the active list
- */
-static void spu_add_to_active_list(struct spu *spu)
-{
-       mutex_lock(&spu_prio->active_mutex[spu->node]);
-       list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
-       mutex_unlock(&spu_prio->active_mutex[spu->node]);
-}
+       /*
+        * We do our own priority calculations, so we normally want
+        * ->static_prio to start with. Unfortunately this field
+        * contains junk for threads with a realtime scheduling
+        * policy so we have to look at ->prio in this case.
+        */
+       if (rt_prio(current->prio))
+               ctx->prio = current->prio;
+       else
+               ctx->prio = current->static_prio;
+       ctx->policy = current->policy;
 
-static void __spu_remove_from_active_list(struct spu *spu)
-{
-       list_del_init(&spu->list);
+       /*
+        * TODO: the context may be loaded, so we may need to activate
+        * it again on a different node. But it shouldn't hurt anything
+        * to update its parameters, because we know that the scheduler
+        * is not actively looking at this field, since it is not on the
+        * runqueue. The context will be rescheduled on the proper node
+        * if it is timesliced or preempted.
+        */
+       ctx->cpus_allowed = current->cpus_allowed;
+
+       /* Save the current cpu id for spu interrupt routing. */
+       ctx->last_ran = raw_smp_processor_id();
 }
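
For illustration only, the priority selection above can be exercised outside the kernel. In the sketch below, struct task_like, the local rt_prio() helper and the concrete numbers (nice 0 mapping to static_prio 120, SCHED_FIFO priorities sitting below MAX_RT_PRIO) are assumptions of the example, not spufs code:

#include <stdio.h>

#define MAX_RT_PRIO	100	/* assumed to match the kernel's 0..99 RT range */

struct task_like {		/* hypothetical stand-in for task_struct */
	int prio;		/* effective priority, meaningful for RT tasks */
	int static_prio;	/* nice-based priority, junk for RT tasks */
};

static int rt_prio(int prio)
{
	return prio < MAX_RT_PRIO;
}

/* Mirrors the ctx->prio selection done in __spu_update_sched_info(). */
static int ctx_prio(const struct task_like *t)
{
	return rt_prio(t->prio) ? t->prio : t->static_prio;
}

int main(void)
{
	struct task_like fifo  = { .prio = 2,   .static_prio = -1 };	/* SCHED_FIFO */
	struct task_like nice0 = { .prio = 120, .static_prio = 120 };	/* nice 0 */

	printf("rt context prio: %d, normal context prio: %d\n",
		ctx_prio(&fifo), ctx_prio(&nice0));
	return 0;
}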
 
-/**
- * spu_remove_from_active_list - remove spu from active list
- * @spu:       spu to remove from the active list
- */
-static void spu_remove_from_active_list(struct spu *spu)
+void spu_update_sched_info(struct spu_context *ctx)
 {
-       int node = spu->node;
+       int node;
 
-       mutex_lock(&spu_prio->active_mutex[node]);
-       __spu_remove_from_active_list(spu);
-       mutex_unlock(&spu_prio->active_mutex[node]);
-}
+       if (ctx->state == SPU_STATE_RUNNABLE) {
+               node = ctx->spu->node;
 
-static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
+               /*
+                * Take list_mutex to sync with find_victim().
+                */
+               mutex_lock(&cbe_spu_info[node].list_mutex);
+               __spu_update_sched_info(ctx);
+               mutex_unlock(&cbe_spu_info[node].list_mutex);
+       } else {
+               __spu_update_sched_info(ctx);
+       }
+}
 
-static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
+static int __node_allowed(struct spu_context *ctx, int node)
 {
-       blocking_notifier_call_chain(&spu_switch_notifier,
-                           ctx ? ctx->object_id : 0, spu);
+       if (nr_cpus_node(node)) {
+               const struct cpumask *mask = cpumask_of_node(node);
+
+               if (cpumask_intersects(mask, &ctx->cpus_allowed))
+                       return 1;
+       }
+
+       return 0;
 }
 
-int spu_switch_event_register(struct notifier_block * n)
+static int node_allowed(struct spu_context *ctx, int node)
 {
-       return blocking_notifier_chain_register(&spu_switch_notifier, n);
+       int rval;
+
+       spin_lock(&spu_prio->runq_lock);
+       rval = __node_allowed(ctx, node);
+       spin_unlock(&spu_prio->runq_lock);
+
+       return rval;
 }
 
-int spu_switch_event_unregister(struct notifier_block * n)
+void do_notify_spus_active(void)
 {
-       return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
+       int node;
+
+       /*
+        * Wake up the active spu_contexts.
+        *
+        * When the awakened processes see their "notify_active" flag is set,
+        * they will call spu_switch_notify().
+        */
+       for_each_online_node(node) {
+               struct spu *spu;
+
+               mutex_lock(&cbe_spu_info[node].list_mutex);
+               list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+                       if (spu->alloc_state != SPU_FREE) {
+                               struct spu_context *ctx = spu->ctx;
+                               set_bit(SPU_SCHED_NOTIFY_ACTIVE,
+                                       &ctx->sched_flags);
+                               mb();
+                               wake_up_all(&ctx->stop_wq);
+                       }
+               }
+               mutex_unlock(&cbe_spu_info[node].list_mutex);
+       }
 }
 
 /**
@@ -162,26 +222,211 @@ int spu_switch_event_unregister(struct notifier_block * n)
  */
 static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 {
-       pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
-                spu->number, spu->node);
+       spu_context_trace(spu_bind_context__enter, ctx, spu);
+
+       spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+
+       if (ctx->flags & SPU_CREATE_NOSCHED)
+               atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
+
+       ctx->stats.slb_flt_base = spu->stats.slb_flt;
+       ctx->stats.class2_intr_base = spu->stats.class2_intr;
+
+       spu_associate_mm(spu, ctx->owner);
+
+       spin_lock_irq(&spu->register_lock);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
-       spu_associate_mm(spu, ctx->owner);
+       spu->tgid = current->tgid;
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
-       spu->dma_callback = spufs_dma_callback;
-       mb();
+       spin_unlock_irq(&spu->register_lock);
+
        spu_unmap_mappings(ctx);
+
+       spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
-       spu_cpu_affinity_set(spu, raw_smp_processor_id());
        spu_switch_notify(spu, ctx);
        ctx->state = SPU_STATE_RUNNABLE;
+
+       spuctx_switch_state(ctx, SPU_UTIL_USER);
+}
+
+/*
+ * Must be used with the list_mutex held.
+ */
+static inline int sched_spu(struct spu *spu)
+{
+       BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));
+
+       return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
+}
+
+static void aff_merge_remaining_ctxs(struct spu_gang *gang)
+{
+       struct spu_context *ctx;
+
+       list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
+               if (list_empty(&ctx->aff_list))
+                       list_add(&ctx->aff_list, &gang->aff_list_head);
+       }
+       gang->aff_flags |= AFF_MERGED;
+}
+
+static void aff_set_offsets(struct spu_gang *gang)
+{
+       struct spu_context *ctx;
+       int offset;
+
+       offset = -1;
+       list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
+                                                               aff_list) {
+               if (&ctx->aff_list == &gang->aff_list_head)
+                       break;
+               ctx->aff_offset = offset--;
+       }
+
+       offset = 0;
+       list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
+               if (&ctx->aff_list == &gang->aff_list_head)
+                       break;
+               ctx->aff_offset = offset++;
+       }
+
+       gang->aff_flags |= AFF_OFFSETS_SET;
+}
+
+static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
+                int group_size, int lowest_offset)
+{
+       struct spu *spu;
+       int node, n;
+
+       /*
+        * TODO: A better algorithm could be used to find a good spu to
+        *       serve as the reference location for the ctxs chain.
+        */
+       node = cpu_to_node(raw_smp_processor_id());
+       for (n = 0; n < MAX_NUMNODES; n++, node++) {
+               /*
+                * "available_spus" counts how many spus are not potentially
+                * going to be used by other affinity gangs whose reference
+                * context is already in place. Although this code seeks to
+                * avoid having affinity gangs whose total number of contexts
+                * is bigger than the number of spus in the node,
+                * this may happen sporadically. In this case, available_spus
+                * becomes negative, which is harmless.
+                */
+               int available_spus;
+
+               node = (node < MAX_NUMNODES) ? node : 0;
+               if (!node_allowed(ctx, node))
+                       continue;
+
+               available_spus = 0;
+               mutex_lock(&cbe_spu_info[node].list_mutex);
+               list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+                       if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
+                                       && spu->ctx->gang->aff_ref_spu)
+                               available_spus -= spu->ctx->gang->contexts;
+                       available_spus++;
+               }
+               if (available_spus < ctx->gang->contexts) {
+                       mutex_unlock(&cbe_spu_info[node].list_mutex);
+                       continue;
+               }
+
+               list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+                       if ((!mem_aff || spu->has_mem_affinity) &&
+                                                       sched_spu(spu)) {
+                               mutex_unlock(&cbe_spu_info[node].list_mutex);
+                               return spu;
+                       }
+               }
+               mutex_unlock(&cbe_spu_info[node].list_mutex);
+       }
+       return NULL;
+}
+
+static void aff_set_ref_point_location(struct spu_gang *gang)
+{
+       int mem_aff, gs, lowest_offset;
+       struct spu_context *ctx;
+       struct spu *tmp;
+
+       mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
+       lowest_offset = 0;
+       gs = 0;
+
+       list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
+               gs++;
+
+       list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
+                                                               aff_list) {
+               if (&ctx->aff_list == &gang->aff_list_head)
+                       break;
+               lowest_offset = ctx->aff_offset;
+       }
+
+       gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
+                                                       lowest_offset);
+}
+
+static struct spu *ctx_location(struct spu *ref, int offset, int node)
+{
+       struct spu *spu;
+
+       spu = NULL;
+       if (offset >= 0) {
+               list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
+                       BUG_ON(spu->node != node);
+                       if (offset == 0)
+                               break;
+                       if (sched_spu(spu))
+                               offset--;
+               }
+       } else {
+               list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
+                       BUG_ON(spu->node != node);
+                       if (offset == 0)
+                               break;
+                       if (sched_spu(spu))
+                               offset++;
+               }
+       }
+
+       return spu;
+}
+
+/*
+ * has_affinity is called each time a context is going to be scheduled.
+ * It returns true if the context belongs to an affinity chain and a
+ * reference spu has been (or can be) established for its gang.
+ */
+static int has_affinity(struct spu_context *ctx)
+{
+       struct spu_gang *gang = ctx->gang;
+
+       if (list_empty(&ctx->aff_list))
+               return 0;
+
+       if (atomic_read(&ctx->gang->aff_sched_count) == 0)
+               ctx->gang->aff_ref_spu = NULL;
+
+       if (!gang->aff_ref_spu) {
+               if (!(gang->aff_flags & AFF_MERGED))
+                       aff_merge_remaining_ctxs(gang);
+               if (!(gang->aff_flags & AFF_OFFSETS_SET))
+                       aff_set_offsets(gang);
+               aff_set_ref_point_location(gang);
+       }
+
+       return gang->aff_ref_spu != NULL;
 }
 
 /**
@@ -191,25 +436,55 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
  */
 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
-       pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
-                spu->pid, spu->number, spu->node);
+       u32 status;
+
+       spu_context_trace(spu_unbind_context__enter, ctx, spu);
+
+       spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+
+       if (spu->ctx->flags & SPU_CREATE_NOSCHED)
+               atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
+
+       if (ctx->gang)
+               /*
+                * If ctx->gang->aff_sched_count is positive, SPU affinity is
+                * being considered in this gang. Using atomic_dec_if_positive
+                * allows us to skip an explicit check for affinity here.
+                */
+               atomic_dec_if_positive(&ctx->gang->aff_sched_count);
 
        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
+       spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
+
+       spin_lock_irq(&spu->register_lock);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
-       spu->dma_callback = NULL;
-       spu_associate_mm(spu, NULL);
        spu->pid = 0;
+       spu->tgid = 0;
        ctx->ops = &spu_backing_ops;
-       ctx->spu = NULL;
        spu->flags = 0;
        spu->ctx = NULL;
+       spin_unlock_irq(&spu->register_lock);
+
+       spu_associate_mm(spu, NULL);
+
+       ctx->stats.slb_flt +=
+               (spu->stats.slb_flt - ctx->stats.slb_flt_base);
+       ctx->stats.class2_intr +=
+               (spu->stats.class2_intr - ctx->stats.class2_intr_base);
+
+       /* This maps the underlying spu state to idle */
+       spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+       ctx->spu = NULL;
+
+       if (spu_stopped(ctx, &status))
+               wake_up_all(&ctx->stop_wq);
 }
 
 /**
@@ -218,26 +493,66 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
  */
 static void __spu_add_to_rq(struct spu_context *ctx)
 {
-       int prio = ctx->prio;
+       /*
+        * Unfortunately this code path can be called from multiple threads
+        * on behalf of a single context due to the way the problem state
+        * mmap support works.
+        *
+        * Fortunately we need to wake up all these threads at the same time
+        * and can simply skip the runqueue addition for all but the first
+        * thread getting into this codepath.
+        *
+        * It's still quite hacky, and long-term we should proxy all other
+        * threads through the owner thread so that spu_run is in control
+        * of all the scheduling activity for a given context.
+        */
+       if (list_empty(&ctx->rq)) {
+               list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+               set_bit(ctx->prio, spu_prio->bitmap);
+               if (!spu_prio->nr_waiting++)
+                       mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+       }
+}
 
-       list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
-       set_bit(prio, spu_prio->bitmap);
+static void spu_add_to_rq(struct spu_context *ctx)
+{
+       spin_lock(&spu_prio->runq_lock);
+       __spu_add_to_rq(ctx);
+       spin_unlock(&spu_prio->runq_lock);
 }
 
 static void __spu_del_from_rq(struct spu_context *ctx)
 {
        int prio = ctx->prio;
 
-       if (!list_empty(&ctx->rq))
+       if (!list_empty(&ctx->rq)) {
+               if (!--spu_prio->nr_waiting)
+                       del_timer(&spusched_timer);
                list_del_init(&ctx->rq);
-       if (list_empty(&spu_prio->runq[prio]))
-               clear_bit(prio, spu_prio->bitmap);
+
+               if (list_empty(&spu_prio->runq[prio]))
+                       clear_bit(prio, spu_prio->bitmap);
+       }
+}
+
+void spu_del_from_rq(struct spu_context *ctx)
+{
+       spin_lock(&spu_prio->runq_lock);
+       __spu_del_from_rq(ctx);
+       spin_unlock(&spu_prio->runq_lock);
 }
 
 static void spu_prio_wait(struct spu_context *ctx)
 {
        DEFINE_WAIT(wait);
 
+       /*
+        * The caller must explicitly wait for a context to be loaded
+        * if the nosched flag is set.  If NOSCHED is not set, the caller
+        * queues the context and waits for an spu event or error.
+        */
+       BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
+
        spin_lock(&spu_prio->runq_lock);
        prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
        if (!signal_pending(current)) {
@@ -256,18 +571,53 @@ static void spu_prio_wait(struct spu_context *ctx)
 
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
-       struct spu *spu = NULL;
-       int node = cpu_to_node(raw_smp_processor_id());
-       int n;
+       struct spu *spu, *aff_ref_spu;
+       int node, n;
+
+       spu_context_nospu_trace(spu_get_idle__enter, ctx);
+
+       if (ctx->gang) {
+               mutex_lock(&ctx->gang->aff_mutex);
+               if (has_affinity(ctx)) {
+                       aff_ref_spu = ctx->gang->aff_ref_spu;
+                       atomic_inc(&ctx->gang->aff_sched_count);
+                       mutex_unlock(&ctx->gang->aff_mutex);
+                       node = aff_ref_spu->node;
 
+                       mutex_lock(&cbe_spu_info[node].list_mutex);
+                       spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
+                       if (spu && spu->alloc_state == SPU_FREE)
+                               goto found;
+                       mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+                       atomic_dec(&ctx->gang->aff_sched_count);
+                       goto not_found;
+               }
+               mutex_unlock(&ctx->gang->aff_mutex);
+       }
+       node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
-               if (!node_allowed(node))
+               if (!node_allowed(ctx, node))
                        continue;
-               spu = spu_alloc_node(node);
-               if (spu)
-                       break;
+
+               mutex_lock(&cbe_spu_info[node].list_mutex);
+               list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+                       if (spu->alloc_state == SPU_FREE)
+                               goto found;
+               }
+               mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
+
+ not_found:
+       spu_context_nospu_trace(spu_get_idle__not_found, ctx);
+       return NULL;
+
+ found:
+       spu->alloc_state = SPU_USED;
+       mutex_unlock(&cbe_spu_info[node].list_mutex);
+       spu_context_trace(spu_get_idle__found, ctx, spu);
+       spu_init_channels(spu);
        return spu;
 }
 
@@ -283,10 +633,12 @@ static struct spu *find_victim(struct spu_context *ctx)
        struct spu *spu;
        int node, n;
 
+       spu_context_nospu_trace(spu_find_victim__enter, ctx);
+
        /*
         * Look for a possible preemption candidate on the local node first.
         * If there is no candidate look at the other nodes.  This isn't
-        * exactly fair, but so far the whole spu schedule tries to keep
+        * exactly fair, but so far the whole spu scheduler tries to keep
         * a strong node affinity.  We might want to fine-tune this in
         * the future.
         */
@@ -294,18 +646,22 @@ static struct spu *find_victim(struct spu_context *ctx)
        node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
-               if (!node_allowed(node))
+               if (!node_allowed(ctx, node))
                        continue;
 
-               mutex_lock(&spu_prio->active_mutex[node]);
-               list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+               mutex_lock(&cbe_spu_info[node].list_mutex);
+               list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        struct spu_context *tmp = spu->ctx;
 
-                       if (tmp->prio > ctx->prio &&
-                           (!victim || tmp->prio > victim->prio))
+                       if (tmp && tmp->prio > ctx->prio &&
+                           !(tmp->flags & SPU_CREATE_NOSCHED) &&
+                           (!victim || tmp->prio > victim->prio)) {
                                victim = spu->ctx;
+                       }
                }
-               mutex_unlock(&spu_prio->active_mutex[node]);
+               if (victim)
+                       get_spu_context(victim);
+               mutex_unlock(&cbe_spu_info[node].list_mutex);
 
                if (victim) {
                        /*
@@ -313,32 +669,45 @@ static struct spu *find_victim(struct spu_context *ctx)
                         * higher priority contexts before lower priority
                         * ones, so this is safe until we introduce
                         * priority inheritance schemes.
+                        *
+                        * XXX if the highest priority context is locked,
+                        * this can loop a long time.  Might be better to
+                        * look at another context or give up after X retries.
                         */
                        if (!mutex_trylock(&victim->state_mutex)) {
+                               put_spu_context(victim);
                                victim = NULL;
                                goto restart;
                        }
 
                        spu = victim->spu;
-                       if (!spu) {
+                       if (!spu || victim->prio <= ctx->prio) {
                                /*
                                 * This race can happen because we've dropped
-                                * the active list mutex.  No a problem, just
+                                * the active list mutex.  Not a problem, just
                                 * restart the search.
                                 */
                                mutex_unlock(&victim->state_mutex);
+                               put_spu_context(victim);
                                victim = NULL;
                                goto restart;
                        }
-                       spu_remove_from_active_list(spu);
+
+                       spu_context_trace(__spu_deactivate__unload, ctx, spu);
+
+                       mutex_lock(&cbe_spu_info[node].list_mutex);
+                       cbe_spu_info[node].nr_active--;
                        spu_unbind_context(spu, victim);
+                       mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+                       victim->stats.invol_ctx_switch++;
+                       spu->stats.invol_ctx_switch++;
+                       if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
+                               spu_add_to_rq(victim);
+
                        mutex_unlock(&victim->state_mutex);
-                       /*
-                        * We need to break out of the wait loop in spu_run
-                        * manually to ensure this context gets put on the
-                        * runqueue again ASAP.
-                        */
-                       wake_up(&victim->stop_wq);
+                       put_spu_context(victim);
+
                        return spu;
                }
        }
@@ -346,6 +715,66 @@ static struct spu *find_victim(struct spu_context *ctx)
        return NULL;
 }
 
+static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
+{
+       int node = spu->node;
+       int success = 0;
+
+       spu_set_timeslice(ctx);
+
+       mutex_lock(&cbe_spu_info[node].list_mutex);
+       if (spu->ctx == NULL) {
+               spu_bind_context(spu, ctx);
+               cbe_spu_info[node].nr_active++;
+               spu->alloc_state = SPU_USED;
+               success = 1;
+       }
+       mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+       if (success)
+               wake_up_all(&ctx->run_wq);
+       else
+               spu_add_to_rq(ctx);
+}
+
+static void spu_schedule(struct spu *spu, struct spu_context *ctx)
+{
+       /* not a candidate for interruptible because it's called either
+          from the scheduler thread or from spu_deactivate */
+       mutex_lock(&ctx->state_mutex);
+       if (ctx->state == SPU_STATE_SAVED)
+               __spu_schedule(spu, ctx);
+       spu_release(ctx);
+}
+
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu:       The SPU to unschedule from
+ * @ctx:       The context currently scheduled on the SPU
+ * @free_spu:  Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (i.e., may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+               int free_spu)
+{
+       int node = spu->node;
+
+       mutex_lock(&cbe_spu_info[node].list_mutex);
+       cbe_spu_info[node].nr_active--;
+       if (free_spu)
+               spu->alloc_state = SPU_FREE;
+       spu_unbind_context(spu, ctx);
+       ctx->stats.invol_ctx_switch++;
+       spu->stats.invol_ctx_switch++;
+       mutex_unlock(&cbe_spu_info[node].list_mutex);
+}
+
 /**
  * spu_activate - find a free spu for a context and execute it
  * @ctx:       spu context to schedule
@@ -357,30 +786,47 @@ static struct spu *find_victim(struct spu_context *ctx)
  */
 int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
+       struct spu *spu;
 
+       /*
+        * If there are multiple threads waiting for a single context,
+        * only one actually binds the context while the others will
+        * only be able to acquire the state_mutex once the context
+        * is already in the runnable state.
+        */
        if (ctx->spu)
                return 0;
 
-       do {
-               struct spu *spu;
+spu_activate_top:
+       if (signal_pending(current))
+               return -ERESTARTSYS;
 
-               spu = spu_get_idle(ctx);
-               /*
-                * If this is a realtime thread we try to get it running by
-                * preempting a lower priority thread.
-                */
-               if (!spu && rt_prio(ctx->prio))
-                       spu = find_victim(ctx);
-               if (spu) {
-                       spu_bind_context(spu, ctx);
-                       spu_add_to_active_list(spu);
-                       return 0;
-               }
+       spu = spu_get_idle(ctx);
+       /*
+        * If this is a realtime thread we try to get it running by
+        * preempting a lower priority thread.
+        */
+       if (!spu && rt_prio(ctx->prio))
+               spu = find_victim(ctx);
+       if (spu) {
+               unsigned long runcntl;
+
+               runcntl = ctx->ops->runcntl_read(ctx);
+               __spu_schedule(spu, ctx);
+               if (runcntl & SPU_RUNCNTL_RUNNABLE)
+                       spuctx_switch_state(ctx, SPU_UTIL_USER);
+
+               return 0;
+       }
 
+       if (ctx->flags & SPU_CREATE_NOSCHED) {
                spu_prio_wait(ctx);
-       } while (!signal_pending(current));
+               goto spu_activate_top;
+       }
+
+       spu_add_to_rq(ctx);
 
-       return -ERESTARTSYS;
+       return 0;
 }
 
 /**
@@ -389,23 +835,28 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  * Remove the highest priority context on the runqueue and return it
  * to the caller.  Returns %NULL if no runnable context was found.
  */
-static struct spu_context *grab_runnable_context(int prio)
+static struct spu_context *grab_runnable_context(int prio, int node)
 {
-       struct spu_context *ctx = NULL;
+       struct spu_context *ctx;
        int best;
 
        spin_lock(&spu_prio->runq_lock);
-       best = sched_find_first_bit(spu_prio->bitmap);
-       if (best < prio) {
+       best = find_first_bit(spu_prio->bitmap, prio);
+       while (best < prio) {
                struct list_head *rq = &spu_prio->runq[best];
 
-               BUG_ON(list_empty(rq));
-
-               ctx = list_entry(rq->next, struct spu_context, rq);
-               __spu_del_from_rq(ctx);
+               list_for_each_entry(ctx, rq, rq) {
+                       /* XXX(hch): check for affinity here as well */
+                       if (__node_allowed(ctx, node)) {
+                               __spu_del_from_rq(ctx);
+                               goto found;
+                       }
+               }
+               best++;
        }
+       ctx = NULL;
+ found:
        spin_unlock(&spu_prio->runq_lock);
-
        return ctx;
 }
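
As an aside, grab_runnable_context() only looks at priority values strictly below the caller's own, so a context can never be displaced by one of equal or lower priority. A standalone sketch of that check follows; find_first_bit_below() is a simplified stand-in for the kernel's find_first_bit(), and the priorities 110/125 are made-up example values:

#include <stdio.h>

#define MAX_PRIO	140	/* assumed to match the kernel's priority range */

/*
 * Simplified stand-in for the kernel's find_first_bit(): return the index
 * of the first set bit below 'size', or 'size' if none is set.
 */
static int find_first_bit_below(const unsigned char *bitmap, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (bitmap[i / 8] & (1 << (i % 8)))
			return i;
	return size;
}

int main(void)
{
	unsigned char bitmap[MAX_PRIO / 8 + 1] = { 0 };
	int prio = 120;		/* priority of the context giving up its spu */
	int best;

	/* Pretend contexts are queued at priorities 110 and 125. */
	bitmap[110 / 8] |= 1 << (110 % 8);
	bitmap[125 / 8] |= 1 << (125 % 8);

	best = find_first_bit_below(bitmap, prio);
	if (best < prio)
		printf("run the priority %d context instead\n", best);
	else
		printf("no eligible context on the runqueue\n");
	return 0;
}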
 
@@ -415,15 +866,21 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
        struct spu_context *new = NULL;
 
        if (spu) {
-               new = grab_runnable_context(max_prio);
+               new = grab_runnable_context(max_prio, spu->node);
                if (new || force) {
-                       spu_remove_from_active_list(spu);
-                       spu_unbind_context(spu, ctx);
-                       spu_free(spu);
-                       if (new)
-                               wake_up(&new->stop_wq);
+                       spu_unschedule(spu, ctx, new == NULL);
+                       if (new) {
+                               if (new->flags & SPU_CREATE_NOSCHED)
+                                       wake_up(&new->stop_wq);
+                               else {
+                                       spu_release(ctx);
+                                       spu_schedule(spu, new);
+                                       /* this one can't easily be made
+                                          interruptible */
+                                       mutex_lock(&ctx->state_mutex);
+                               }
+                       }
                }
-
        }
 
        return new != NULL;
@@ -438,11 +895,12 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
  */
 void spu_deactivate(struct spu_context *ctx)
 {
+       spu_context_nospu_trace(spu_deactivate__enter, ctx);
        __spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
 /**
- * spu_yield -  yield a physical spu if others are waiting
+ * spu_yield - yield a physical spu if others are waiting
  * @ctx:       spu context to yield
  *
  * Check if there is a higher priority context waiting and if yes
@@ -451,6 +909,7 @@ void spu_deactivate(struct spu_context *ctx)
  */
 void spu_yield(struct spu_context *ctx)
 {
+       spu_context_nospu_trace(spu_yield__enter, ctx);
        if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
                mutex_lock(&ctx->state_mutex);
                __spu_deactivate(ctx, 0, MAX_PRIO);
@@ -458,37 +917,79 @@ void spu_yield(struct spu_context *ctx)
        }
 }
 
-static void spusched_tick(struct spu_context *ctx)
+static noinline void spusched_tick(struct spu_context *ctx)
 {
-       if (ctx->policy == SCHED_FIFO || --ctx->time_slice)
-               return;
+       struct spu_context *new = NULL;
+       struct spu *spu = NULL;
 
-       /*
-        * Unfortunately active_mutex ranks outside of state_mutex, so
-        * we have to trylock here.  If we fail give the context another
-        * tick and try again.
-        */
-       if (mutex_trylock(&ctx->state_mutex)) {
-               struct spu_context *new = grab_runnable_context(ctx->prio + 1);
-               if (new) {
-                       struct spu *spu = ctx->spu;
-
-                       __spu_remove_from_active_list(spu);
-                       spu_unbind_context(spu, ctx);
-                       spu_free(spu);
-                       wake_up(&new->stop_wq);
-                       /*
-                        * We need to break out of the wait loop in
-                        * spu_run manually to ensure this context
-                        * gets put on the runqueue again ASAP.
-                        */
-                       wake_up(&ctx->stop_wq);
-               }
-               spu_set_timeslice(ctx);
-               mutex_unlock(&ctx->state_mutex);
+       if (spu_acquire(ctx))
+               BUG();  /* a kernel thread never has signals pending */
+
+       if (ctx->state != SPU_STATE_RUNNABLE)
+               goto out;
+       if (ctx->flags & SPU_CREATE_NOSCHED)
+               goto out;
+       if (ctx->policy == SCHED_FIFO)
+               goto out;
+
+       if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
+               goto out;
+
+       spu = ctx->spu;
+
+       spu_context_trace(spusched_tick__preempt, ctx, spu);
+
+       new = grab_runnable_context(ctx->prio + 1, spu->node);
+       if (new) {
+               spu_unschedule(spu, ctx, 0);
+               if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
+                       spu_add_to_rq(ctx);
        } else {
-               ctx->time_slice++;
+               spu_context_nospu_trace(spusched_tick__newslice, ctx);
+               if (!ctx->time_slice)
+                       ctx->time_slice++;
        }
+out:
+       spu_release(ctx);
+
+       if (new)
+               spu_schedule(spu, new);
+}
+
+/**
+ * count_active_contexts - count the number of active tasks
+ *
+ * Return the number of tasks currently running or waiting to run.
+ *
+ * Note that we don't take runq_lock / list_mutex here.  Reading
+ * a single 32-bit value is atomic on powerpc, and we don't care
+ * about memory ordering issues here.
+ */
+static unsigned long count_active_contexts(void)
+{
+       int nr_active = 0, node;
+
+       for (node = 0; node < MAX_NUMNODES; node++)
+               nr_active += cbe_spu_info[node].nr_active;
+       nr_active += spu_prio->nr_waiting;
+
+       return nr_active;
+}
+
+/**
+ * spu_calc_load - update the avenrun load estimates.
+ *
+ * No locking against reading these values from userspace, as for
+ * the CPU loadavg code.
+ */
+static void spu_calc_load(void)
+{
+       unsigned long active_tasks; /* fixed-point */
+
+       active_tasks = count_active_contexts() * FIXED_1;
+       CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+       CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+       CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
 }
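
For background, CALC_LOAD is the same fixed-point exponential-decay scheme the kernel uses for the CPU load average. The self-contained sketch below runs a few 5-second updates, assuming the classic constants FSHIFT = 11, FIXED_1 = 1 << 11 and EXP_1 = 1884; the three active contexts are an arbitrary example value:

#include <stdio.h>

#define FSHIFT	11			/* assumed kernel fixed-point shift */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* 1/exp(5sec/1min) in fixed point */

#define CALC_LOAD(load, exp, n)			\
	(load) *= (exp);			\
	(load) += (n) * (FIXED_1 - (exp));	\
	(load) >>= FSHIFT;

int main(void)
{
	unsigned long avenrun = 0;		/* spu_avenrun[0] analogue */
	unsigned long active = 3 * FIXED_1;	/* pretend three contexts are active */
	int tick;

	/* Each iteration corresponds to one LOAD_FREQ (~5 second) update. */
	for (tick = 1; tick <= 12; tick++) {
		CALC_LOAD(avenrun, EXP_1, active);
		printf("after %2d ticks: %lu.%02lu\n", tick,
			avenrun >> FSHIFT,
			((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}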
 
 static void spusched_wake(unsigned long data)
@@ -497,77 +998,178 @@ static void spusched_wake(unsigned long data)
        wake_up_process(spusched_task);
 }
 
+static void spuloadavg_wake(unsigned long data)
+{
+       mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
+       spu_calc_load();
+}
+
 static int spusched_thread(void *unused)
 {
-       struct spu *spu, *next;
+       struct spu *spu;
        int node;
 
-       setup_timer(&spusched_timer, spusched_wake, 0);
-       __mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
-
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                for (node = 0; node < MAX_NUMNODES; node++) {
-                       mutex_lock(&spu_prio->active_mutex[node]);
-                       list_for_each_entry_safe(spu, next,
-                                                &spu_prio->active_list[node],
-                                                list)
-                               spusched_tick(spu->ctx);
-                       mutex_unlock(&spu_prio->active_mutex[node]);
+                       struct mutex *mtx = &cbe_spu_info[node].list_mutex;
+
+                       mutex_lock(mtx);
+                       list_for_each_entry(spu, &cbe_spu_info[node].spus,
+                                       cbe_list) {
+                               struct spu_context *ctx = spu->ctx;
+
+                               if (ctx) {
+                                       get_spu_context(ctx);
+                                       mutex_unlock(mtx);
+                                       spusched_tick(ctx);
+                                       mutex_lock(mtx);
+                                       put_spu_context(ctx);
+                               }
+                       }
+                       mutex_unlock(mtx);
                }
        }
 
-       del_timer_sync(&spusched_timer);
        return 0;
 }
 
+void spuctx_switch_state(struct spu_context *ctx,
+               enum spu_utilization_state new_state)
+{
+       unsigned long long curtime;
+       signed long long delta;
+       struct timespec ts;
+       struct spu *spu;
+       enum spu_utilization_state old_state;
+       int node;
+
+       ktime_get_ts(&ts);
+       curtime = timespec_to_ns(&ts);
+       delta = curtime - ctx->stats.tstamp;
+
+       WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+       WARN_ON(delta < 0);
+
+       spu = ctx->spu;
+       old_state = ctx->stats.util_state;
+       ctx->stats.util_state = new_state;
+       ctx->stats.tstamp = curtime;
+
+       /*
+        * Update the physical SPU utilization statistics.
+        */
+       if (spu) {
+               ctx->stats.times[old_state] += delta;
+               spu->stats.times[old_state] += delta;
+               spu->stats.util_state = new_state;
+               spu->stats.tstamp = curtime;
+               node = spu->node;
+               if (old_state == SPU_UTIL_USER)
+                       atomic_dec(&cbe_spu_info[node].busy_spus);
+               if (new_state == SPU_UTIL_USER)
+                       atomic_inc(&cbe_spu_info[node].busy_spus);
+       }
+}
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+static int show_spu_loadavg(struct seq_file *s, void *private)
+{
+       int a, b, c;
+
+       a = spu_avenrun[0] + (FIXED_1/200);
+       b = spu_avenrun[1] + (FIXED_1/200);
+       c = spu_avenrun[2] + (FIXED_1/200);
+
+       /*
+        * Note that last_pid doesn't really make much sense for the
+        * SPU loadavg (it even seems very odd on the CPU side...),
+        * but we include it here to have a 100% compatible interface.
+        */
+       seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
+               LOAD_INT(a), LOAD_FRAC(a),
+               LOAD_INT(b), LOAD_FRAC(b),
+               LOAD_INT(c), LOAD_FRAC(c),
+               count_active_contexts(),
+               atomic_read(&nr_spu_contexts),
+               current->nsproxy->pid_ns->last_pid);
+       return 0;
+}
+
+static int spu_loadavg_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, show_spu_loadavg, NULL);
+}
+
+static const struct file_operations spu_loadavg_fops = {
+       .open           = spu_loadavg_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 int __init spu_sched_init(void)
 {
-       int i;
+       struct proc_dir_entry *entry;
+       int err = -ENOMEM, i;
 
        spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
        if (!spu_prio)
-               return -ENOMEM;
+               goto out;
 
        for (i = 0; i < MAX_PRIO; i++) {
                INIT_LIST_HEAD(&spu_prio->runq[i]);
                __clear_bit(i, spu_prio->bitmap);
        }
-       __set_bit(MAX_PRIO, spu_prio->bitmap);
-       for (i = 0; i < MAX_NUMNODES; i++) {
-               mutex_init(&spu_prio->active_mutex[i]);
-               INIT_LIST_HEAD(&spu_prio->active_list[i]);
-       }
        spin_lock_init(&spu_prio->runq_lock);
 
+       setup_timer(&spusched_timer, spusched_wake, 0);
+       setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
+
        spusched_task = kthread_run(spusched_thread, NULL, "spusched");
        if (IS_ERR(spusched_task)) {
-               kfree(spu_prio);
-               return PTR_ERR(spusched_task);
+               err = PTR_ERR(spusched_task);
+               goto out_free_spu_prio;
        }
 
+       mod_timer(&spuloadavg_timer, 0);
+
+       entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
+       if (!entry)
+               goto out_stop_kthread;
+
        pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
                        SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
        return 0;
 
+ out_stop_kthread:
+       kthread_stop(spusched_task);
+ out_free_spu_prio:
+       kfree(spu_prio);
+ out:
+       return err;
 }
 
-void __exit spu_sched_exit(void)
+void spu_sched_exit(void)
 {
-       struct spu *spu, *tmp;
+       struct spu *spu;
        int node;
 
+       remove_proc_entry("spu_loadavg", NULL);
+
+       del_timer_sync(&spusched_timer);
+       del_timer_sync(&spuloadavg_timer);
        kthread_stop(spusched_task);
 
        for (node = 0; node < MAX_NUMNODES; node++) {
-               mutex_lock(&spu_prio->active_mutex[node]);
-               list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
-                                        list) {
-                       list_del_init(&spu->list);
-                       spu_free(spu);
-               }
-               mutex_unlock(&spu_prio->active_mutex[node]);
+               mutex_lock(&cbe_spu_info[node].list_mutex);
+               list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
+                       if (spu->alloc_state != SPU_FREE)
+                               spu->alloc_state = SPU_FREE;
+               mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
        kfree(spu_prio);
 }