/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
	int nr_active[MAX_NUMNODES];
	int nr_waiting;
};
static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120
/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)
/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
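
/*
 * Worked example (assuming HZ=250, a common CONFIG_HZ choice; the numbers
 * shift with other HZ values): one spu scheduler tick is 10 CPU ticks, i.e.
 * 40 msecs, so DEF_SPU_TIMESLICE = 100 * 250 / (1000 * 10) = 2 ticks
 * (~80 msecs) and MIN_SPU_TIMESLICE = max(5 * 250 / 10000, 1) = 1 tick.
 */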
#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
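
/*
 * Worked example for SCALE_PRIO (with the kernel's usual MAX_PRIO=140 and
 * MAX_RT_PRIO=100, so MAX_USER_PRIO/2 == 20): a nice-0 thread has prio 120
 * and gets x * (140 - 120) / 20 == x, i.e. the full default timeslice, while
 * a nice-19 thread (prio 139) gets x * 1 / 20, clamped to MIN_SPU_TIMESLICE.
 * Threads below NORMAL_PRIO are passed x = 4 * DEF_SPU_TIMESLICE in
 * spu_set_timeslice() below, e.g. 8 * DEF_SPU_TIMESLICE (~800 msecs) at
 * prio 100.
 */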
/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution.  But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold active_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}
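
/*
 * Example of the values this produces: a SCHED_OTHER thread at nice 0 has
 * ->static_prio == 120, so ctx->prio becomes 120; a realtime thread ends up
 * with its ->prio somewhere in [0, MAX_RT_PRIO), which always sorts ahead
 * of any nice level since lower values mean higher priority.
 */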
void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_update_sched_info(ctx);
	mutex_unlock(&spu_prio->active_mutex[node]);
}
static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}
static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}
/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	spu_prio->nr_active[node]++;
	list_add_tail(&spu->list, &spu_prio->active_list[node]);
	mutex_unlock(&spu_prio->active_mutex[node]);
}
static void __spu_remove_from_active_list(struct spu *spu)
{
	list_del_init(&spu->list);
	spu_prio->nr_active[spu->node]--;
}
/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:	spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_remove_from_active_list(spu);
	mutex_unlock(&spu_prio->active_mutex[node]);
}
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
				     ctx ? ctx->object_id : 0, spu);
}
static void notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *ctx = spu->ctx;
			set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags);
			mb();	/* make sure any tasks woken up below */
				/* can see the bit(s) set above */
			wake_up_all(&ctx->stop_wq);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
}
int spu_switch_event_register(struct notifier_block *n)
{
	int ret;

	ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
	if (!ret)
		notify_spus_active();
	return ret;
}
EXPORT_SYMBOL_GPL(spu_switch_event_register);
int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
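
/*
 * Sketch of how a consumer (e.g. a profiler) might hook into the switch
 * notifier above; the callback and variable names here are hypothetical,
 * only the registration API is taken from this file:
 *
 *	static int my_spu_switch(struct notifier_block *nb,
 *				 unsigned long object_id, void *data)
 *	{
 *		struct spu *spu = data;	// the spu that switched contexts
 *		// object_id identifies the new context, 0 on unbind
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_spu_switch,
 *	};
 *	spu_switch_event_register(&my_nb);
 */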
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
	if (!list_empty(&ctx->aff_list))
		atomic_inc(&ctx->gang->aff_sched_count);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;

	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}
/*
 * XXX(hch): needs locking.
 */
static inline int sched_spu(struct spu *spu)
{
	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}
static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
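
/*
 * Example: for a gang of five affinity contexts A-B-C-D-E with reference
 * context C, the loops above assign aff_offset -2 and -1 to A and B
 * (walking backwards from C) and 0, 1, 2 to C, D, E (walking forwards),
 * so each context records how many steps it sits from the reference point.
 */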
static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu))
				return spu;
		}
	}
	return NULL;
}
static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu_context *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(ctx, mem_aff, gs, lowest_offset);
}
static struct spu *ctx_location(struct spu *ref, int offset)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next,
					    aff_list) {
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}
	return spu;
}
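
/*
 * Example: ctx_location(ref, 2) starts at ref and walks the physical
 * aff_list forward, counting down one for each schedulable spu (see
 * sched_spu()) and returning the entry reached when the count hits zero;
 * a negative offset walks the list backwards from ref in the same way.
 */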
/*
 * affinity_check is called each time a context is going to be scheduled.
 * It returns the spu ptr on which the context must run.
 */
struct spu *affinity_check(struct spu_context *ctx)
{
	struct spu_gang *gang;

	if (list_empty(&ctx->aff_list))
		return NULL;
	gang = ctx->gang;
	mutex_lock(&gang->aff_mutex);
	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}
	mutex_unlock(&gang->aff_mutex);
	if (!gang->aff_ref_spu)
		return NULL;
	return ctx_location(gang->aff_ref_spu, ctx->aff_offset);
}
/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
	if (!list_empty(&ctx->aff_list))
		if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
			ctx->gang->aff_ref_spu = NULL;
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}
/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}
static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}
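
/*
 * spu_prio_wait - sleep until this context can grab an spu.
 *
 * Note: the caller holds ctx->state_mutex.  It is dropped around the
 * schedule() call below so that spusched_tick() and find_victim() can wake
 * us through ctx->stop_wq; the runq_lock only guards the runqueue itself.
 */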
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	spu = affinity_check(ctx);
	if (spu)
		return spu_alloc_spu(spu);

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}
/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, victim);
			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}
/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			spu_add_to_active_list(spu);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}
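
/*
 * Design note on the runqueue below: like the CPU scheduler's O(1)
 * runqueue, there is one list per priority plus a bitmap of non-empty
 * lists, so finding the best waiting context is a find_first_bit() over
 * MAX_PRIO bits followed by taking a suitable entry from that list.
 */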
/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;
			spu_free(spu);
			if (new)
				wake_up(&new->stop_wq);
		}
	}

	return new != NULL;
}
/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}
/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}
static void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately active_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			__spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu_free(spu);
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}
/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / active_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += spu_prio->nr_active[node];
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}
/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 * @ticks:	number of elapsed spu scheduler ticks
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;

	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
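
/*
 * The CALC_LOAD() macro (from linux/sched.h) implements the usual
 * exponentially damped moving average in FSHIFT-bit fixed point:
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) / FIXED_1
 *
 * where FIXED_1 == 1 << FSHIFT represents 1.0 and EXP_1/EXP_5/EXP_15 are
 * the precomputed decay factors for 1, 5 and 15 minute windows.
 */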
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}
static int spusched_thread(void *unused)
{
	struct spu *spu, *next;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&spu_prio->active_mutex[node]);
			list_for_each_entry_safe(spu, next,
						 &spu_prio->active_list[node],
						 list)
				spusched_tick(spu->ctx);
			mutex_unlock(&spu_prio->active_mutex[node]);
		}
	}

	return 0;
}
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
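
/*
 * Example: a fixed-point load of 2.5 is stored as 5 * FIXED_1 / 2 == 5120
 * (FSHIFT is 11, so FIXED_1 is 2048); LOAD_INT() yields 5120 >> 11 == 2 and
 * LOAD_FRAC() yields ((5120 & 2047) * 100) >> 11 == 50, printed as "2.50".
 */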
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side..),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}
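
/*
 * The output format matches /proc/loadavg; a hypothetical sample line:
 *
 *	0.12 0.08 0.02 2/15 1234
 *
 * i.e. the 1/5/15 minute averages, running/total spu contexts, and the
 * last pid allocated in the reader's pid namespace.
 */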
static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}
static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}
void spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}