/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright 2003 PathScale, Inc.
* Licensed under the GPL
*/
-#include "linux/kernel.h"
-#include "linux/sched.h"
-#include "linux/interrupt.h"
-#include "linux/string.h"
-#include "linux/mm.h"
-#include "linux/slab.h"
-#include "linux/utsname.h"
-#include "linux/fs.h"
-#include "linux/utime.h"
-#include "linux/smp_lock.h"
-#include "linux/module.h"
-#include "linux/init.h"
-#include "linux/capability.h"
-#include "linux/vmalloc.h"
-#include "linux/spinlock.h"
-#include "linux/proc_fs.h"
-#include "linux/ptrace.h"
-#include "linux/random.h"
-#include "linux/personality.h"
-#include "asm/unistd.h"
-#include "asm/mman.h"
-#include "asm/segment.h"
-#include "asm/stat.h"
-#include "asm/pgtable.h"
-#include "asm/processor.h"
-#include "asm/tlbflush.h"
-#include "asm/uaccess.h"
-#include "asm/user.h"
-#include "user_util.h"
-#include "kern_util.h"
+#include <linux/stddef.h>
+#include <linux/err.h>
+#include <linux/hardirq.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/proc_fs.h>
+#include <linux/ptrace.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/tick.h>
+#include <linux/threads.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
#include "as-layout.h"
-#include "kern.h"
-#include "signal_kern.h"
-#include "init.h"
-#include "irq_user.h"
-#include "mem_user.h"
-#include "tlb.h"
-#include "frame_kern.h"
-#include "sigcontext.h"
+#include "kern_util.h"
#include "os.h"
-#include "mode.h"
-#include "mode_kern.h"
-#include "choose-mode.h"
-#include "um_malloc.h"
+#include "skas.h"
+#include "tlb.h"
-/* This is a per-cpu array. A processor only modifies its entry and it only
+/*
+ * This is a per-cpu array. A processor only modifies its entry and it only
* cares about its entry, so it's OK if another processor is modifying its
* entry.
*/
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
-int external_pid(void *t)
+static inline int external_pid(void)
{
- struct task_struct *task = t ? t : current;
-
- return(CHOOSE_MODE_PROC(external_pid_tt, external_pid_skas, task));
+ /* FIXME: Need to look up userspace_pid by cpu */
+ return userspace_pid[0];
}
int pid_to_processor_id(int pid)
{
int i;
- for(i = 0; i < ncpus; i++){
- if(cpu_tasks[i].pid == pid) return(i);
+ for (i = 0; i < ncpus; i++) {
+ if (cpu_tasks[i].pid == pid)
+ return i;
}
- return(-1);
+ return -1;
}
void free_stack(unsigned long stack, int order)
if (atomic)
flags = GFP_ATOMIC;
page = __get_free_pages(flags, order);
- if(page == 0)
- return(0);
- stack_protections(page);
- return(page);
+
+ return page;
}
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
current->thread.request.u.thread.arg = arg;
pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
&current->thread.regs, 0, NULL, NULL);
- if(pid < 0)
- panic("do_fork failed in kernel_thread, errno = %d", pid);
- return(pid);
+ return pid;
}
-void set_current(void *t)
+static inline void set_current(struct task_struct *task)
{
- struct task_struct *task = t;
-
cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
- { external_pid(task), task });
+ { external_pid(), task });
}
+extern void arch_switch_to(struct task_struct *to);
+
void *_switch_to(void *prev, void *next, void *last)
{
struct task_struct *from = prev;
- struct task_struct *to= next;
+ struct task_struct *to = next;
to->thread.prev_sched = from;
set_current(to);
do {
- current->thread.saved_task = NULL ;
- CHOOSE_MODE_PROC(switch_to_tt, switch_to_skas, prev, next);
- if(current->thread.saved_task)
+ current->thread.saved_task = NULL;
+
+ switch_threads(&from->thread.switch_buf,
+ &to->thread.switch_buf);
+
+ arch_switch_to(current);
+
+ if (current->thread.saved_task)
show_regs(&(current->thread.regs));
- next= current->thread.saved_task;
- prev= current;
- } while(current->thread.saved_task);
+ to = current->thread.saved_task;
+ from = current;
+ } while (current->thread.saved_task);
- return(current->thread.prev_sched);
+ return current->thread.prev_sched;
}
void interrupt_end(void)
{
- if(need_resched()) schedule();
- if(test_tsk_thread_flag(current, TIF_SIGPENDING)) do_signal();
-}
-
-void release_thread(struct task_struct *task)
-{
- CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
+ if (need_resched())
+ schedule();
+ if (test_tsk_thread_flag(current, TIF_SIGPENDING))
+ do_signal();
}
void exit_thread(void)
{
- unprotect_stack((unsigned long) current_thread);
}
void *get_current(void)
{
- return(current);
+ return current;
}
-int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
- unsigned long stack_top, struct task_struct * p,
- struct pt_regs *regs)
+/*
+ * This is called magically, by its address being stuffed in a jmp_buf
+ * and being longjmp-d to.
+ */
+void new_thread_handler(void)
{
- int ret;
+ int (*fn)(void *), n;
+ void *arg;
- p->thread = (struct thread_struct) INIT_THREAD;
- ret = CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
- clone_flags, sp, stack_top, p, regs);
-
- if (ret || !current->thread.forking)
- goto out;
+ if (current->thread.prev_sched != NULL)
+ schedule_tail(current->thread.prev_sched);
+ current->thread.prev_sched = NULL;
- clear_flushed_tls(p);
+ fn = current->thread.request.u.thread.proc;
+ arg = current->thread.request.u.thread.arg;
/*
- * Set a new TLS for the child thread?
+ * The return value is 1 if the kernel thread execs a process,
+ * 0 if it just exits
*/
- if (clone_flags & CLONE_SETTLS)
- ret = arch_copy_tls(p);
-
-out:
- return ret;
-}
-
-void initial_thread_cb(void (*proc)(void *), void *arg)
-{
- int save_kmalloc_ok = kmalloc_ok;
-
- kmalloc_ok = 0;
- CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
- arg);
- kmalloc_ok = save_kmalloc_ok;
-}
-
-unsigned long stack_sp(unsigned long page)
-{
- return(page + PAGE_SIZE - sizeof(void *));
+ n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
+ if (n == 1) {
+ /* Handle any immediate reschedules or signals */
+ interrupt_end();
+ userspace(&current->thread.regs.regs);
+ }
+ else do_exit(0);
}
-int current_pid(void)
+/* Called magically, see new_thread_handler above */
+void fork_handler(void)
{
- return(current->pid);
-}
+ force_flush_all();
-void default_idle(void)
-{
- CHOOSE_MODE(uml_idle_timer(), (void) 0);
+ schedule_tail(current->thread.prev_sched);
- while(1){
- /* endless idle loop with no priority at all */
+ /*
+ * XXX: if interrupt_end() calls schedule, this call to
+ * arch_switch_to isn't needed. We may want to apply this to
+ * improve performance. -bb
+ */
+ arch_switch_to(current);
- /*
- * although we are an idle CPU, we do not want to
- * get into the scheduler unnecessarily.
- */
- if(need_resched())
- schedule();
+ current->thread.prev_sched = NULL;
- idle_sleep(10);
- }
-}
+ /* Handle any immediate reschedules or signals */
+ interrupt_end();
-void cpu_idle(void)
-{
- CHOOSE_MODE(init_idle_tt(), init_idle_skas());
+ userspace(&current->thread.regs.regs);
}
-int page_size(void)
+int copy_thread(unsigned long clone_flags, unsigned long sp,
+ unsigned long stack_top, struct task_struct * p,
+ struct pt_regs *regs)
{
- return(PAGE_SIZE);
-}
+ void (*handler)(void);
+ int ret = 0;
-void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
- pte_t *pte_out)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t ptent;
+ p->thread = (struct thread_struct) INIT_THREAD;
- if(task->mm == NULL)
- return(ERR_PTR(-EINVAL));
- pgd = pgd_offset(task->mm, addr);
- if(!pgd_present(*pgd))
- return(ERR_PTR(-EINVAL));
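+ /*
+ * Forking a userspace process: the child starts with a copy of the
+ * parent's registers, the syscall return value forced to 0 (so
+ * fork() returns 0 in the child) and, if one was passed in, a new
+ * stack pointer.
+ */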
+ if (current->thread.forking) {
+ memcpy(&p->thread.regs.regs, &regs->regs,
+ sizeof(p->thread.regs.regs));
+ REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
+ if (sp != 0)
+ REGS_SP(p->thread.regs.regs.gp) = sp;
- pud = pud_offset(pgd, addr);
- if(!pud_present(*pud))
- return(ERR_PTR(-EINVAL));
+ handler = fork_handler;
- pmd = pmd_offset(pud, addr);
- if(!pmd_present(*pmd))
- return(ERR_PTR(-EINVAL));
+ arch_copy_thread(¤t->thread.arch, &p->thread.arch);
+ }
+ else {
+ get_safe_registers(p->thread.regs.regs.gp);
+ p->thread.request.u.thread = current->thread.request.u.thread;
+ handler = new_thread_handler;
+ }
- pte = pte_offset_kernel(pmd, addr);
- ptent = *pte;
- if(!pte_present(ptent))
- return(ERR_PTR(-EINVAL));
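+ /*
+ * Arrange for the first switch to the child to land in 'handler'
+ * (fork_handler or new_thread_handler above); new_thread() stores
+ * it in the child's switch_buf jmp_buf.
+ */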
+ new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
- if(pte_out != NULL)
- *pte_out = ptent;
- return((void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK));
-}
+ if (current->thread.forking) {
+ clear_flushed_tls(p);
-char *current_cmd(void)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
- return("(Unknown)");
-#else
- void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
- return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
-#endif
-}
+ /*
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS)
+ ret = arch_copy_tls(p);
+ }
-void force_sigbus(void)
-{
- printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
- current->pid);
- lock_kernel();
- sigaddset(&current->pending.signal, SIGBUS);
- recalc_sigpending();
- current->flags |= PF_SIGNALED;
- do_exit(SIGBUS | 0x80);
+ return ret;
}
-void dump_thread(struct pt_regs *regs, struct user *u)
+void initial_thread_cb(void (*proc)(void *), void *arg)
{
-}
+ int save_kmalloc_ok = kmalloc_ok;
-void enable_hlt(void)
-{
- panic("enable_hlt");
+ kmalloc_ok = 0;
+ initial_thread_cb_skas(proc, arg);
+ kmalloc_ok = save_kmalloc_ok;
}
-EXPORT_SYMBOL(enable_hlt);
-
-void disable_hlt(void)
+void default_idle(void)
{
- panic("disable_hlt");
-}
+ unsigned long long nsecs;
-EXPORT_SYMBOL(disable_hlt);
-
-void *um_kmalloc(int size)
-{
- return kmalloc(size, GFP_KERNEL);
-}
+ while (1) {
+ /* endless idle loop with no priority at all */
-void *um_kmalloc_atomic(int size)
-{
- return kmalloc(size, GFP_ATOMIC);
-}
+ /*
+ * although we are an idle CPU, we do not want to
+ * get into the scheduler unnecessarily.
+ */
+ if (need_resched())
+ schedule();
-void *um_vmalloc(int size)
-{
- return vmalloc(size);
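+ /*
+ * Run tickless while idle: stop the sched tick, shut the timer
+ * off and sleep for the interval it reports, then restart the
+ * tick once we wake up.
+ */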
+ tick_nohz_stop_sched_tick(1);
+ nsecs = disable_timer();
+ idle_sleep(nsecs);
+ tick_nohz_restart_sched_tick();
+ }
}
-void *um_vmalloc_atomic(int size)
+void cpu_idle(void)
{
- return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
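+ /* Record the host pid backing this CPU before entering the idle loop */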
+ cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
+ default_idle();
}
int __cant_sleep(void) {
/* Is in_interrupt() really needed? */
}
-unsigned long get_fault_addr(void)
-{
- return((unsigned long) current->thread.fault_addr);
-}
-
-EXPORT_SYMBOL(get_fault_addr);
-
-void not_implemented(void)
-{
- printk(KERN_DEBUG "Something isn't implemented in here\n");
-}
-
-EXPORT_SYMBOL(not_implemented);
-
int user_context(unsigned long sp)
{
unsigned long stack;
stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
- return(stack != (unsigned long) current_thread);
+ return stack != (unsigned long) current_thread_info();
}
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
(*call)();
}
-char *uml_strdup(char *string)
+char *uml_strdup(const char *string)
{
return kstrdup(string, GFP_KERNEL);
}
int copy_to_user_proc(void __user *to, void *from, int size)
{
- return(copy_to_user(to, from, size));
+ return copy_to_user(to, from, size);
}
int copy_from_user_proc(void *to, void __user *from, int size)
{
- return(copy_from_user(to, from, size));
+ return copy_from_user(to, from, size);
}
int clear_user_proc(void __user *buf, int size)
{
- return(clear_user(buf, size));
+ return clear_user(buf, size);
}
int strlen_user_proc(char __user *str)
{
- return(strlen_user(str));
+ return strlen_user(str);
}
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
- int cpu = current_thread->cpu;
+ int cpu = current_thread_info()->cpu;
IPI_handler(cpu);
- if(cpu != 0)
- return(1);
+ if (cpu != 0)
+ return 1;
#endif
- return(0);
+ return 0;
}
int cpu(void)
{
- return(current_thread->cpu);
+ return current_thread_info()->cpu;
}
static atomic_t using_sysemu = ATOMIC_INIT(0);
return atomic_read(&using_sysemu);
}
-static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
+static int sysemu_proc_show(struct seq_file *m, void *v)
{
- if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/
- *eof = 1;
+ seq_printf(m, "%d\n", get_using_sysemu());
+ return 0;
+}
- return strlen(buf);
+static int sysemu_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sysemu_proc_show, NULL);
}
-static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
+static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
char tmp[2];
if (tmp[0] >= '0' && tmp[0] <= '2')
set_using_sysemu(tmp[0] - '0');
- return count; /*We use the first char, but pretend to write everything*/
+ /* We use the first char, but pretend to write everything */
+ return count;
}
+static const struct file_operations sysemu_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = sysemu_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = sysemu_proc_write,
+};
+
int __init make_proc_sysemu(void)
{
struct proc_dir_entry *ent;
if (!sysemu_supported)
return 0;
- ent = create_proc_entry("sysemu", 0600, &proc_root);
+ ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
if (ent == NULL)
{
printk(KERN_WARNING "Failed to register /proc/sysemu\n");
- return(0);
+ return 0;
}
- ent->read_proc = proc_read_sysemu;
- ent->write_proc = proc_write_sysemu;
-
return 0;
}
{
struct task_struct *task = t ? t : current;
- if ( ! (task->ptrace & PT_DTRACE) )
- return(0);
+ if (!(task->ptrace & PT_DTRACE))
+ return 0;
if (task->thread.singlestep_syscall)
- return(1);
+ return 1;
return 2;
}
return sp & ~0xf;
}
#endif
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long stack_page, sp, ip;
+ bool seen_sched = false;
+
+ if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
+ return 0;
+
+ stack_page = (unsigned long) task_stack_page(p);
+ /* Bail if the process has no kernel stack for some reason */
+ if (stack_page == 0)
+ return 0;
+
+ sp = p->thread.switch_buf->JB_SP;
+ /*
+ * Bail if the stack pointer is below the bottom of the kernel
+ * stack for some reason
+ */
+ if (sp < stack_page)
+ return 0;
+
+ while (sp < stack_page + THREAD_SIZE) {
+ ip = *((unsigned long *) sp);
+ if (in_sched_functions(ip))
+ /* Ignore everything until we're above the scheduler */
+ seen_sched = 1;
+ else if (kernel_text_address(ip) && seen_sched)
+ return ip;
+
+ sp += sizeof(unsigned long);
+ }
+
+ return 0;
+}
+
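+/*
+ * Fill in the core dump FP register set from the host process that
+ * runs this CPU's userspace code.
+ */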
+int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
+{
+ int cpu = current_thread_info()->cpu;
+
+ return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
+}
+