uml: remove code made redundant by CHOOSE_MODE removal
arch/um/kernel/process.c
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/interrupt.h"
#include "linux/string.h"
#include "linux/mm.h"
#include "linux/slab.h"
#include "linux/utsname.h"
#include "linux/fs.h"
#include "linux/utime.h"
#include "linux/smp_lock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/capability.h"
#include "linux/vmalloc.h"
#include "linux/spinlock.h"
#include "linux/proc_fs.h"
#include "linux/ptrace.h"
#include "linux/random.h"
#include "linux/personality.h"
#include "asm/unistd.h"
#include "asm/mman.h"
#include "asm/segment.h"
#include "asm/stat.h"
#include "asm/pgtable.h"
#include "asm/processor.h"
#include "asm/tlbflush.h"
#include "asm/uaccess.h"
#include "asm/user.h"
#include "kern_util.h"
#include "as-layout.h"
#include "kern.h"
#include "signal_kern.h"
#include "init.h"
#include "irq_user.h"
#include "mem_user.h"
#include "tlb.h"
#include "frame_kern.h"
#include "sigcontext.h"
#include "os.h"
#include "skas.h"

/* This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(struct task_struct *task)
{
        /* FIXME: Need to look up userspace_pid by cpu */
        return(userspace_pid[0]);
}

int pid_to_processor_id(int pid)
{
        int i;

        for(i = 0; i < ncpus; i++){
                if(cpu_tasks[i].pid == pid)
                        return i;
        }
        return -1;
}

void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

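/*
 * Allocate 2^order pages for a kernel stack.  Callers in atomic
 * context pass "atomic" so that GFP_ATOMIC is used instead of
 * GFP_KERNEL; returns 0 on allocation failure.
 */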
unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        gfp_t flags = GFP_KERNEL;

        if (atomic)
                flags = GFP_ATOMIC;
        page = __get_free_pages(flags, order);
        if (page == 0)
                return 0;

        return page;
}

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        int pid;

        current->thread.request.u.thread.proc = fn;
        current->thread.request.u.thread.arg = arg;
        pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
                      &current->thread.regs, 0, NULL, NULL);
        return pid;
}

static inline void set_current(struct task_struct *task)
{
        cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
                { external_pid(task), task });
}

extern void arch_switch_to(struct task_struct *from, struct task_struct *to);

void *_switch_to(void *prev, void *next, void *last)
{
        struct task_struct *from = prev;
        struct task_struct *to= next;

        to->thread.prev_sched = from;
        set_current(to);

        do {
                current->thread.saved_task = NULL;

                /* XXX need to check runqueues[cpu].idle */
                if(current->pid == 0)
                        switch_timers(0);

                switch_threads(&from->thread.switch_buf,
                               &to->thread.switch_buf);

                arch_switch_to(current->thread.prev_sched, current);

                if(current->pid == 0)
                        switch_timers(1);

                if(current->thread.saved_task)
                        show_regs(&(current->thread.regs));
                next= current->thread.saved_task;
                prev= current;
        } while(current->thread.saved_task);

        return current->thread.prev_sched;

}

void interrupt_end(void)
{
        if(need_resched())
                schedule();
        if(test_tsk_thread_flag(current, TIF_SIGPENDING))
                do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
        return current;
}

extern void schedule_tail(struct task_struct *prev);

/* This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
        int (*fn)(void *), n;
        void *arg;

        if(current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        /* The return value is 1 if the kernel thread execs a process,
         * 0 if it just exits
         */
        n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
        if(n == 1){
                /* Handle any immediate reschedules or signals */
                interrupt_end();
                userspace(&current->thread.regs.regs);
        }
        else do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
        force_flush_all();
        if(current->thread.prev_sched == NULL)
                panic("blech");

        schedule_tail(current->thread.prev_sched);

        /* XXX: if interrupt_end() calls schedule, this call to
         * arch_switch_to isn't needed. We could want to apply this to
         * improve performance. -bb */
        arch_switch_to(current->thread.prev_sched, current);

        current->thread.prev_sched = NULL;

        /* Handle any immediate reschedules or signals */
        interrupt_end();

        userspace(&current->thread.regs.regs);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long stack_top, struct task_struct * p,
                struct pt_regs *regs)
{
        void (*handler)(void);
        int ret = 0;

        p->thread = (struct thread_struct) INIT_THREAD;

        if(current->thread.forking){
                memcpy(&p->thread.regs.regs, &regs->regs,
                       sizeof(p->thread.regs.regs));
                REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.regs, 0);
                if(sp != 0)
                        REGS_SP(p->thread.regs.regs.regs) = sp;

                handler = fork_handler;

                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        }
        else {
                init_thread_registers(&p->thread.regs.regs);
                p->thread.request.u.thread = current->thread.request.u.thread;
                handler = new_thread_handler;
        }

        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

        if (current->thread.forking) {
                clear_flushed_tls(p);

                /*
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
                        ret = arch_copy_tls(p);
        }

        return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        initial_thread_cb_skas(proc, arg);
        kmalloc_ok = save_kmalloc_ok;
}

void default_idle(void)
{
        while(1){
                /* endless idle loop with no priority at all */

                /*
                 * although we are an idle CPU, we do not want to
                 * get into the scheduler unnecessarily.
                 */
                if(need_resched())
                        schedule();

                idle_sleep(10);
        }
}

void cpu_idle(void)
{
        cpu_tasks[current_thread->cpu].pid = os_getpid();
        default_idle();
}

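/*
 * Walk the page tables of "task" to translate a virtual address into
 * its physical counterpart.  Returns ERR_PTR(-EINVAL) if any level of
 * the walk is not present; the pte is optionally handed back through
 * "pte_out".
 */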
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
                      pte_t *pte_out)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t ptent;

        if(task->mm == NULL)
                return ERR_PTR(-EINVAL);
        pgd = pgd_offset(task->mm, addr);
        if(!pgd_present(*pgd))
                return ERR_PTR(-EINVAL);

        pud = pud_offset(pgd, addr);
        if(!pud_present(*pud))
                return ERR_PTR(-EINVAL);

        pmd = pmd_offset(pud, addr);
        if(!pmd_present(*pmd))
                return ERR_PTR(-EINVAL);

        pte = pte_offset_kernel(pmd, addr);
        ptent = *pte;
        if(!pte_present(ptent))
                return ERR_PTR(-EINVAL);

        if(pte_out != NULL)
                *pte_out = ptent;
        return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
}

char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
        return "(Unknown)";
#else
        void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
        return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
#endif
}

void dump_thread(struct pt_regs *regs, struct user *u)
{
}

int __cant_sleep(void) {
        return in_atomic() || irqs_disabled() || in_interrupt();
        /* Is in_interrupt() really needed? */
}

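/*
 * Nonzero when "sp" does not lie within the current kernel stack,
 * i.e. it is taken to be a userspace stack pointer.
 */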
int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return stack != (unsigned long) current_thread;
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}

char *uml_strdup(char *string)
{
        return kstrdup(string, GFP_KERNEL);
}

int copy_to_user_proc(void __user *to, void *from, int size)
{
        return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
        return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
        return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
        int cpu = current_thread->cpu;
        IPI_handler(cpu);
        if(cpu != 0)
                return 1;
#endif
        return 0;
}

int cpu(void)
{
        return current_thread->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
        if (value > sysemu_supported)
                return;
        atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
        return atomic_read(&using_sysemu);
}

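/*
 * /proc/sysemu: reads report the current value; writing a single
 * character '0'..'2' selects how much ptrace SYSEMU support is used,
 * and values above sysemu_supported are ignored.
 */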
static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
{
        if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /*No overflow*/
                *eof = 1;

        return strlen(buf);
}

static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
{
        char tmp[2];

        if (copy_from_user(tmp, buf, 1))
                return -EFAULT;

        if (tmp[0] >= '0' && tmp[0] <= '2')
                set_using_sysemu(tmp[0] - '0');
        return count; /*We use the first char, but pretend to write everything*/
}

int __init make_proc_sysemu(void)
{
        struct proc_dir_entry *ent;
        if (!sysemu_supported)
                return 0;

        ent = create_proc_entry("sysemu", 0600, &proc_root);

        if (ent == NULL)
        {
                printk(KERN_WARNING "Failed to register /proc/sysemu\n");
                return 0;
        }

        ent->read_proc  = proc_read_sysemu;
        ent->write_proc = proc_write_sysemu;

        return 0;
}

late_initcall(make_proc_sysemu);

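/*
 * Returns 0 when PT_DTRACE is not set on the task (t, or current when
 * t is NULL), 1 when thread.singlestep_syscall is set, and 2 otherwise.
 */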
int singlestepping(void * t)
{
        struct task_struct *task = t ? t : current;

        if ( ! (task->ptrace & PT_DTRACE) )
                return(0);

        if (task->thread.singlestep_syscall)
                return(1);

        return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
#endif