[S390] Convert monitor calls to function calls.
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 11d9b01..eb768ce 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/fs.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/stddef.h>
 #include <linux/unistd.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/user.h>
-#include <linux/a.out.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/notifier.h>
-
+#include <linux/utsname.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -44,6 +43,7 @@
 #include <asm/processor.h>
 #include <asm/irq.h>
 #include <asm/timer.h>
+#include <asm/cpu.h>
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
@@ -76,6 +76,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
  * Need to know about CPUs going idle?
  */
 static ATOMIC_NOTIFIER_HEAD(idle_chain);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 int register_idle_notifier(struct notifier_block *nb)
 {
@@ -89,13 +90,42 @@ int unregister_idle_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
 
-void do_monitor_call(struct pt_regs *regs, long interruption_code)
+static int s390_idle_enter(void)
 {
-       /* disable monitor call class 0 */
-       __ctl_clear_bit(8, 15);
+       struct s390_idle_data *idle;
+       int nr_calls = 0;
+       void *hcpu;
+       int rc;
+
+       hcpu = (void *)(long)smp_processor_id();
+       rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
+                                         &nr_calls);
+       if (rc == NOTIFY_BAD) {
+               nr_calls--;
+               __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+                                            hcpu, nr_calls, NULL);
+               return rc;
+       }
+       idle = &__get_cpu_var(s390_idle);
+       spin_lock(&idle->lock);
+       idle->idle_count++;
+       idle->in_idle = 1;
+       idle->idle_enter = get_clock();
+       spin_unlock(&idle->lock);
+       return NOTIFY_OK;
+}
 
-       atomic_notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
-                           (void *)(long) smp_processor_id());
+void s390_idle_leave(void)
+{
+       struct s390_idle_data *idle;
+
+       idle = &__get_cpu_var(s390_idle);
+       spin_lock(&idle->lock);
+       idle->idle_time += get_clock() - idle->idle_enter;
+       idle->in_idle = 0;
+       spin_unlock(&idle->lock);
+       atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+                                  (void *)(long) smp_processor_id());
 }
 
 extern void s390_handle_mcck(void);
@@ -104,44 +134,30 @@ extern void s390_handle_mcck(void);
  */
 static void default_idle(void)
 {
-       int cpu, rc;
-
        /* CPU is going idle. */
-       cpu = smp_processor_id();
-
        local_irq_disable();
        if (need_resched()) {
                local_irq_enable();
                return;
        }
-
-       rc = atomic_notifier_call_chain(&idle_chain,
-                       CPU_IDLE, (void *)(long) cpu);
-       if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
-               BUG();
-       if (rc != NOTIFY_OK) {
+       if (s390_idle_enter() == NOTIFY_BAD) {
                local_irq_enable();
                return;
        }
-
-       /* enable monitor call class 0 */
-       __ctl_set_bit(8, 15);
-
 #ifdef CONFIG_HOTPLUG_CPU
-       if (cpu_is_offline(cpu)) {
+       if (cpu_is_offline(smp_processor_id())) {
                preempt_enable_no_resched();
                cpu_die();
        }
 #endif
-
        local_mcck_disable();
        if (test_thread_flag(TIF_MCCK_PENDING)) {
                local_mcck_enable();
+               s390_idle_leave();
                local_irq_enable();
                s390_handle_mcck();
                return;
        }
-
        trace_hardirqs_on();
        /* Wait for external, I/O or machine check interrupt. */
        __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
@@ -162,13 +178,15 @@ void cpu_idle(void)
 
 void show_regs(struct pt_regs *regs)
 {
-       struct task_struct *tsk = current;
-
-        printk("CPU:    %d    %s\n", task_thread_info(tsk)->cpu, print_tainted());
-        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
-              current->comm, current->pid, (void *) tsk,
-              (void *) tsk->thread.ksp);
-
+       print_modules();
+       printk("CPU: %d %s %s %.*s\n",
+              task_thread_info(current)->cpu, print_tainted(),
+              init_utsname()->release,
+              (int)strcspn(init_utsname()->version, " "),
+              init_utsname()->version);
+       printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+              current->comm, current->pid, current,
+              (void *) current->thread.ksp);
        show_registers(regs);
        /* Show stack backtrace if pt_regs is from kernel mode */
        if (!(regs->psw.mask & PSW_MASK_PSTATE))
@@ -254,14 +272,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
        save_fp_regs(&current->thread.fp_regs);
        memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
               sizeof(s390_fp_regs));
-        p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
        /* Set a new TLS ?  */
        if (clone_flags & CLONE_SETTLS)
                p->thread.acrs[0] = regs->gprs[6];
 #else /* CONFIG_64BIT */
        /* Save the fpu registers to new thread structure. */
        save_fp_regs(&p->thread.fp_regs);
-        p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
        /* Set a new TLS ?  */
        if (clone_flags & CLONE_SETTLS) {
                if (test_thread_flag(TIF_31BIT)) {
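
For reference, the new idle accounting introduced above relies on a per-cpu struct s390_idle_data that is not part of this hunk; it is presumably declared in the newly included <asm/cpu.h>, alongside the S390_CPU_IDLE/S390_CPU_NOT_IDLE events that replace the generic CPU_IDLE/CPU_NOT_IDLE notifier values used by the removed do_monitor_call() path. A minimal sketch of that structure, with field names and rough types inferred only from their use in s390_idle_enter() and s390_idle_leave(), could look like this:

	/*
	 * Hedged sketch, not taken from this hunk: per-cpu idle accounting
	 * data as implied by s390_idle_enter()/s390_idle_leave() above.
	 */
	struct s390_idle_data {
		spinlock_t lock;		/* protects the accounting fields below */
		unsigned int in_idle;		/* set while the CPU sits in the wait PSW */
		unsigned long long idle_count;	/* number of idle periods entered */
		unsigned long long idle_enter;	/* TOD clock value at idle entry */
		unsigned long long idle_time;	/* accumulated idle time in TOD units */
	};

	DECLARE_PER_CPU(struct s390_idle_data, s390_idle);

The DEFINE_PER_CPU(struct s390_idle_data, s390_idle) added next to the idle_chain notifier head provides the storage that this declaration would export to other files, e.g. for reporting idle statistics via sysfs.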