[SPARC]: Kill BOOTME_SINGLE.
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 1e9a4b6..68420e2 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -9,7 +9,6 @@
  * I like traps on v9, :))))
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/sched.h>  /* for jiffies */
 #include <linux/kernel.h>
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
+#include <asm/prom.h>
 
-struct notifier_block *sparc64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(sparc64die_chain);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-       int err = 0;
-       unsigned long flags;
-       spin_lock_irqsave(&die_notifier_lock, flags);
-       err = notifier_chain_register(&sparc64die_chain, nb);
-       spin_unlock_irqrestore(&die_notifier_lock, flags);
-       return err;
+       return atomic_notifier_chain_register(&sparc64die_chain, nb);
 }
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+       return atomic_notifier_chain_unregister(&sparc64die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
@@ -73,12 +74,14 @@ struct tl1_traplog {
 
 static void dump_tl1_traplog(struct tl1_traplog *p)
 {
-       int i;
+       int i, limit;
 
-       printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
-              p->tl);
-       for (i = 0; i < 4; i++) {
-               printk(KERN_CRIT
+       printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
+              "dumping track stack.\n", p->tl);
+
+       limit = (tlb_type == hypervisor) ? 2 : 4;
+       for (i = 0; i < limit; i++) {
+               printk(KERN_EMERG
                       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
                       "TNPC[%016lx] TT[%lx]\n",
                       i + 1,
@@ -281,7 +284,7 @@ void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsig
                printk("sun4v_data_access_exception: ADDR[%016lx] "
                       "CTX[%04x] TYPE[%04x], going.\n",
                       addr, ctx, type);
-               die_if_kernel("Iax", regs);
+               die_if_kernel("Dax", regs);
        }
 
        if (test_thread_flag(TIF_32BIT)) {
@@ -804,7 +807,8 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector
 void __init cheetah_ecache_flush_init(void)
 {
        unsigned long largest_size, smallest_linesize, order, ver;
-       int node, i, instance;
+       struct device_node *dp;
+       int i, instance, sz;
 
        /* Scan all cpu device tree nodes, note two values:
         * 1) largest E-cache size
@@ -814,14 +818,14 @@ void __init cheetah_ecache_flush_init(void)
        smallest_linesize = ~0UL;
 
        instance = 0;
-       while (!cpu_find_by_instance(instance, &node, NULL)) {
+       while (!cpu_find_by_instance(instance, &dp, NULL)) {
                unsigned long val;
 
-               val = prom_getintdefault(node, "ecache-size",
-                                        (2 * 1024 * 1024));
+               val = of_getintprop_default(dp, "ecache-size",
+                                           (2 * 1024 * 1024));
                if (val > largest_size)
                        largest_size = val;
-               val = prom_getintdefault(node, "ecache-line-size", 64);
+               val = of_getintprop_default(dp, "ecache-line-size", 64);
                if (val < smallest_linesize)
                        smallest_linesize = val;
                instance++;
@@ -846,16 +850,16 @@ void __init cheetah_ecache_flush_init(void)
        }
 
        /* Now allocate error trap reporting scoreboard. */
-       node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
+       sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
        for (order = 0; order < MAX_ORDER; order++) {
-               if ((PAGE_SIZE << order) >= node)
+               if ((PAGE_SIZE << order) >= sz)
                        break;
        }
        cheetah_error_log = (struct cheetah_err_info *)
                __get_free_pages(GFP_KERNEL, order);
        if (!cheetah_error_log) {
                prom_printf("cheetah_ecache_flush_init: Failed to allocate "
-                           "error logging scoreboard (%d bytes).\n", node);
+                           "error logging scoreboard (%d bytes).\n", sz);
                prom_halt();
        }
        memset(cheetah_error_log, 0, PAGE_SIZE << order);
@@ -1127,9 +1131,9 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               afsr, afar,
               (afsr & CHAFSR_TL1) ? 1 : 0);
-       printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+       printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
-              regs->tpc, regs->tnpc, regs->tstate);
+              regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
        printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
@@ -1794,7 +1798,9 @@ static const char *sun4v_err_type_to_str(u32 type)
        };
 }
 
-static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+extern void __show_regs(struct pt_regs * regs);
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
 {
        int cnt;
 
@@ -1827,6 +1833,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
               pfx,
               ent->err_raddr, ent->err_size, ent->err_cpu);
 
+       __show_regs(regs);
+
        if ((cnt = atomic_read(ocnt)) != 0) {
                atomic_set(ocnt, 0);
                wmb();
@@ -1859,7 +1867,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
 
        put_cpu();
 
-       sun4v_log_error(&local_copy, cpu,
+       sun4v_log_error(regs, &local_copy, cpu,
                        KERN_ERR "RESUMABLE ERROR",
                        &sun4v_resum_oflow_cnt);
 }
@@ -1907,7 +1915,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
        }
 #endif
 
-       sun4v_log_error(&local_copy, cpu,
+       sun4v_log_error(regs, &local_copy, cpu,
                        KERN_EMERG "NON-RESUMABLE ERROR",
                        &sun4v_nonresum_oflow_cnt);
 
@@ -1926,6 +1934,58 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
        atomic_inc(&sun4v_nonresum_oflow_cnt);
 }
 
+unsigned long sun4v_err_itlb_vaddr;
+unsigned long sun4v_err_itlb_ctx;
+unsigned long sun4v_err_itlb_pte;
+unsigned long sun4v_err_itlb_error;
+
+void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+{
+       if (tl > 1)
+               dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+       printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+              regs->tpc, tl);
+       printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+              "pte[%lx] error[%lx]\n",
+              sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
+              sun4v_err_itlb_pte, sun4v_err_itlb_error);
+
+       prom_halt();
+}
+
+unsigned long sun4v_err_dtlb_vaddr;
+unsigned long sun4v_err_dtlb_ctx;
+unsigned long sun4v_err_dtlb_pte;
+unsigned long sun4v_err_dtlb_error;
+
+void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+{
+       if (tl > 1)
+               dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+       printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+              regs->tpc, tl);
+       printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+              "pte[%lx] error[%lx]\n",
+              sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
+              sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
+
+       prom_halt();
+}
+
+void hypervisor_tlbop_error(unsigned long err, unsigned long op)
+{
+       printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
+              err, op);
+}
+
+void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
+{
+       printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
+              err, op);
+}
+
 void do_fpe_common(struct pt_regs *regs)
 {
        if (regs->tstate & TSTATE_PRIV) {
@@ -2145,7 +2205,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 void die_if_kernel(char *str, struct pt_regs *regs)
 {
        static int die_counter;
-       extern void __show_regs(struct pt_regs * regs);
        extern void smp_report_regs(void);
        int count = 0;
        
@@ -2184,10 +2243,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
                }
                user_instruction_dump ((unsigned int __user *) regs->tpc);
        }
+#if 0
 #ifdef CONFIG_SMP
        smp_report_regs();
 #endif
-                                                       
+#endif                                                 
        if (regs->tstate & TSTATE_PRIV)
                do_exit(SIGKILL);
        do_exit(SIGSEGV);
@@ -2218,6 +2278,11 @@ void do_illegal_instruction(struct pt_regs *regs)
                } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
                        if (handle_ldf_stq(insn, regs))
                                return;
+               } else if (tlb_type == hypervisor) {
+                       extern int vis_emul(struct pt_regs *, unsigned int);
+
+                       if (!vis_emul(regs, insn))
+                               return;
                }
        }
        info.si_signo = SIGILL;
@@ -2250,7 +2315,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
        force_sig_info(SIGBUS, &info, current);
 }
 
-void sun4v_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
 {
        siginfo_t info;
 
@@ -2411,17 +2476,18 @@ struct trap_per_cpu trap_block[NR_CPUS];
 /* This can get invoked before sched_init() so play it super safe
  * and use hard_smp_processor_id().
  */
-void init_cur_cpu_trap(void)
+void init_cur_cpu_trap(struct thread_info *t)
 {
        int cpu = hard_smp_processor_id();
        struct trap_per_cpu *p = &trap_block[cpu];
 
-       p->thread = current_thread_info();
+       p->thread = t;
        p->pgd_paddr = 0;
 }
 
 extern void thread_info_offsets_are_bolixed_dave(void);
 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2475,9 +2541,29 @@ void __init trap_init(void)
            (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
             offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
            (TRAP_PER_CPU_CPU_LIST_PA !=
-            offsetof(struct trap_per_cpu, cpu_list_pa)))
+            offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+           (TRAP_PER_CPU_TSB_HUGE !=
+            offsetof(struct trap_per_cpu, tsb_huge)) ||
+           (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+            offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+           (TRAP_PER_CPU_IRQ_WORKLIST !=
+            offsetof(struct trap_per_cpu, irq_worklist)))
                trap_per_cpu_offsets_are_bolixed_dave();
 
+       if ((TSB_CONFIG_TSB !=
+            offsetof(struct tsb_config, tsb)) ||
+           (TSB_CONFIG_RSS_LIMIT !=
+            offsetof(struct tsb_config, tsb_rss_limit)) ||
+           (TSB_CONFIG_NENTRIES !=
+            offsetof(struct tsb_config, tsb_nentries)) ||
+           (TSB_CONFIG_REG_VAL !=
+            offsetof(struct tsb_config, tsb_reg_val)) ||
+           (TSB_CONFIG_MAP_VADDR !=
+            offsetof(struct tsb_config, tsb_map_vaddr)) ||
+           (TSB_CONFIG_MAP_PTE !=
+            offsetof(struct tsb_config, tsb_map_pte)))
+               tsb_config_offsets_are_bolixed_dave();
+
        /* Attach to the address space of init_task.  On SMP we
         * do this in smp.c:smp_callin for other cpus.
         */