sparc64: Allow chmc to be built as a module.
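
Previously traps.c called chmc_getunumber() directly (note the removed
extern declaration below), which tied DIMM name lookup to a built-in
chmc driver.  With this patch the lookup goes through sprintf_dimm(),
which dispatches to a dimm_printer_t handler registered under
dimm_handler_lock and falls back to prom_getunumber() on spitfire
chips, so a memory controller driver can register_dimm_printer() at
load time and unregister_dimm_printer() on unload.

The sketch below is illustrative only and is not part of the patch; it
assumes the dimm_printer_t signature implied by the way sprintf_dimm()
invokes the handler (the real typedef lives in the new <asm/memctrl.h>
header, which this diff does not show), and all example_* names are
hypothetical.

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <asm/memctrl.h>

	/* Hypothetical handler: turn an ECC syndrome code plus physical
	 * address into a DIMM label.  A real driver would consult its
	 * memory controller's layout tables here.
	 */
	static int example_print_dimm(int synd_code, unsigned long paddr,
				      char *buf, int buflen)
	{
		scnprintf(buf, buflen, "SYND%d@%016lx", synd_code, paddr);
		return 0;	/* >= 0 means buf holds a valid label */
	}

	static int __init example_init(void)
	{
		/* Only one printer may be installed at a time. */
		return register_dimm_printer(example_print_dimm);
	}

	static void __exit example_exit(void)
	{
		unregister_dimm_printer(example_print_dimm);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

Note that register_dimm_printer() returns -EEXIST when a handler is
already installed, so a driver should check the return value rather
than assume it owns DIMM error reporting.
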
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index f47f487..71644da 100644
@@ -1,7 +1,6 @@
-/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
- * arch/sparc64/kernel/traps.c
+/* arch/sparc64/kernel/traps.c
  *
- * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
  */
 
@@ -9,17 +8,16 @@
  * I like traps on v9, :))))
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
-#include <linux/sched.h>  /* for jiffies */
+#include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/kallsyms.h>
 #include <linux/signal.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/kdebug.h>
 
+#include <asm/smp.h>
 #include <asm/delay.h>
 #include <asm/system.h>
 #include <asm/ptrace.h>
 #include <asm/psrcompat.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
-#include <asm/kdebug.h>
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
+#include <asm/head.h>
+#include <asm/prom.h>
+#include <asm/memctrl.h>
 
-struct notifier_block *sparc64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
-
-int register_die_notifier(struct notifier_block *nb)
-{
-       int err = 0;
-       unsigned long flags;
-       spin_lock_irqsave(&die_notifier_lock, flags);
-       err = notifier_chain_register(&sparc64die_chain, nb);
-       spin_unlock_irqrestore(&die_notifier_lock, flags);
-       return err;
-}
+#include "entry.h"
+#include "kstack.h"
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
@@ -72,25 +59,23 @@ struct tl1_traplog {
 
 static void dump_tl1_traplog(struct tl1_traplog *p)
 {
-       int i;
+       int i, limit;
+
+       printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
+              "dumping track stack.\n", p->tl);
 
-       printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
-              p->tl);
-       for (i = 0; i < 4; i++) {
-               printk(KERN_CRIT
+       limit = (tlb_type == hypervisor) ? 2 : 4;
+       for (i = 0; i < limit; i++) {
+               printk(KERN_EMERG
                       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
                       "TNPC[%016lx] TT[%lx]\n",
                       i + 1,
                       p->trapstack[i].tstate, p->trapstack[i].tpc,
                       p->trapstack[i].tnpc, p->trapstack[i].tt);
+               printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
        }
 }
 
-void do_call_debug(struct pt_regs *regs) 
-{ 
-       notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT); 
-}
-
 void bad_trap(struct pt_regs *regs, long lvl)
 {
        char buffer[32];
@@ -144,6 +129,56 @@ void do_BUG(const char *file, int line)
 }
 #endif
 
+static DEFINE_SPINLOCK(dimm_handler_lock);
+static dimm_printer_t dimm_handler;
+
+static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
+{
+       unsigned long flags;
+       int ret = -ENODEV;
+
+       spin_lock_irqsave(&dimm_handler_lock, flags);
+       if (dimm_handler) {
+               ret = dimm_handler(synd_code, paddr, buf, buflen);
+       } else if (tlb_type == spitfire) {
+               if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
+                       ret = -EINVAL;
+               else
+                       ret = 0;
+       } else
+               ret = -ENODEV;
+       spin_unlock_irqrestore(&dimm_handler_lock, flags);
+
+       return ret;
+}
+
+int register_dimm_printer(dimm_printer_t func)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&dimm_handler_lock, flags);
+       if (!dimm_handler)
+               dimm_handler = func;
+       else
+               ret = -EEXIST;
+       spin_unlock_irqrestore(&dimm_handler_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(register_dimm_printer);
+
+void unregister_dimm_printer(dimm_printer_t func)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dimm_handler_lock, flags);
+       if (dimm_handler == func)
+               dimm_handler = NULL;
+       spin_unlock_irqrestore(&dimm_handler_lock, flags);
+}
+EXPORT_SYMBOL_GPL(unregister_dimm_printer);
+
 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 {
        siginfo_t info;
@@ -179,6 +214,45 @@ void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr
        spitfire_insn_access_exception(regs, sfsr, sfar);
 }
 
+void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+       unsigned short type = (type_ctx >> 16);
+       unsigned short ctx  = (type_ctx & 0xffff);
+       siginfo_t info;
+
+       if (notify_die(DIE_TRAP, "instruction access exception", regs,
+                      0, 0x8, SIGTRAP) == NOTIFY_STOP)
+               return;
+
+       if (regs->tstate & TSTATE_PRIV) {
+               printk("sun4v_insn_access_exception: ADDR[%016lx] "
+                      "CTX[%04x] TYPE[%04x], going.\n",
+                      addr, ctx, type);
+               die_if_kernel("Iax", regs);
+       }
+
+       if (test_thread_flag(TIF_32BIT)) {
+               regs->tpc &= 0xffffffff;
+               regs->tnpc &= 0xffffffff;
+       }
+       info.si_signo = SIGSEGV;
+       info.si_errno = 0;
+       info.si_code = SEGV_MAPERR;
+       info.si_addr = (void __user *) addr;
+       info.si_trapno = 0;
+       force_sig_info(SIGSEGV, &info, current);
+}
+
+void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+       if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
+                      0, 0x8, SIGTRAP) == NOTIFY_STOP)
+               return;
+
+       dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+       sun4v_insn_access_exception(regs, addr, type_ctx);
+}
+
 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 {
        siginfo_t info;
@@ -227,6 +301,45 @@ void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr
        spitfire_data_access_exception(regs, sfsr, sfar);
 }
 
+void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+       unsigned short type = (type_ctx >> 16);
+       unsigned short ctx  = (type_ctx & 0xffff);
+       siginfo_t info;
+
+       if (notify_die(DIE_TRAP, "data access exception", regs,
+                      0, 0x8, SIGTRAP) == NOTIFY_STOP)
+               return;
+
+       if (regs->tstate & TSTATE_PRIV) {
+               printk("sun4v_data_access_exception: ADDR[%016lx] "
+                      "CTX[%04x] TYPE[%04x], going.\n",
+                      addr, ctx, type);
+               die_if_kernel("Dax", regs);
+       }
+
+       if (test_thread_flag(TIF_32BIT)) {
+               regs->tpc &= 0xffffffff;
+               regs->tnpc &= 0xffffffff;
+       }
+       info.si_signo = SIGSEGV;
+       info.si_errno = 0;
+       info.si_code = SEGV_MAPERR;
+       info.si_addr = (void __user *) addr;
+       info.si_trapno = 0;
+       force_sig_info(SIGSEGV, &info, current);
+}
+
+void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+       if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
+                      0, 0x8, SIGTRAP) == NOTIFY_STOP)
+               return;
+
+       dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+       sun4v_data_access_exception(regs, addr, type_ctx);
+}
+
 #ifdef CONFIG_PCI
 /* This is really pathetic... */
 extern volatile int pci_poke_in_progress;
@@ -313,8 +426,7 @@ static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, un
 
        if (udbl & bit) {
                scode = ecc_syndrome_table[udbl & 0xff];
-               if (prom_getunumber(scode, afar,
-                                   memmod_str, sizeof(memmod_str)) == -1)
+               if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
                        p = syndrome_unknown;
                else
                        p = memmod_str;
@@ -325,8 +437,7 @@ static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, un
 
        if (udbh & bit) {
                scode = ecc_syndrome_table[udbh & 0xff];
-               if (prom_getunumber(scode, afar,
-                                   memmod_str, sizeof(memmod_str)) == -1)
+               if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
                        p = syndrome_unknown;
                else
                        p = memmod_str;
@@ -481,41 +592,6 @@ static unsigned long ecache_flush_physbase;
 static unsigned long ecache_flush_linesize;
 static unsigned long ecache_flush_size;
 
-/* WARNING: The error trap handlers in assembly know the precise
- *         layout of the following structure.
- *
- * C-level handlers below use this information to log the error
- * and then determine how to recover (if possible).
- */
-struct cheetah_err_info {
-/*0x00*/u64 afsr;
-/*0x08*/u64 afar;
-
-       /* D-cache state */
-/*0x10*/u64 dcache_data[4];    /* The actual data      */
-/*0x30*/u64 dcache_index;      /* D-cache index        */
-/*0x38*/u64 dcache_tag;                /* D-cache tag/valid    */
-/*0x40*/u64 dcache_utag;       /* D-cache microtag     */
-/*0x48*/u64 dcache_stag;       /* D-cache snooptag     */
-
-       /* I-cache state */
-/*0x50*/u64 icache_data[8];    /* The actual insns + predecode */
-/*0x90*/u64 icache_index;      /* I-cache index        */
-/*0x98*/u64 icache_tag;                /* I-cache phys tag     */
-/*0xa0*/u64 icache_utag;       /* I-cache microtag     */
-/*0xa8*/u64 icache_stag;       /* I-cache snooptag     */
-/*0xb0*/u64 icache_upper;      /* I-cache upper-tag    */
-/*0xb8*/u64 icache_lower;      /* I-cache lower-tag    */
-
-       /* E-cache state */
-/*0xc0*/u64 ecache_data[4];    /* 32 bytes from staging registers */
-/*0xe0*/u64 ecache_index;      /* E-cache index        */
-/*0xe8*/u64 ecache_tag;                /* E-cache tag/state    */
-
-/*0xf0*/u64 __pad[32 - 30];
-};
-#define CHAFSR_INVALID         ((u64)-1L)
-
 /* This table is ordered in priority of errors and matches the
  * AFAR overwrite policy as well.
  */
@@ -689,13 +765,9 @@ static struct afsr_error_table __jalapeno_error_table[] = {
 static struct afsr_error_table *cheetah_error_table;
 static unsigned long cheetah_afsr_errors;
 
-/* This is allocated at boot time based upon the largest hardware
- * cpu ID in the system.  We allocate two entries per cpu, one for
- * TL==0 logging and one for TL >= 1 logging.
- */
 struct cheetah_err_info *cheetah_error_log;
 
-static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
+static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
 {
        struct cheetah_err_info *p;
        int cpu = smp_processor_id();
@@ -725,7 +797,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector
 void __init cheetah_ecache_flush_init(void)
 {
        unsigned long largest_size, smallest_linesize, order, ver;
-       int node, i, instance;
+       int i, sz;
 
        /* Scan all cpu device tree nodes, note two values:
         * 1) largest E-cache size
@@ -734,18 +806,20 @@ void __init cheetah_ecache_flush_init(void)
        largest_size = 0UL;
        smallest_linesize = ~0UL;
 
-       instance = 0;
-       while (!cpu_find_by_instance(instance, &node, NULL)) {
+       for (i = 0; i < NR_CPUS; i++) {
                unsigned long val;
 
-               val = prom_getintdefault(node, "ecache-size",
-                                        (2 * 1024 * 1024));
+               val = cpu_data(i).ecache_size;
+               if (!val)
+                       continue;
+
                if (val > largest_size)
                        largest_size = val;
-               val = prom_getintdefault(node, "ecache-line-size", 64);
+
+               val = cpu_data(i).ecache_line_size;
                if (val < smallest_linesize)
                        smallest_linesize = val;
-               instance++;
+
        }
 
        if (largest_size == 0UL || smallest_linesize == ~0UL) {
@@ -767,16 +841,16 @@ void __init cheetah_ecache_flush_init(void)
        }
 
        /* Now allocate error trap reporting scoreboard. */
-       node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
+       sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
        for (order = 0; order < MAX_ORDER; order++) {
-               if ((PAGE_SIZE << order) >= node)
+               if ((PAGE_SIZE << order) >= sz)
                        break;
        }
        cheetah_error_log = (struct cheetah_err_info *)
                __get_free_pages(GFP_KERNEL, order);
        if (!cheetah_error_log) {
                prom_printf("cheetah_ecache_flush_init: Failed to allocate "
-                           "error logging scoreboard (%d bytes).\n", node);
+                           "error logging scoreboard (%d bytes).\n", sz);
                prom_halt();
        }
        memset(cheetah_error_log, 0, PAGE_SIZE << order);
@@ -788,7 +862,8 @@ void __init cheetah_ecache_flush_init(void)
                cheetah_error_log[i].afsr = CHAFSR_INVALID;
 
        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
-       if ((ver >> 32) == 0x003e0016) {
+       if ((ver >> 32) == __JALAPENO_ID ||
+           (ver >> 32) == __SERRANO_ID) {
                cheetah_error_table = &__jalapeno_error_table[0];
                cheetah_afsr_errors = JPAFSR_ERRORS;
        } else if ((ver >> 32) == 0x003e0015) {
@@ -1012,7 +1087,7 @@ static unsigned char cheetah_mtag_syntab[] = {
 };
 
 /* Return the highest priority error conditon mentioned. */
-static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
+static inline unsigned long cheetah_get_hipri(unsigned long afsr)
 {
        unsigned long tmp = 0;
        int i;
@@ -1035,8 +1110,6 @@ static const char *cheetah_get_string(unsigned long bit)
        return "???";
 }
 
-extern int chmc_getunumber(int, unsigned long, char *, int);
-
 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
                               unsigned long afsr, unsigned long afar, int recoverable)
 {
@@ -1047,9 +1120,12 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               afsr, afar,
               (afsr & CHAFSR_TL1) ? 1 : 0);
-       printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+       printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
-              regs->tpc, regs->tnpc, regs->tstate);
+              regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+       printk("%s" "ERROR(%d): ",
+              (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+       printk("TPC<%pS>\n", (void *) regs->tpc);
        printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
@@ -1075,7 +1151,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
 
                syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
                syndrome = cheetah_ecc_syntab[syndrome];
-               ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
+               ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
                if (ret != -1)
                        printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
                               (recoverable ? KERN_WARNING : KERN_CRIT),
@@ -1086,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
 
                syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
                syndrome = cheetah_mtag_syntab[syndrome];
-               ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
+               ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
                if (ret != -1)
                        printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
                               (recoverable ? KERN_WARNING : KERN_CRIT),
@@ -1657,6 +1733,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
                       smp_processor_id(),
                       (type & 0x1) ? 'I' : 'D',
                       regs->tpc);
+               printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
                panic("Irrecoverable Cheetah+ parity error.");
        }
 
@@ -1664,6 +1741,259 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
               smp_processor_id(),
               (type & 0x1) ? 'I' : 'D',
               regs->tpc);
+       printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
+}
+
+struct sun4v_error_entry {
+       u64             err_handle;
+       u64             err_stick;
+
+       u32             err_type;
+#define SUN4V_ERR_TYPE_UNDEFINED       0
+#define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
+#define SUN4V_ERR_TYPE_PRECISE_NONRES  2
+#define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
+#define SUN4V_ERR_TYPE_WARNING_RES     4
+
+       u32             err_attrs;
+#define SUN4V_ERR_ATTRS_PROCESSOR      0x00000001
+#define SUN4V_ERR_ATTRS_MEMORY         0x00000002
+#define SUN4V_ERR_ATTRS_PIO            0x00000004
+#define SUN4V_ERR_ATTRS_INT_REGISTERS  0x00000008
+#define SUN4V_ERR_ATTRS_FPU_REGISTERS  0x00000010
+#define SUN4V_ERR_ATTRS_USER_MODE      0x01000000
+#define SUN4V_ERR_ATTRS_PRIV_MODE      0x02000000
+#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
+
+       u64             err_raddr;
+       u32             err_size;
+       u16             err_cpu;
+       u16             err_pad;
+};
+
+static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+static const char *sun4v_err_type_to_str(u32 type)
+{
+       switch (type) {
+       case SUN4V_ERR_TYPE_UNDEFINED:
+               return "undefined";
+       case SUN4V_ERR_TYPE_UNCORRECTED_RES:
+               return "uncorrected resumable";
+       case SUN4V_ERR_TYPE_PRECISE_NONRES:
+               return "precise nonresumable";
+       case SUN4V_ERR_TYPE_DEFERRED_NONRES:
+               return "deferred nonresumable";
+       case SUN4V_ERR_TYPE_WARNING_RES:
+               return "warning resumable";
+       default:
+               return "unknown";
+       };
+}
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+{
+       int cnt;
+
+       printk("%s: Reporting on cpu %d\n", pfx, cpu);
+       printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
+              pfx,
+              ent->err_handle, ent->err_stick,
+              ent->err_type,
+              sun4v_err_type_to_str(ent->err_type));
+       printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
+              pfx,
+              ent->err_attrs,
+              ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
+               "processor" : ""),
+              ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
+               "memory" : ""),
+              ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
+               "pio" : ""),
+              ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
+               "integer-regs" : ""),
+              ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
+               "fpu-regs" : ""),
+              ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
+               "user" : ""),
+              ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
+               "privileged" : ""),
+              ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
+               "queue-full" : ""));
+       printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
+              pfx,
+              ent->err_raddr, ent->err_size, ent->err_cpu);
+
+       show_regs(regs);
+
+       if ((cnt = atomic_read(ocnt)) != 0) {
+               atomic_set(ocnt, 0);
+               wmb();
+               printk("%s: Queue overflowed %d times.\n",
+                      pfx, cnt);
+       }
+}
+
+/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+ * Log the event and clear the first word of the entry.
+ */
+void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+{
+       struct sun4v_error_entry *ent, local_copy;
+       struct trap_per_cpu *tb;
+       unsigned long paddr;
+       int cpu;
+
+       cpu = get_cpu();
+
+       tb = &trap_block[cpu];
+       paddr = tb->resum_kernel_buf_pa + offset;
+       ent = __va(paddr);
+
+       memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+       /* We have a local copy now, so release the entry.  */
+       ent->err_handle = 0;
+       wmb();
+
+       put_cpu();
+
+       if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
+               /* If err_type is 0x4, it's a powerdown request.  Do
+                * not do the usual resumable error log because that
+                * makes it look like some abnormal error.
+                */
+               printk(KERN_INFO "Power down request...\n");
+               kill_cad_pid(SIGINT, 1);
+               return;
+       }
+
+       sun4v_log_error(regs, &local_copy, cpu,
+                       KERN_ERR "RESUMABLE ERROR",
+                       &sun4v_resum_oflow_cnt);
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_resum_overflow(struct pt_regs *regs)
+{
+       atomic_inc(&sun4v_resum_oflow_cnt);
+}
+
+/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+ * Log the event, clear the first word of the entry, and die.
+ */
+void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
+{
+       struct sun4v_error_entry *ent, local_copy;
+       struct trap_per_cpu *tb;
+       unsigned long paddr;
+       int cpu;
+
+       cpu = get_cpu();
+
+       tb = &trap_block[cpu];
+       paddr = tb->nonresum_kernel_buf_pa + offset;
+       ent = __va(paddr);
+
+       memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
+
+       /* We have a local copy now, so release the entry.  */
+       ent->err_handle = 0;
+       wmb();
+
+       put_cpu();
+
+#ifdef CONFIG_PCI
+       /* Check for the special PCI poke sequence. */
+       if (pci_poke_in_progress && pci_poke_cpu == cpu) {
+               pci_poke_faulted = 1;
+               regs->tpc += 4;
+               regs->tnpc = regs->tpc + 4;
+               return;
+       }
+#endif
+
+       sun4v_log_error(regs, &local_copy, cpu,
+                       KERN_EMERG "NON-RESUMABLE ERROR",
+                       &sun4v_nonresum_oflow_cnt);
+
+       panic("Non-resumable error.");
+}
+
+/* If we try to printk() we'll probably make matters worse, by trying
+ * to retake locks this cpu already holds or causing more errors. So
+ * just bump a counter, and we'll report these counter bumps above.
+ */
+void sun4v_nonresum_overflow(struct pt_regs *regs)
+{
+       /* XXX Actually even this can make not that much sense.  Perhaps
+        * XXX we should just pull the plug and panic directly from here?
+        */
+       atomic_inc(&sun4v_nonresum_oflow_cnt);
+}
+
+unsigned long sun4v_err_itlb_vaddr;
+unsigned long sun4v_err_itlb_ctx;
+unsigned long sun4v_err_itlb_pte;
+unsigned long sun4v_err_itlb_error;
+
+void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+{
+       if (tl > 1)
+               dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+       printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+              regs->tpc, tl);
+       printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
+       printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+       printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
+              (void *) regs->u_regs[UREG_I7]);
+       printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+              "pte[%lx] error[%lx]\n",
+              sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
+              sun4v_err_itlb_pte, sun4v_err_itlb_error);
+
+       prom_halt();
+}
+
+unsigned long sun4v_err_dtlb_vaddr;
+unsigned long sun4v_err_dtlb_ctx;
+unsigned long sun4v_err_dtlb_pte;
+unsigned long sun4v_err_dtlb_error;
+
+void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+{
+       if (tl > 1)
+               dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+       printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+              regs->tpc, tl);
+       printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
+       printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+       printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
+              (void *) regs->u_regs[UREG_I7]);
+       printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+              "pte[%lx] error[%lx]\n",
+              sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
+              sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
+
+       prom_halt();
+}
+
+void hypervisor_tlbop_error(unsigned long err, unsigned long op)
+{
+       printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
+              err, op);
+}
+
+void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
+{
+       printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
+              err, op);
 }
 
 void do_fpe_common(struct pt_regs *regs)
@@ -1775,7 +2105,7 @@ void do_div0(struct pt_regs *regs)
        force_sig_info(SIGFPE, &info, current);
 }
 
-void instruction_dump (unsigned int *pc)
+static void instruction_dump(unsigned int *pc)
 {
        int i;
 
@@ -1788,7 +2118,7 @@ void instruction_dump (unsigned int *pc)
        printk("\n");
 }
 
-static void user_instruction_dump (unsigned int __user *pc)
+static void user_instruction_dump(unsigned int __user *pc)
 {
        int i;
        unsigned int buf[9];
@@ -1807,46 +2137,54 @@ static void user_instruction_dump (unsigned int __user *pc)
 
 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 {
-       unsigned long pc, fp, thread_base, ksp;
-       void *tp = task_stack_page(tsk);
-       struct reg_window *rw;
+       unsigned long fp, thread_base, ksp;
+       struct thread_info *tp;
        int count = 0;
 
        ksp = (unsigned long) _ksp;
-
+       if (!tsk)
+               tsk = current;
+       tp = task_thread_info(tsk);
+       if (ksp == 0UL) {
+               if (tsk == current)
+                       asm("mov %%fp, %0" : "=r" (ksp));
+               else
+                       ksp = tp->ksp;
+       }
        if (tp == current_thread_info())
                flushw_all();
 
        fp = ksp + STACK_BIAS;
        thread_base = (unsigned long) tp;
 
-       printk("Call Trace:");
-#ifdef CONFIG_KALLSYMS
-       printk("\n");
-#endif
+       printk("Call Trace:\n");
        do {
-               /* Bogus frame pointer? */
-               if (fp < (thread_base + sizeof(struct thread_info)) ||
-                   fp >= (thread_base + THREAD_SIZE))
+               struct sparc_stackf *sf;
+               struct pt_regs *regs;
+               unsigned long pc;
+
+               if (!kstack_valid(tp, fp))
                        break;
-               rw = (struct reg_window *)fp;
-               pc = rw->ins[7];
-               printk(" [%016lx] ", pc);
-               print_symbol("%s\n", pc);
-               fp = rw->ins[6] + STACK_BIAS;
+               sf = (struct sparc_stackf *) fp;
+               regs = (struct pt_regs *) (sf + 1);
+
+               if (kstack_is_trap_frame(tp, regs)) {
+                       if (!(regs->tstate & TSTATE_PRIV))
+                               break;
+                       pc = regs->tpc;
+                       fp = regs->u_regs[UREG_I6] + STACK_BIAS;
+               } else {
+                       pc = sf->callers_pc;
+                       fp = (unsigned long)sf->fp + STACK_BIAS;
+               }
+
+               printk(" [%016lx] %pS\n", pc, (void *) pc);
        } while (++count < 16);
-#ifndef CONFIG_KALLSYMS
-       printk("\n");
-#endif
 }
 
 void dump_stack(void)
 {
-       unsigned long *ksp;
-
-       __asm__ __volatile__("mov       %%fp, %0"
-                            : "=r" (ksp));
-       show_stack(current, ksp);
+       show_stack(current, NULL);
 }
 
 EXPORT_SYMBOL(dump_stack);
@@ -1885,8 +2223,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 void die_if_kernel(char *str, struct pt_regs *regs)
 {
        static int die_counter;
-       extern void __show_regs(struct pt_regs * regs);
-       extern void smp_report_regs(void);
        int count = 0;
        
        /* Amuse the user. */
@@ -1896,10 +2232,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
 "              /_| \\__/ |_\\\n"
 "                 \\__U_/\n");
 
-       printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
+       printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
        notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
        __asm__ __volatile__("flushw");
-       __show_regs(regs);
+       show_regs(regs);
+       add_taint(TAINT_DIE);
        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *rw = (struct reg_window *)
                        (regs->u_regs[UREG_FP] + STACK_BIAS);
@@ -1910,9 +2247,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
                while (rw &&
                       count++ < 30&&
                       is_kernel_stack(current, rw)) {
-                       printk("Caller[%016lx]", rw->ins[7]);
-                       print_symbol(": %s", rw->ins[7]);
-                       printk("\n");
+                       printk("Caller[%016lx]: %pS\n", rw->ins[7],
+                              (void *) rw->ins[7]);
 
                        rw = kernel_stack_up(rw);
                }
@@ -1924,17 +2260,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
                }
                user_instruction_dump ((unsigned int __user *) regs->tpc);
        }
-#ifdef CONFIG_SMP
-       smp_report_regs();
-#endif
-                                                       
        if (regs->tstate & TSTATE_PRIV)
                do_exit(SIGKILL);
        do_exit(SIGSEGV);
 }
 
+#define VIS_OPCODE_MASK        ((0x3 << 30) | (0x3f << 19))
+#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
+
 extern int handle_popc(u32 insn, struct pt_regs *regs);
 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+extern int vis_emul(struct pt_regs *, unsigned int);
 
 void do_illegal_instruction(struct pt_regs *regs)
 {
@@ -1958,6 +2294,19 @@ void do_illegal_instruction(struct pt_regs *regs)
                } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
                        if (handle_ldf_stq(insn, regs))
                                return;
+               } else if (tlb_type == hypervisor) {
+                       if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
+                               if (!vis_emul(regs, insn))
+                                       return;
+                       } else {
+                               struct fpustate *f = FPUSTATE;
+
+                               /* XXX maybe verify XFSR bits like
+                                * XXX do_fpother() does?
+                                */
+                               if (do_mathemu(regs, f))
+                                       return;
+                       }
                }
        }
        info.si_signo = SIGILL;
@@ -1968,6 +2317,8 @@ void do_illegal_instruction(struct pt_regs *regs)
        force_sig_info(SIGILL, &info, current);
 }
 
+extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+
 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
        siginfo_t info;
@@ -1977,13 +2328,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
                return;
 
        if (regs->tstate & TSTATE_PRIV) {
-               extern void kernel_unaligned_trap(struct pt_regs *regs,
-                                                 unsigned int insn, 
-                                                 unsigned long sfar,
-                                                 unsigned long sfsr);
-
-               kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
-                                     sfar, sfsr);
+               kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
                return;
        }
        info.si_signo = SIGBUS;
@@ -1994,6 +2339,26 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
        force_sig_info(SIGBUS, &info, current);
 }
 
+void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
+{
+       siginfo_t info;
+
+       if (notify_die(DIE_TRAP, "memory address unaligned", regs,
+                      0, 0x34, SIGSEGV) == NOTIFY_STOP)
+               return;
+
+       if (regs->tstate & TSTATE_PRIV) {
+               kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
+               return;
+       }
+       info.si_signo = SIGBUS;
+       info.si_errno = 0;
+       info.si_code = BUS_ADRALN;
+       info.si_addr = (void __user *) addr;
+       info.si_trapno = 0;
+       force_sig_info(SIGBUS, &info, current);
+}
+
 void do_privop(struct pt_regs *regs)
 {
        siginfo_t info;
@@ -2135,17 +2500,18 @@ struct trap_per_cpu trap_block[NR_CPUS];
 /* This can get invoked before sched_init() so play it super safe
  * and use hard_smp_processor_id().
  */
-void init_cur_cpu_trap(void)
+void init_cur_cpu_trap(struct thread_info *t)
 {
        int cpu = hard_smp_processor_id();
        struct trap_per_cpu *p = &trap_block[cpu];
 
-       p->thread = current_thread_info();
+       p->thread = t;
        p->pgd_paddr = 0;
 }
 
 extern void thread_info_offsets_are_bolixed_dave(void);
 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2169,7 +2535,6 @@ void __init trap_init(void)
            TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
            TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
            TI_PCR != offsetof(struct thread_info, pcr_reg) ||
-           TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
            TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
            TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
            TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
@@ -2181,9 +2546,56 @@ void __init trap_init(void)
                thread_info_offsets_are_bolixed_dave();
 
        if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
-           TRAP_PER_CPU_PGD_PADDR != offsetof(struct trap_per_cpu, pgd_paddr))
+           (TRAP_PER_CPU_PGD_PADDR !=
+            offsetof(struct trap_per_cpu, pgd_paddr)) ||
+           (TRAP_PER_CPU_CPU_MONDO_PA !=
+            offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
+           (TRAP_PER_CPU_DEV_MONDO_PA !=
+            offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
+           (TRAP_PER_CPU_RESUM_MONDO_PA !=
+            offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+           (TRAP_PER_CPU_RESUM_KBUF_PA !=
+            offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
+           (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
+            offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+           (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+            offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
+           (TRAP_PER_CPU_FAULT_INFO !=
+            offsetof(struct trap_per_cpu, fault_info)) ||
+           (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+            offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+           (TRAP_PER_CPU_CPU_LIST_PA !=
+            offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+           (TRAP_PER_CPU_TSB_HUGE !=
+            offsetof(struct trap_per_cpu, tsb_huge)) ||
+           (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+            offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+           (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+            offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
+           (TRAP_PER_CPU_CPU_MONDO_QMASK !=
+            offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
+           (TRAP_PER_CPU_DEV_MONDO_QMASK !=
+            offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
+           (TRAP_PER_CPU_RESUM_QMASK !=
+            offsetof(struct trap_per_cpu, resum_qmask)) ||
+           (TRAP_PER_CPU_NONRESUM_QMASK !=
+            offsetof(struct trap_per_cpu, nonresum_qmask)))
                trap_per_cpu_offsets_are_bolixed_dave();
 
+       if ((TSB_CONFIG_TSB !=
+            offsetof(struct tsb_config, tsb)) ||
+           (TSB_CONFIG_RSS_LIMIT !=
+            offsetof(struct tsb_config, tsb_rss_limit)) ||
+           (TSB_CONFIG_NENTRIES !=
+            offsetof(struct tsb_config, tsb_nentries)) ||
+           (TSB_CONFIG_REG_VAL !=
+            offsetof(struct tsb_config, tsb_reg_val)) ||
+           (TSB_CONFIG_MAP_VADDR !=
+            offsetof(struct tsb_config, tsb_map_vaddr)) ||
+           (TSB_CONFIG_MAP_PTE !=
+            offsetof(struct tsb_config, tsb_map_pte)))
+               tsb_config_offsets_are_bolixed_dave();
+
        /* Attach to the address space of init_task.  On SMP we
         * do this in smp.c:smp_callin for other cpus.
         */