* I like traps on v9, :))))
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h> /* for jiffies */
#include <linux/kernel.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
+#include <asm/prom.h>
-struct notifier_block *sparc64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(sparc64die_chain);
int register_die_notifier(struct notifier_block *nb)
{
- int err = 0;
- unsigned long flags;
- spin_lock_irqsave(&die_notifier_lock, flags);
- err = notifier_chain_register(&sparc64die_chain, nb);
- spin_unlock_irqrestore(&die_notifier_lock, flags);
- return err;
+ return atomic_notifier_chain_register(&sparc64die_chain, nb);
}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&sparc64die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
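The hunk above replaces the open-coded, spinlock-protected notifier chain with the generic atomic notifier API and exports both entry points, so modules can hook the die chain. A minimal sketch of a client, with hypothetical handler and variable names (notifier_block, NOTIFY_DONE, and the register/unregister calls are as in the code above):

#include <linux/kernel.h>
#include <linux/notifier.h>

/* Hypothetical handler: invoked for every event posted to the chain. */
static int my_die_event(struct notifier_block *self,
			unsigned long val, void *data)
{
	printk(KERN_INFO "die event %lu\n", val);
	return NOTIFY_DONE;	/* do not stop other notifiers */
}

static struct notifier_block my_die_block = {
	.notifier_call = my_die_event,
};

/* module init:   register_die_notifier(&my_die_block);   */
/* module exit:   unregister_die_notifier(&my_die_block); */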
/* When an irrecoverable trap occurs at tl > 0, the trap entry
* code logs the trap state registers at every level in the trap
void __init cheetah_ecache_flush_init(void)
{
unsigned long largest_size, smallest_linesize, order, ver;
- int node, i, instance;
+ struct device_node *dp;
+ int i, instance, sz;
/* Scan all cpu device tree nodes, note two values:
* 1) largest E-cache size
smallest_linesize = ~0UL;
instance = 0;
- while (!cpu_find_by_instance(instance, &node, NULL)) {
+ while (!cpu_find_by_instance(instance, &dp, NULL)) {
unsigned long val;
- val = prom_getintdefault(node, "ecache-size",
- (2 * 1024 * 1024));
+ val = of_getintprop_default(dp, "ecache-size",
+ (2 * 1024 * 1024));
if (val > largest_size)
largest_size = val;
- val = prom_getintdefault(node, "ecache-line-size", 64);
+ val = of_getintprop_default(dp, "ecache-line-size", 64);
if (val < smallest_linesize)
smallest_linesize = val;
instance++;
}
/* Now allocate error trap reporting scoreboard. */
- node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
+ sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
for (order = 0; order < MAX_ORDER; order++) {
- if ((PAGE_SIZE << order) >= node)
+ if ((PAGE_SIZE << order) >= sz)
break;
}
cheetah_error_log = (struct cheetah_err_info *)
__get_free_pages(GFP_KERNEL, order);
if (!cheetah_error_log) {
prom_printf("cheetah_ecache_flush_init: Failed to allocate "
- "error logging scoreboard (%d bytes).\n", node);
+ "error logging scoreboard (%d bytes).\n", sz);
prom_halt();
}
memset(cheetah_error_log, 0, PAGE_SIZE << order);
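The scoreboard allocation rounds sz up to a power-of-two number of pages: the loop picks the smallest order with (PAGE_SIZE << order) >= sz and hands that order to __get_free_pages(). A standalone sketch of just the rounding step, assuming sparc64's 8K base page size and an invented 512-byte record size purely for the arithmetic (sizeof(struct cheetah_err_info) is not shown in this hunk):

#include <stdio.h>

#define PAGE_SIZE	8192UL	/* sparc64 base page size */
#define MAX_ORDER	11	/* illustrative bound */

/* Smallest order such that (PAGE_SIZE << order) >= sz. */
static unsigned long size_to_order(unsigned long sz)
{
	unsigned long order;

	for (order = 0; order < MAX_ORDER; order++)
		if ((PAGE_SIZE << order) >= sz)
			break;
	return order;
}

int main(void)
{
	/* e.g. 64 CPUs * 2 entries * 512 bytes = 64K -> order 3 */
	printf("order = %lu\n", size_to_order(64 * 2 * 512));
	return 0;
}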
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
afsr, afar,
(afsr & CHAFSR_TL1) ? 1 : 0);
- printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+ printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
- regs->tpc, regs->tnpc, regs->tstate);
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
(afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
};
}
-static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+extern void __show_regs(struct pt_regs * regs);
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
int cnt;
pfx,
ent->err_raddr, ent->err_size, ent->err_cpu);
+ __show_regs(regs);
+
if ((cnt = atomic_read(ocnt)) != 0) {
atomic_set(ocnt, 0);
wmb();
put_cpu();
- sun4v_log_error(&local_copy, cpu,
+ sun4v_log_error(regs, &local_copy, cpu,
KERN_ERR "RESUMABLE ERROR",
&sun4v_resum_oflow_cnt);
}
}
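When a sun4v error queue overflows, the trap code bumps a per-queue atomic counter; sun4v_log_error() reads it, resets it, and reports how many entries were lost, as the atomic_read()/atomic_set()/wmb() lines above show. A minimal sketch of that report-and-reset pattern in isolation (function names invented):

static atomic_t oflow_cnt = ATOMIC_INIT(0);

/* Producer side: queue was full, remember the dropped entry. */
static void note_overflow(void)
{
	atomic_inc(&oflow_cnt);
}

/* Consumer side: report and clear, mirroring sun4v_log_error(). */
static void report_overflow(const char *pfx)
{
	int cnt = atomic_read(&oflow_cnt);

	if (cnt != 0) {
		atomic_set(&oflow_cnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n", pfx, cnt);
	}
}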
#endif
- sun4v_log_error(&local_copy, cpu,
+ sun4v_log_error(regs, &local_copy, cpu,
KERN_EMERG "NON-RESUMABLE ERROR",
&sun4v_nonresum_oflow_cnt);
void die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
- extern void __show_regs(struct pt_regs * regs);
extern void smp_report_regs(void);
int count = 0;
} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
if (handle_ldf_stq(insn, regs))
return;
+ } else if (tlb_type == hypervisor) {
+ extern int vis_emul(struct pt_regs *, unsigned int);
+
+ if (!vis_emul(regs, insn))
+ return;
}
}
info.si_signo = SIGILL;
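The new hypervisor branch above gives software emulation a chance at VIS instructions that sun4v chips trap as illegal: vis_emul() returns zero when it fully emulated the instruction, and only if it declines does control fall through to the SIGILL setup. A sketch of that emulate-before-signalling shape, assuming (hypothetically) a uniform zero-on-success convention and a table holding the candidate handlers (vis_emul()'s signature comes from the code above; emulate_or_signal() is invented):

struct pt_regs;

extern int vis_emul(struct pt_regs *, unsigned int);

typedef int (*insn_emulator)(struct pt_regs *, unsigned int);

/* Ordered list of emulation attempts. */
static insn_emulator emulators[] = {
	vis_emul,
};

static int emulate_or_signal(struct pt_regs *regs, unsigned int insn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(emulators); i++)
		if (!emulators[i](regs, insn))
			return 0;	/* emulated, trap can return */
	return -1;			/* caller delivers SIGILL */
}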
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
/* Only invoked on boot processor. */
void __init trap_init(void)
(TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
(TRAP_PER_CPU_CPU_LIST_PA !=
- offsetof(struct trap_per_cpu, cpu_list_pa)))
+ offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+ (TRAP_PER_CPU_TSB_HUGE !=
+ offsetof(struct trap_per_cpu, tsb_huge)) ||
+ (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+ offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+ (TRAP_PER_CPU_IRQ_WORKLIST !=
+ offsetof(struct trap_per_cpu, irq_worklist)))
trap_per_cpu_offsets_are_bolixed_dave();
+ if ((TSB_CONFIG_TSB !=
+ offsetof(struct tsb_config, tsb)) ||
+ (TSB_CONFIG_RSS_LIMIT !=
+ offsetof(struct tsb_config, tsb_rss_limit)) ||
+ (TSB_CONFIG_NENTRIES !=
+ offsetof(struct tsb_config, tsb_nentries)) ||
+ (TSB_CONFIG_REG_VAL !=
+ offsetof(struct tsb_config, tsb_reg_val)) ||
+ (TSB_CONFIG_MAP_VADDR !=
+ offsetof(struct tsb_config, tsb_map_vaddr)) ||
+ (TSB_CONFIG_MAP_PTE !=
+ offsetof(struct tsb_config, tsb_map_pte)))
+ tsb_config_offsets_are_bolixed_dave();
+
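The offsetof() comparisons above are tripwires: TRAP_PER_CPU_* and TSB_CONFIG_* are hand-maintained constants consumed by assembly, and if they ever disagree with the C struct layout, trap_init() references a function that is never defined, so the mismatch surfaces as a link error with a deliberately descriptive symbol name. The same guarantee can be sketched as a compile-time assertion instead; a minimal, hypothetical cut-down example using the kernel's BUILD_BUG_ON() (the _DEMO struct and constants are invented):

#include <linux/kernel.h>	/* BUILD_BUG_ON */
#include <linux/stddef.h>	/* offsetof */

/* Hypothetical two-field layout mirroring a pair of asm constants. */
struct tsb_config_demo {
	unsigned long tsb;
	unsigned long tsb_rss_limit;
};
#define TSB_CONFIG_TSB_DEMO		0x00
#define TSB_CONFIG_RSS_LIMIT_DEMO	0x08

static inline void check_tsb_config_offsets(void)
{
	/* Breaks the compile, not the boot, if the layouts drift. */
	BUILD_BUG_ON(TSB_CONFIG_TSB_DEMO !=
		     offsetof(struct tsb_config_demo, tsb));
	BUILD_BUG_ON(TSB_CONFIG_RSS_LIMIT_DEMO !=
		     offsetof(struct tsb_config_demo, tsb_rss_limit));
}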
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
*/