#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
+#include <linux/gfp.h>
#include <asm/delay.h>
#include <asm/machvec.h>
# define IA64_MCA_DEBUG(fmt...)
#endif
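+
+/*
+ * Wrappers for the notify_die() calls made from the INIT and MCA
+ * handlers below: run the die notifier chain for the event and, when
+ * the caller passes spin == 1, park this cpu in ia64_mca_spin() if a
+ * notifier returned NOTIFY_STOP.
+ */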
+#define NOTIFY_INIT(event, regs, arg, spin) \
+do { \
+ if ((notify_die((event), "INIT", (regs), (arg), 0, 0) \
+ == NOTIFY_STOP) && ((spin) == 1)) \
+ ia64_mca_spin(__func__); \
+} while (0)
+
+#define NOTIFY_MCA(event, regs, arg, spin) \
+do { \
+ if ((notify_die((event), "MCA", (regs), (arg), 0, 0) \
+ == NOTIFY_STOP) && ((spin) == 1)) \
+ ia64_mca_spin(__func__); \
+} while (0)
+
/* Used by mca_asm.S */
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
- on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+ on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}
/*
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
- on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+ on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}
/*
/* Mask all interrupts */
local_irq_save(flags);
- if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
- (long)&nd, 0, 0) == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+
+ NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
/* Register with the SAL monarch that the slave has
*/
ia64_sal_mc_rendez();
- if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
- (long)&nd, 0, 0) == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
/* Wait for the monarch cpu to exit. */
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
- if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
- (long)&nd, 0, 0) == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
/* Enable all interrupts */
static inline void
-copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
+copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
{
u64 fslot, tslot, nat;
*tr = *fr;
memcpy(current->comm, comm, sizeof(current->comm));
}
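+
+/*
+ * Complete a pt_regs from the PAL minstate area: instruction pointer,
+ * psr and ifs (interruption or extended set, depending on psr.ic),
+ * predicates, b0, ar.rsc and the static and banked general registers,
+ * collecting the corresponding NaT bits in *nat.
+ */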
+static void
+finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
+ unsigned long *nat)
+{
+ const pal_min_state_area_t *ms = sos->pal_min_state;
+ const u64 *bank;
+
+ /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
+ * pmsa_{xip,xpsr,xfs}
+ */
+ if (ia64_psr(regs)->ic) {
+ regs->cr_iip = ms->pmsa_iip;
+ regs->cr_ipsr = ms->pmsa_ipsr;
+ regs->cr_ifs = ms->pmsa_ifs;
+ } else {
+ regs->cr_iip = ms->pmsa_xip;
+ regs->cr_ipsr = ms->pmsa_xpsr;
+ regs->cr_ifs = ms->pmsa_xfs;
+
+ sos->iip = ms->pmsa_iip;
+ sos->ipsr = ms->pmsa_ipsr;
+ sos->ifs = ms->pmsa_ifs;
+ }
+ regs->pr = ms->pmsa_pr;
+ regs->b0 = ms->pmsa_br0;
+ regs->ar_rsc = ms->pmsa_rsc;
+ copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
+ copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
+ copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
+ copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
+ copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
+ copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
+ copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
+ copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
+ copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
+ copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
+ copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
+ if (ia64_psr(regs)->bn)
+ bank = ms->pmsa_bank1_gr;
+ else
+ bank = ms->pmsa_bank0_gr;
+ copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
+ copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
+ copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
+ copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
+ copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
+ copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
+ copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
+ copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
+ copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
+ copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
+ copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
+ copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
+ copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
+ copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
+ copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
+ copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
+}
+
/* On entry to this routine, we are running on the per cpu stack, see
* mca_asm.h. The original stack has not been touched by this event. Some of
* the original stack's registers will be in the RBS on this stack. This stack
struct switch_stack *old_sw;
unsigned size = sizeof(struct pt_regs) +
sizeof(struct switch_stack) + 16;
- u64 *old_bspstore, *old_bsp;
- u64 *new_bspstore, *new_bsp;
- u64 old_unat, old_rnat, new_rnat, nat;
+ unsigned long *old_bspstore, *old_bsp;
+ unsigned long *new_bspstore, *new_bsp;
+ unsigned long old_unat, old_rnat, new_rnat, nat;
u64 slots, loadrs = regs->loadrs;
u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
u64 ar_bspstore = regs->ar_bspstore;
u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
- const u64 *bank;
const char *msg;
int cpu = smp_processor_id();
* loadrs for the new stack and save it in the new pt_regs, where
* ia64_old_stack() can get it.
*/
- old_bspstore = (u64 *)ar_bspstore;
- old_bsp = (u64 *)ar_bsp;
+ old_bspstore = (unsigned long *)ar_bspstore;
+ old_bsp = (unsigned long *)ar_bsp;
slots = ia64_rse_num_regs(old_bspstore, old_bsp);
- new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
+ new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
p = (char *)r12 - sizeof(*regs);
old_regs = (struct pt_regs *)p;
memcpy(old_regs, regs, sizeof(*regs));
- /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
- * pmsa_{xip,xpsr,xfs}
- */
- if (ia64_psr(regs)->ic) {
- old_regs->cr_iip = ms->pmsa_iip;
- old_regs->cr_ipsr = ms->pmsa_ipsr;
- old_regs->cr_ifs = ms->pmsa_ifs;
- } else {
- old_regs->cr_iip = ms->pmsa_xip;
- old_regs->cr_ipsr = ms->pmsa_xpsr;
- old_regs->cr_ifs = ms->pmsa_xfs;
- }
- old_regs->pr = ms->pmsa_pr;
- old_regs->b0 = ms->pmsa_br0;
old_regs->loadrs = loadrs;
- old_regs->ar_rsc = ms->pmsa_rsc;
old_unat = old_regs->ar_unat;
- copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
- copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
- copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
- copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
- copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
- copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
- copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
- copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
- copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
- copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
- copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
- if (ia64_psr(old_regs)->bn)
- bank = ms->pmsa_bank1_gr;
- else
- bank = ms->pmsa_bank0_gr;
- copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
- copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
- copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
- copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
- copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
- copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
- copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
- copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
- copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
- copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
- copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
- copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
- copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
- copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
- copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
- copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);
+ finish_pt_regs(old_regs, sos, &old_unat);
/* Next stack a struct switch_stack. mca_asm.S built a partial
* switch_stack, copy it and fill in the blanks using pt_regs and
return previous_current;
no_mod:
- printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
+ mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
smp_processor_id(), type, msg);
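+ /* Even if the original stack was not modified, finish filling in
+  * pt_regs from the minstate area so callers see valid values.
+  */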
+ old_unat = regs->ar_unat;
+ finish_pt_regs(regs, sos, &old_unat);
return previous_current;
}
unsigned long psr;
int cpu = smp_processor_id();
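+ /* The per-cpu dynamic TR table is allocated lazily and may not exist yet */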
+ if (!ia64_idtrs[cpu])
+ return;
+
psr = ia64_clear_ic();
for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
- p = &__per_cpu_idtrs[cpu][iord-1][i];
+ p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
if (p->pte & 0x1) {
old_rr = ia64_get_rr(p->ifa);
if (old_rr != p->rr) {
int recover, cpu = smp_processor_id();
struct task_struct *previous_current;
struct ia64_mca_notify_die nd =
- { .sos = sos, .monarch_cpu = &monarch_cpu };
+ { .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
static atomic_t mca_count;
static cpumask_t mca_cpu;
previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
- if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
if (sos->monarch) {
cpu_relax(); /* spin until monarch wakes us */
}
- if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
/* Get the MCA error record and log it */
ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
mca_insert_tr(0x2); /*Reload dynamic itrs*/
}
- if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
if (atomic_dec_return(&mca_count) > 0) {
int i;
ia64_mca_cmc_int_handler(cmc_irq, arg);
- for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+ cpuid = cpumask_next(cpuid+1, cpu_online_mask);
- if (cpuid < NR_CPUS) {
+ if (cpuid < nr_cpu_ids) {
platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
} else {
/* If no log record, switch out of polling mode */
ia64_mca_cpe_int_handler(cpe_irq, arg);
- for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+ cpuid = cpumask_next(cpuid+1, cpu_online_mask);
if (cpuid < NR_CPUS) {
platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
struct ia64_mca_notify_die nd =
{ .sos = sos, .monarch_cpu = &monarch_cpu };
- (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+ NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
sos->proc_state_param, cpu, sos->monarch);
if (!sos->monarch) {
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
+
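+ /* With kdump in progress the monarch may never arrive (or never
+  * leave), so poll with udelay() and stop waiting once
+  * kdump_in_progress is set instead of spinning unconditionally.
+  */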
+#ifdef CONFIG_KEXEC
+ while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
+ udelay(1000);
+#else
while (monarch_cpu == -1)
- cpu_relax(); /* spin until monarch enters */
- if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
- if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ cpu_relax(); /* spin until monarch enters */
+#endif
+
+ NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
+ NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+
+#ifdef CONFIG_KEXEC
+ while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
+ udelay(1000);
+#else
while (monarch_cpu != -1)
- cpu_relax(); /* spin until monarch leaves */
- if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ cpu_relax(); /* spin until monarch leaves */
+#endif
+
+ NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
+
mprintk("Slave on cpu %d returning to normal service.\n", cpu);
set_curr_task(cpu, previous_current);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}
monarch_cpu = cpu;
- if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
/*
 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be
* to default_monarch_init_process() above and just print all the
* tasks.
*/
- if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
- if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
- == NOTIFY_STOP)
- ia64_mca_spin(__func__);
+ NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
+ NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
+
mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
atomic_dec(&monarchs);
set_curr_task(cpu, previous_current);
data = mca_bootmem();
first_time = 0;
} else
- data = page_address(alloc_pages_node(numa_node_id(),
- GFP_KERNEL, get_order(sz)));
+ data = (void *)__get_free_pages(GFP_KERNEL, get_order(sz));
if (!data)
panic("Could not allocate MCA memory for cpu %d\n",
cpu);
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
- NULL, 1, 0);
+ NULL, 0);
break;
}
return NOTIFY_OK;
ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
int i;
- s64 rc;
+ long rc;
struct ia64_sal_retval isrv;
- u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
+ unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
static struct notifier_block default_init_monarch_nb = {
.notifier_call = default_monarch_init_process,
.priority = 0 /* we need to be notified last */
printk(KERN_INFO "Increasing MCA rendezvous timeout from "
"%ld to %ld milliseconds\n", timeout, isrv.v0);
timeout = isrv.v0;
- (void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
+ NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
continue;
}
printk(KERN_ERR "Failed to register rendezvous interrupt "
cpe_poll_timer.function = ia64_mca_cpe_poll;
{
- irq_desc_t *desc;
+ struct irq_desc *desc;
unsigned int irq;
if (cpe_vector >= 0) {