diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 4705b3c..24630fd 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,8 +1,26 @@
-/* Copyright (C) 2004 Mips Technologies, Inc */
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * Copyright (C) 2004 Mips Technologies, Inc
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
 
 #include <linux/clockchips.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
@@ -21,7 +39,6 @@
 #include <asm/time.h>
 #include <asm/addrspace.h>
 #include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
 #include <asm/smtc_proc.h>
 
 /*
@@ -59,18 +76,12 @@ unsigned long irq_hwmask[NR_IRQS];
 asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
 
 /*
- * Clock interrupt "latch" buffers, per "CPU"
- */
-
-static atomic_t ipi_timer_latch[NR_CPUS];
-
-/*
  * Number of InterProcessor Interrupt (IPI) message buffers to allocate
  */
 
 #define IPIBUF_PER_CPU 4
 
-static struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q IPIQ[NR_CPUS];
 static struct smtc_ipi_q freeIPIq;
 
 
@@ -83,14 +94,14 @@ void init_smtc_stats(void);
 
 /* Global SMTC Status */
 
-unsigned int smtc_status = 0;
+unsigned int smtc_status;
 
 /* Boot command line configuration overrides */
 
 static int vpe0limit;
-static int ipibuffers = 0;
-static int nostlb = 0;
-static int asidmask = 0;
+static int ipibuffers;
+static int nostlb;
+static int asidmask;
 unsigned long smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
@@ -139,7 +150,7 @@ __setup("asidmask=", asidmask_set);
 
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 
-static int hang_trig = 0;
+static int hang_trig;
 
 static int __init hangtrig_enable(char *s)
 {
@@ -279,10 +290,10 @@ static void smtc_configure_tlb(void)
  * possibly leave some TCs/VPEs as "slave" processors.
  *
  * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
+ * cpu_possible_map and the logical/physical mappings.
  */
 
-int __init mipsmt_build_cpu_map(int start_cpu_slot)
+int __init smtc_build_cpu_map(int start_cpu_slot)
 {
        int i, ntcs;
 
@@ -293,7 +304,7 @@ int __init mipsmt_build_cpu_map(int start_cpu_slot)
         */
        ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
        for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
-               cpu_set(i, phys_cpu_present_map);
+               set_cpu_possible(i, true);
                __cpu_number_map[i] = i;
                __cpu_logical_map[i] = i;
        }
@@ -325,20 +336,34 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
        write_tc_c0_tcstatus((read_tc_c0_tcstatus()
                        & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
                        | TCSTATUS_A);
-       write_tc_c0_tccontext(0);
+       /*
+        * TCContext gets an offset from the base of the IPIQ array
+        * to be used in low-level code to detect the presence of
+        * an active IPI queue
+        */
+       write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
        /* Bind tc to vpe */
        write_tc_c0_tcbind(vpe);
        /* In general, all TCs should have the same cpu_data indications */
        memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
        /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
-       if (cpu_data[0].cputype == CPU_34K)
+       if (cpu_data[0].cputype == CPU_34K ||
+           cpu_data[0].cputype == CPU_1004K)
                cpu_data[cpu].options &= ~MIPS_CPU_FPU;
        cpu_data[cpu].vpe_id = vpe;
        cpu_data[cpu].tc_id = tc;
+       /* Multi-core SMTC hasn't been tested, but be prepared */
+       cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
 }
 
+/*
+ * Tweak to get Count registers into as close a sync as possible.
+ * Value seems good for 34K-class cores.
+ */
+
+#define CP0_SKEW 8
 
-void mipsmt_prepare_cpus(void)
+void smtc_prepare_cpus(int cpus)
 {
        int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
        unsigned long flags;
@@ -362,13 +387,14 @@ void mipsmt_prepare_cpus(void)
                IPIQ[i].head = IPIQ[i].tail = NULL;
                spin_lock_init(&IPIQ[i].lock);
                IPIQ[i].depth = 0;
-               atomic_set(&ipi_timer_latch[i], 0);
+               IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
        }
 
        /* cpu_data index starts at zero */
        cpu = 0;
        cpu_data[cpu].vpe_id = 0;
        cpu_data[cpu].tc_id = 0;
+       cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
        cpu++;
 
        /* Report on boot-time options */
@@ -439,11 +465,8 @@ void mipsmt_prepare_cpus(void)
        smtc_configure_tlb();
 
        for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
-               /*
-                * Set the MVP bits.
-                */
-               settc(tc);
-               write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
+               if (tcpervpe[vpe] == 0)
+                       continue;
                if (vpe != 0)
                        printk(", ");
                printk("VPE %d: TC", vpe);
@@ -462,6 +485,12 @@ void mipsmt_prepare_cpus(void)
                }
                if (vpe != 0) {
                        /*
+                        * Allow this VPE to control others.
+                        */
+                       write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
+                                             VPECONF0_MVP);
+
+                       /*
                         * Clear any stale software interrupts from VPE's Cause
                         */
                        write_vpe_c0_cause(0);
@@ -483,7 +512,8 @@ void mipsmt_prepare_cpus(void)
                        write_vpe_c0_compare(0);
                        /* Propagate Config7 */
                        write_vpe_c0_config7(read_c0_config7());
-                       write_vpe_c0_count(read_c0_count());
+                       write_vpe_c0_count(read_c0_count() + CP0_SKEW);
+                       ehb();
                }
                /* enable multi-threading within VPE */
                write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -495,8 +525,8 @@ void mipsmt_prepare_cpus(void)
         * Pull any physically present but unused TCs out of circulation.
         */
        while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
-               cpu_clear(tc, phys_cpu_present_map);
-               cpu_clear(tc, cpu_present_map);
+               set_cpu_possible(tc, false);
+               set_cpu_present(tc, false);
                tc++;
        }
 
@@ -555,7 +585,7 @@ void mipsmt_prepare_cpus(void)
 void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 {
        extern u32 kernelsp[NR_CPUS];
-       long flags;
+       unsigned long flags;
        int mtflags;
 
        LOCK_MT_PRA();
@@ -584,24 +614,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 
 void smtc_init_secondary(void)
 {
-       /*
-        * Start timer on secondary VPEs if necessary.
-        * plat_timer_setup has already have been invoked by init/main
-        * on "boot" TC.  Like per_cpu_trap_init() hack, this assumes that
-        * SMTC init code assigns TCs consdecutively and in ascending order
-        * to across available VPEs.
-        */
-       if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
-           ((read_c0_tcbind() & TCBIND_CURVPE)
-           != cpu_data[smp_processor_id() - 1].vpe_id)){
-               write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
-       }
-
        local_irq_enable();
 }
 
 void smtc_smp_finish(void)
 {
+       int cpu = smp_processor_id();
+
+       /*
+        * Lowest-numbered CPU per VPE starts a clock tick.
+        * Like the per_cpu_trap_init() hack, this assumes that
+        * SMTC init code assigns TCs consecutively and
+        * in ascending order across available VPEs.
+        */
+       if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
+               write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+
        printk("TC %d going on-line as CPU %d\n",
                cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 }
@@ -662,7 +690,7 @@ void smtc_forward_irq(unsigned int irq)
         * and efficiency, we just pick the easiest one to find.
         */
 
-       target = first_cpu(irq_desc[irq].affinity);
+       target = cpumask_first(irq_desc[irq].affinity);
 
        /*
         * We depend on the platform code to have correctly processed
@@ -713,11 +741,24 @@ void smtc_forward_irq(unsigned int irq)
 static void smtc_ipi_qdump(void)
 {
        int i;
+       struct smtc_ipi *temp;
 
        for (i = 0; i < NR_CPUS ;i++) {
-               printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
+               pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
                        i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
                        IPIQ[i].depth);
+               temp = IPIQ[i].head;
+
+               while (temp != IPIQ[i].tail) {
+                       pr_debug("%d %d %d: ", temp->type, temp->dest,
+                              (int)temp->arg);
+#ifdef SMTC_IPI_DEBUG
+                   pr_debug("%u %lu\n", temp->sender, temp->stamp);
+#else
+                   pr_debug("\n");
+#endif
+                   temp = temp->flink;
+               }
        }
 }
 
@@ -752,13 +793,20 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 {
        int tcstatus;
        struct smtc_ipi *pipi;
-       long flags;
+       unsigned long flags;
        int mtflags;
+       unsigned long tcrestart;
+       extern void r4k_wait_irqoff(void), __pastwait(void);
+       int set_resched_flag = (type == LINUX_SMP_IPI &&
+                               action == SMP_RESCHEDULE_YOURSELF);
 
        if (cpu == smp_processor_id()) {
                printk("Cannot Send IPI to self!\n");
                return;
        }
+       if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
+               return; /* There is a reschedule queued already */
+
        /* Set up a descriptor, to be delivered either promptly or queued */
        pipi = smtc_ipi_dq(&freeIPIq);
        if (pipi == NULL) {
@@ -770,9 +818,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
        pipi->arg = (void *)action;
        pipi->dest = cpu;
        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
-               if (type == SMTC_CLOCK_TICK)
-                       atomic_inc(&ipi_timer_latch[cpu]);
                /* If not on same VPE, enqueue and send cross-VPE interrupt */
+               IPIQ[cpu].resched_flag |= set_resched_flag;
                smtc_ipi_nq(&IPIQ[cpu], pipi);
                LOCK_CORE_PRA();
                settc(cpu_data[cpu].tc_id);
@@ -799,22 +846,30 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 
                if ((tcstatus & TCSTATUS_IXMT) != 0) {
                        /*
-                        * Spin-waiting here can deadlock,
-                        * so we queue the message for the target TC.
+                        * If we're in the irq-off version of the wait
+                        * loop, we need to force exit from the wait and
+                        * do a direct post of the IPI.
+                        */
+                       if (cpu_wait == r4k_wait_irqoff) {
+                               tcrestart = read_tc_c0_tcrestart();
+                               if (tcrestart >= (unsigned long)r4k_wait_irqoff
+                                   && tcrestart < (unsigned long)__pastwait) {
+                                       write_tc_c0_tcrestart(__pastwait);
+                                       tcstatus &= ~TCSTATUS_IXMT;
+                                       write_tc_c0_tcstatus(tcstatus);
+                                       goto postdirect;
+                               }
+                       }
+                       /*
+                        * Otherwise we queue the message for the target TC
+                        * to pick up when it does a local_irq_restore()
                         */
                        write_tc_c0_tchalt(0);
                        UNLOCK_CORE_PRA();
-                       /* Try to reduce redundant timer interrupt messages */
-                       if (type == SMTC_CLOCK_TICK) {
-                           if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
-                               smtc_ipi_nq(&freeIPIq, pipi);
-                               return;
-                           }
-                       }
+                       IPIQ[cpu].resched_flag |= set_resched_flag;
                        smtc_ipi_nq(&IPIQ[cpu], pipi);
                } else {
-                       if (type == SMTC_CLOCK_TICK)
-                               atomic_inc(&ipi_timer_latch[cpu]);
+postdirect:
                        post_direct_ipi(cpu, pipi);
                        write_tc_c0_tchalt(0);
                        UNLOCK_CORE_PRA();
@@ -876,14 +931,13 @@ static void ipi_resched_interrupt(void)
        /* Return from interrupt should be enough to cause scheduler check */
 }
 
-
 static void ipi_call_interrupt(void)
 {
        /* Invoke generic function invocation code in smp.c */
        smp_call_function_interrupt();
 }
 
-DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 
 void ipi_decode(struct smtc_ipi *pipi)
 {
@@ -891,20 +945,16 @@ void ipi_decode(struct smtc_ipi *pipi)
        struct clock_event_device *cd;
        void *arg_copy = pipi->arg;
        int type_copy = pipi->type;
-       int ticks;
+       int irq = MIPS_CPU_IRQ_BASE + 1;
 
        smtc_ipi_nq(&freeIPIq, pipi);
+
        switch (type_copy) {
        case SMTC_CLOCK_TICK:
                irq_enter();
-               kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
-               cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-               ticks = atomic_read(&ipi_timer_latch[cpu]);
-               atomic_sub(ticks, &ipi_timer_latch[cpu]);
-               while (ticks) {
-                       cd->event_handler(cd);
-                       ticks--;
-               }
+               kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+               cd = &per_cpu(mips_clockevent_device, cpu);
+               cd->event_handler(cd);
                irq_exit();
                break;
 
@@ -937,24 +987,51 @@ void ipi_decode(struct smtc_ipi *pipi)
        }
 }
 
+/*
+ * Similar to smtc_ipi_replay(), but invoked from context restore,
+ * so it reuses the current exception frame rather than set up a
+ * new one with self_ipi.
+ */
+
 void deferred_smtc_ipi(void)
 {
-       struct smtc_ipi *pipi;
-       unsigned long flags;
-/* DEBUG */
-       int q = smp_processor_id();
+       int cpu = smp_processor_id();
 
        /*
         * Test is not atomic, but much faster than a dequeue,
         * and the vast majority of invocations will have a null queue.
+        * If irqs were disabled when this was called, then any IPIs queued
+        * after the last test will be taken on the next irq_enable/restore.
+        * If interrupts were enabled, then any IPIs added after the
+        * last test will be taken directly.
         */
-       if (IPIQ[q].head != NULL) {
-               while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
-                       /* ipi_decode() should be called with interrupts off */
-                       local_irq_save(flags);
+
+       while (IPIQ[cpu].head != NULL) {
+               struct smtc_ipi_q *q = &IPIQ[cpu];
+               struct smtc_ipi *pipi;
+               unsigned long flags;
+
+               /*
+                * It's possible we'll come in with interrupts
+                * already enabled.
+                */
+               local_irq_save(flags);
+               spin_lock(&q->lock);
+               pipi = __smtc_ipi_dq(q);
+               spin_unlock(&q->lock);
+               if (pipi != NULL) {
+                       if (pipi->type == LINUX_SMP_IPI &&
+                           (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
+                               IPIQ[cpu].resched_flag = 0;
                        ipi_decode(pipi);
-                       local_irq_restore(flags);
                }
+               /*
+                * The use of the __raw_local restore isn't
+                * as obviously necessary here as in smtc_ipi_replay(),
+                * but it's more efficient, given that we're already
+                * running down the IPI queue.
+                */
+               __raw_local_irq_restore(flags);
        }
 }
 
@@ -975,7 +1052,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
        struct smtc_ipi *pipi;
        unsigned long tcstatus;
        int sent;
-       long flags;
+       unsigned long flags;
        unsigned int mtflags;
        unsigned int vpflags;
 
@@ -1028,6 +1105,9 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
                                 * with interrupts off
                                 */
                                local_irq_save(flags);
+                               if (pipi->type == LINUX_SMP_IPI &&
+                                   (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
+                                       IPIQ[cpu].resched_flag = 0;
                                ipi_decode(pipi);
                                local_irq_restore(flags);
                        }
@@ -1044,9 +1124,8 @@ static void ipi_irq_dispatch(void)
 
 static struct irqaction irq_ipi = {
        .handler        = ipi_interrupt,
-       .flags          = IRQF_DISABLED,
-       .name           = "SMTC_IPI",
-       .flags          = IRQF_PERCPU
+       .flags          = IRQF_DISABLED | IRQF_PERCPU,
+       .name           = "SMTC_IPI"
 };
 
 static void setup_cross_vpe_interrupts(unsigned int nvpe)
@@ -1066,55 +1145,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
- *
- * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
- * called with interrupts disabled.  We do rely on interrupts being disabled
- * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
- * result in a recursive call to raw_local_irq_restore().
  */
 
-static void __smtc_ipi_replay(void)
+/*
+ * smtc_ipi_replay is called from raw_local_irq_restore
+ */
+
+void smtc_ipi_replay(void)
 {
        unsigned int cpu = smp_processor_id();
 
        /*
         * To the extent that we've ever turned interrupts off,
         * we may have accumulated deferred IPIs.  This is subtle.
-        * If we use the smtc_ipi_qdepth() macro, we'll get an
-        * exact number - but we'll also disable interrupts
-        * and create a window of failure where a new IPI gets
-        * queued after we test the depth but before we re-enable
-        * interrupts. So long as IXMT never gets set, however,
         * we should be OK:  If we pick up something and dispatch
         * it here, that's great. If we see nothing, but concurrent
         * with this operation, another TC sends us an IPI, IXMT
         * is clear, and we'll handle it as a real pseudo-interrupt
-        * and not a pseudo-pseudo interrupt.
+        * and not a pseudo-pseudo interrupt.  The important thing
+        * is to do the last check for queued messages *after* the
+        * re-enabling of interrupts.
         */
-       if (IPIQ[cpu].depth > 0) {
-               while (1) {
-                       struct smtc_ipi_q *q = &IPIQ[cpu];
-                       struct smtc_ipi *pipi;
-                       extern void self_ipi(struct smtc_ipi *);
-
-                       spin_lock(&q->lock);
-                       pipi = __smtc_ipi_dq(q);
-                       spin_unlock(&q->lock);
-                       if (!pipi)
-                               break;
+       while (IPIQ[cpu].head != NULL) {
+               struct smtc_ipi_q *q = &IPIQ[cpu];
+               struct smtc_ipi *pipi;
+               unsigned long flags;
 
+               /*
+                * It's just possible we'll come in with interrupts
+                * already enabled.
+                */
+               local_irq_save(flags);
+
+               spin_lock(&q->lock);
+               pipi = __smtc_ipi_dq(q);
+               spin_unlock(&q->lock);
+               /*
+                * But use a raw restore here to avoid recursion.
+                */
+               __raw_local_irq_restore(flags);
+
+               if (pipi) {
                        self_ipi(pipi);
                        smtc_cpu_stats[cpu].selfipis++;
                }
        }
 }
 
-void smtc_ipi_replay(void)
-{
-       raw_local_irq_disable();
-       __smtc_ipi_replay();
-}
-
 EXPORT_SYMBOL(smtc_ipi_replay);
 
 void smtc_idle_loop_hook(void)
@@ -1193,40 +1270,13 @@ void smtc_idle_loop_hook(void)
                }
        }
 
-       /*
-        * Now that we limit outstanding timer IPIs, check for hung TC
-        */
-       for (tc = 0; tc < NR_CPUS; tc++) {
-               /* Don't check ourself - we'll dequeue IPIs just below */
-               if ((tc != smp_processor_id()) &&
-                   atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
-                   if (clock_hang_reported[tc] == 0) {
-                       pdb_msg += sprintf(pdb_msg,
-                               "TC %d looks hung with timer latch at %d\n",
-                               tc, atomic_read(&ipi_timer_latch[tc]));
-                       clock_hang_reported[tc]++;
-                       }
-               }
-       }
        emt(mtflags);
        local_irq_restore(flags);
        if (pdb_msg != &id_ho_db_msg[0])
                printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 
-       /*
-        * Replay any accumulated deferred IPIs. If "Instant Replay"
-        * is in use, there should never be any.
-        */
-#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-       {
-               unsigned long flags;
-
-               local_irq_save(flags);
-               __smtc_ipi_replay();
-               local_irq_restore(flags);
-       }
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
+       smtc_ipi_replay();
 }
 
 void smtc_soft_dump(void)
@@ -1242,10 +1292,6 @@ void smtc_soft_dump(void)
                printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
        }
        smtc_ipi_qdump();
-       printk("Timer IPI Backlogs:\n");
-       for (i=0; i < NR_CPUS; i++) {
-               printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
-       }
        printk("%d Recoveries of \"stolen\" FPU\n",
               atomic_read(&smtc_fpu_recoveries));
 }
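
For reference, the reschedule-IPI coalescing this patch introduces (the new
IPIQ[].resched_flag handling in smtc_send_ipi() and the dequeue paths) boils
down to "keep at most one reschedule message queued per target CPU".  A
minimal user-space sketch of that idea, with simplified, hypothetical names
and no locking (not the actual kernel code), might look like this:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct ipi_q {
	int depth;		/* messages currently queued */
	bool resched_flag;	/* a reschedule is already queued */
};

static struct ipi_q ipiq[NR_CPUS];

/* Send side: stand-in for the reschedule case in smtc_send_ipi() */
static void send_resched_ipi(int cpu)
{
	if (ipiq[cpu].resched_flag)
		return;			/* one already pending: coalesce */
	ipiq[cpu].resched_flag = true;
	ipiq[cpu].depth++;		/* enqueue the single message */
}

/* Receive side: stand-in for the dequeue paths that clear the flag */
static void take_resched_ipi(int cpu)
{
	if (ipiq[cpu].depth == 0)
		return;
	ipiq[cpu].depth--;
	ipiq[cpu].resched_flag = false;	/* next reschedule may queue again */
}

int main(void)
{
	send_resched_ipi(1);
	send_resched_ipi(1);		/* coalesced: depth stays at 1 */
	printf("depth = %d\n", ipiq[1].depth);
	take_resched_ipi(1);
	send_resched_ipi(1);		/* allowed again after the dequeue */
	printf("depth = %d\n", ipiq[1].depth);
	return 0;
}

The old ipi_timer_latch[] counters served a similar purpose for clock-tick
IPIs; since each SMTC_CLOCK_TICK now fires the clockevent handler exactly
once in ipi_decode(), that bookkeeping is removed by the same patch.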