Blackfin arch: panic when running on a chip rev below what we are compiled for
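What the setup_arch() hunk below boils down to: a kernel configured for a given silicon rev assumes that rev's anomaly workarounds, so letting it keep booting on older silicon only hides bugs. A condensed sketch of the new check (taken from the hunk further down, not a standalone snippet; the surrounding CPUID-match test is omitted here):

	/* Built for a newer rev than the silicon we are running on: the
	 * required anomaly workarounds are missing, so panic instead of
	 * printing the old warning.  "Rev none" (-1) and "Rev any"
	 * (0xffff) builds still only warn. */
	if (bfin_compiled_revid() != -1 && bfin_compiled_revid() != 0xffff &&
	    bfin_compiled_revid() > bfin_revid())
		panic("Error: you are missing anomaly workarounds for this rev\n");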
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 7a82d10..d3d37e7 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -13,6 +13,7 @@
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
 #include <linux/cpu.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/tty.h>
 #include <linux/pfn.h>
 #include <asm/blackfin.h>
 #include <asm/cplbinit.h>
 #include <asm/div64.h>
+#include <asm/cpu.h>
 #include <asm/fixed_code.h>
 #include <asm/early_printk.h>
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-
 u16 _bfin_swrst;
 EXPORT_SYMBOL(_bfin_swrst);
 
@@ -42,6 +42,7 @@ EXPORT_SYMBOL(memory_start);
 EXPORT_SYMBOL(memory_end);
 EXPORT_SYMBOL(physical_mem_end);
 EXPORT_SYMBOL(_ramend);
+EXPORT_SYMBOL(reserved_mem_dcache_on);
 
 #ifdef CONFIG_MTD_UCLINUX
 unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
@@ -52,7 +53,8 @@ EXPORT_SYMBOL(mtd_size);
 #endif
 
 char __initdata command_line[COMMAND_LINE_SIZE];
-unsigned int __initdata *__retx;
+void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
+       *init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;
 
 /* boot memmap, for parsing "memmap=" */
 #define BFIN_MEMMAP_MAX                128 /* number of entries in bfin_memmap */
@@ -77,36 +79,86 @@ static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
 static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
 static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
 
-void __init bf53x_cache_init(void)
-{
+DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
+
+static int early_init_clkin_hz(char *buf);
+
 #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
-       generate_cpl_tables();
+void __init generate_cplb_tables(void)
+{
+       unsigned int cpu;
+
+       /* Generate per-CPU I&D CPLB tables */
+       for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+               generate_cplb_tables_cpu(cpu);
+}
 #endif
 
+void __cpuinit bfin_setup_caches(unsigned int cpu)
+{
 #ifdef CONFIG_BFIN_ICACHE
-       bfin_icache_init();
-       printk(KERN_INFO "Instruction Cache Enabled\n");
+#ifdef CONFIG_MPU
+       bfin_icache_init(icplb_tbl[cpu]);
+#else
+       bfin_icache_init(icplb_tables[cpu]);
+#endif
+#endif
+
+#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_MPU
+       bfin_dcache_init(dcplb_tbl[cpu]);
+#else
+       bfin_dcache_init(dcplb_tables[cpu]);
+#endif
 #endif
 
+       /*
+        * In cache coherence emulation mode, we need to have the
+        * D-cache enabled before running any atomic operation which
+        * might involve cache invalidation (i.e. spinlock, rwlock).
+        * So printk's are deferred until then.
+        */
+#ifdef CONFIG_BFIN_ICACHE
+       printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+#endif
 #ifdef CONFIG_BFIN_DCACHE
-       bfin_dcache_init();
-       printk(KERN_INFO "Data Cache Enabled"
+       printk(KERN_INFO "Data Cache Enabled for CPU%u"
 # if defined CONFIG_BFIN_WB
                " (write-back)"
 # elif defined CONFIG_BFIN_WT
                " (write-through)"
 # endif
-               "\n");
+               "\n", cpu);
+#endif
+}
+
+void __cpuinit bfin_setup_cpudata(unsigned int cpu)
+{
+       struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
+
+       cpudata->idle = current;
+       cpudata->loops_per_jiffy = loops_per_jiffy;
+       cpudata->imemctl = bfin_read_IMEM_CONTROL();
+       cpudata->dmemctl = bfin_read_DMEM_CONTROL();
+}
+
+void __init bfin_cache_init(void)
+{
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+       generate_cplb_tables();
 #endif
+       bfin_setup_caches(0);
 }
 
-void __init bf53x_relocate_l1_mem(void)
+void __init bfin_relocate_l1_mem(void)
 {
        unsigned long l1_code_length;
        unsigned long l1_data_a_length;
        unsigned long l1_data_b_length;
        unsigned long l2_length;
 
+       blackfin_dma_early_init();
+
        l1_code_length = _etext_l1 - _stext_l1;
        if (l1_code_length > L1_CODE_LENGTH)
                panic("L1 Instruction SRAM Overflow\n");
@@ -117,23 +169,23 @@ void __init bf53x_relocate_l1_mem(void)
        /* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
        dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
 
-       l1_data_a_length = _ebss_l1 - _sdata_l1;
+       l1_data_a_length = _sbss_l1 - _sdata_l1;
        if (l1_data_a_length > L1_DATA_A_LENGTH)
                panic("L1 Data SRAM Bank A Overflow\n");
 
-       /* Copy _sdata_l1 to _ebss_l1 to L1 data bank A SRAM */
+       /* Copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
        dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
 
-       l1_data_b_length = _ebss_b_l1 - _sdata_b_l1;
+       l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
        if (l1_data_b_length > L1_DATA_B_LENGTH)
                panic("L1 Data SRAM Bank B Overflow\n");
 
-       /* Copy _sdata_b_l1 to _ebss_b_l1 to L1 data bank B SRAM */
+       /* Copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
        dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
                        l1_data_a_length, l1_data_b_length);
 
        if (L2_LENGTH != 0) {
-               l2_length = _ebss_l2 - _stext_l2;
+               l2_length = _sbss_l2 - _stext_l2;
                if (l2_length > L2_LENGTH)
                        panic("L2 SRAM Overflow\n");
 
@@ -228,7 +280,7 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
        /* record all known change-points (starting and ending addresses),
           omitting those that are for empty memory regions */
        chgidx = 0;
-       for (i = 0; i < old_nr; i++)    {
+       for (i = 0; i < old_nr; i++) {
                if (map[i].size != 0) {
                        change_point[chgidx]->addr = map[i].addr;
                        change_point[chgidx++]->pentry = &map[i];
@@ -236,13 +288,13 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
                        change_point[chgidx++]->pentry = &map[i];
                }
        }
-       chg_nr = chgidx;        /* true number of change-points */
+       chg_nr = chgidx;        /* true number of change-points */
 
        /* sort change-point list by memory addresses (low -> high) */
        still_changing = 1;
-       while (still_changing)  {
+       while (still_changing) {
                still_changing = 0;
-               for (i = 1; i < chg_nr; i++)  {
+               for (i = 1; i < chg_nr; i++) {
                        /* if <current_addr> > <last_addr>, swap */
                        /* or, if current=<start_addr> & last=<end_addr>, swap */
                        if ((change_point[i]->addr < change_point[i-1]->addr) ||
@@ -259,10 +311,10 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
        }
 
        /* create a new memmap, removing overlaps */
-       overlap_entries = 0;     /* number of entries in the overlap table */
-       new_entry = 0;   /* index for creating new memmap entries */
-       last_type = 0;           /* start with undefined memory type */
-       last_addr = 0;           /* start with 0 as last starting address */
+       overlap_entries = 0;    /* number of entries in the overlap table */
+       new_entry = 0;          /* index for creating new memmap entries */
+       last_type = 0;          /* start with undefined memory type */
+       last_addr = 0;          /* start with 0 as last starting address */
        /* loop through change-points, determining affect on the new memmap */
        for (chgidx = 0; chgidx < chg_nr; chgidx++) {
                /* keep track of all overlapping memmap entries */
@@ -284,14 +336,14 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
                        if (overlap_list[i]->type > current_type)
                                current_type = overlap_list[i]->type;
                /* continue building up new memmap based on this information */
-               if (current_type != last_type)  {
+               if (current_type != last_type) {
                        if (last_type != 0) {
                                new_map[new_entry].size =
                                        change_point[chgidx]->addr - last_addr;
                                /* move forward only if the new size was non-zero */
                                if (new_map[new_entry].size != 0)
                                        if (++new_entry >= BFIN_MEMMAP_MAX)
-                                               break;  /* no more space left for new entries */
+                                               break;  /* no more space left for new entries */
                        }
                        if (current_type != 0) {
                                new_map[new_entry].addr = change_point[chgidx]->addr;
@@ -301,9 +353,9 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
                        last_type = current_type;
                }
        }
-       new_nr = new_entry;   /* retain count for new entries */
+       new_nr = new_entry;     /* retain count for new entries */
 
-       /* copy new  mapping into original location */
+       /* copy new mapping into original location */
        memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
        *pnr_map = new_nr;
 
@@ -359,7 +411,6 @@ static __init int parse_memmap(char *arg)
  *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
  *       @ from <start> to <start>+<mem>, type RAM
  *       $ from <start> to <start>+<mem>, type RESERVED
- *
  */
 static __init void parse_cmdline_early(char *cmdline_p)
 {
@@ -381,14 +432,15 @@ static __init void parse_cmdline_early(char *cmdline_p)
                                        if (*to != ' ') {
                                                if (*to == '$'
                                                    || *(to + 1) == '$')
-                                                       reserved_mem_dcache_on =
-                                                           1;
+                                                       reserved_mem_dcache_on = 1;
                                                if (*to == '#'
                                                    || *(to + 1) == '#')
-                                                       reserved_mem_icache_on =
-                                                           1;
+                                                       reserved_mem_icache_on = 1;
                                        }
                                }
+                       } else if (!memcmp(to, "clkin_hz=", 9)) {
+                               to += 9;
+                               early_init_clkin_hz(to);
                        } else if (!memcmp(to, "earlyprintk=", 12)) {
                                to += 12;
                                setup_early_printk(to);
@@ -410,14 +462,13 @@ static __init void parse_cmdline_early(char *cmdline_p)
  *  [_rambase, _ramstart]:             kernel image
  *  [memory_start, memory_end]:                dynamic memory managed by kernel
  *  [memory_end, _ramend]:             reserved memory
- *     [meory_mtd_start(memory_end),
+ *     [memory_mtd_start(memory_end),
  *             memory_mtd_start + mtd_size]:   rootfs (if any)
  *     [_ramend - DMA_UNCACHED_REGION,
  *             _ramend]:                       uncached DMA region
  *  [_ramend, physical_mem_end]:       memory not managed by kernel
- *
  */
-static __init void  memory_setup(void)
+static __init void memory_setup(void)
 {
 #ifdef CONFIG_MTD_UCLINUX
        unsigned long mtd_phys = 0;
@@ -434,7 +485,7 @@ static __init void  memory_setup(void)
        memory_end = _ramend - DMA_UNCACHED_REGION;
 
 #ifdef CONFIG_MPU
-       /* Round up to multiple of 4MB */
+       /* Round up to multiple of 4MB */
        memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
 #else
        memory_start = PAGE_ALIGN(_ramstart);
@@ -614,7 +665,7 @@ static __init void setup_bootmem_allocator(void)
        end_pfn = memory_end >> PAGE_SHIFT;
 
        /*
-        * give all the memory to the bootmap allocator,  tell it to put the
+        * give all the memory to the bootmap allocator, tell it to put the
         * boot mem_map at the start of memory.
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0),
@@ -782,16 +833,29 @@ void __init setup_arch(char **cmdline_p)
 
        _bfin_swrst = bfin_read_SWRST();
 
-       /* If we double fault, reset the system - otherwise we hang forever */
-       bfin_write_SWRST(DOUBLE_FAULT);
+#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
+       bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
+#endif
+#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
+       bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
+#endif
 
-       if (_bfin_swrst & RESET_DOUBLE)
-               /*
-                * don't decode the address, since you don't know if this
-                * kernel's symbol map is the same as the crashing kernel
-                */
-               printk(KERN_INFO "Recovering from Double Fault event at %pF\n", __retx);
-       else if (_bfin_swrst & RESET_WDOG)
+#ifdef CONFIG_SMP
+       if (_bfin_swrst & SWRST_DBL_FAULT_A) {
+#else
+       if (_bfin_swrst & RESET_DOUBLE) {
+#endif
+               printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+               /* We assume the crashing kernel, and the current symbol table match */
+               printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
+                       (int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
+               printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
+               printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
+#endif
+               printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
+                       init_retx);
+       } else if (_bfin_swrst & RESET_WDOG)
                printk(KERN_INFO "Recovering from Watchdog event\n");
        else if (_bfin_swrst & RESET_SOFTWARE)
                printk(KERN_NOTICE "Reset caused by Software reset\n");
@@ -803,21 +867,31 @@ void __init setup_arch(char **cmdline_p)
                printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
        else
                printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
-       if (bfin_revid() != bfin_compiled_revid()) {
-               if (bfin_compiled_revid() == -1)
-                       printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
-                              bfin_revid());
-               else if (bfin_compiled_revid() != 0xffff)
-                       printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
-                              bfin_compiled_revid(), bfin_revid());
+
+       if (unlikely(CPUID != bfin_cpuid()))
+               printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
+                       CPU, bfin_cpuid(), bfin_revid());
+       else {
+               if (bfin_revid() != bfin_compiled_revid()) {
+                       if (bfin_compiled_revid() == -1)
+                               printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
+                                      bfin_revid());
+                       else if (bfin_compiled_revid() != 0xffff) {
+                               printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
+                                      bfin_compiled_revid(), bfin_revid());
+                               if (bfin_compiled_revid() > bfin_revid())
+                                       panic("Error: you are missing anomaly workarounds for this rev\n");
+                       }
+               }
+               if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
+                       printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
+                              CPU, bfin_revid());
        }
-       if (bfin_revid() < SUPPORTED_REVID)
-               printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
-                      CPU, bfin_revid());
+
        printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
 
        printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
-              cclk / 1000000,  sclk / 1000000);
+              cclk / 1000000, sclk / 1000000);
 
        if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
                printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
@@ -849,18 +923,21 @@ void __init setup_arch(char **cmdline_p)
        BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
                != SAFE_USER_INSTRUCTION - FIXED_CODE_START);
 
+#ifdef CONFIG_SMP
+       platform_init_cpus();
+#endif
        init_exception_vectors();
-       bf53x_cache_init();
+       bfin_cache_init();      /* Initialize caches for the boot CPU */
 }
 
 static int __init topology_init(void)
 {
-       int cpu;
+       unsigned int cpu;
+       /* Record CPU-private information for the boot processor. */
+       bfin_setup_cpudata(0);
 
        for_each_possible_cpu(cpu) {
-               struct cpu *c = &per_cpu(cpu_devices, cpu);
-
-               register_cpu(c, cpu);
+               register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
        }
 
        return 0;
@@ -868,6 +945,23 @@ static int __init topology_init(void)
 
 subsys_initcall(topology_init);
 
+/* Get the input clock frequency */
+static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
+static u_long get_clkin_hz(void)
+{
+       return cached_clkin_hz;
+}
+static int __init early_init_clkin_hz(char *buf)
+{
+       cached_clkin_hz = simple_strtoul(buf, NULL, 0);
+#ifdef BFIN_KERNEL_CLOCK
+       if (cached_clkin_hz != CONFIG_CLKIN_HZ)
+               panic("cannot change clkin_hz when reprogramming clocks");
+#endif
+       return 1;
+}
+early_param("clkin_hz=", early_init_clkin_hz);
+
 /* Get the voltage input multiplier */
 static u_long cached_vco_pll_ctl, cached_vco;
 static u_long get_vco(void)
@@ -884,7 +978,7 @@ static u_long get_vco(void)
        if (0 == msel)
                msel = 64;
 
-       cached_vco = CONFIG_CLKIN_HZ;
+       cached_vco = get_clkin_hz();
        cached_vco >>= (1 & pll_ctl);   /* DF bit */
        cached_vco *= msel;
        return cached_vco;
@@ -897,7 +991,7 @@ u_long get_cclk(void)
        u_long csel, ssel;
 
        if (bfin_read_PLL_STAT() & 0x1)
-               return CONFIG_CLKIN_HZ;
+               return get_clkin_hz();
 
        ssel = bfin_read_PLL_DIV();
        if (ssel == cached_cclk_pll_div)
@@ -922,7 +1016,7 @@ u_long get_sclk(void)
        u_long ssel;
 
        if (bfin_read_PLL_STAT() & 0x1)
-               return CONFIG_CLKIN_HZ;
+               return get_clkin_hz();
 
        ssel = bfin_read_PLL_DIV();
        if (ssel == cached_sclk_pll_div)
@@ -964,17 +1058,18 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 {
        char *cpu, *mmu, *fpu, *vendor, *cache;
        uint32_t revid;
-
-       u_long cclk = 0, sclk = 0;
+       int cpu_num = *(unsigned int *)v;
+       u_long sclk, cclk;
        u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
+       struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);
 
        cpu = CPU;
        mmu = "none";
        fpu = "none";
        revid = bfin_revid();
 
-       cclk = get_cclk();
        sclk = get_sclk();
+       cclk = get_cclk();
 
        switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
        case 0xca:
@@ -985,14 +1080,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                break;
        }
 
-       seq_printf(m, "processor\t: %d\n"
-               "vendor_id\t: %s\n"
-               "cpu family\t: 0x%x\n"
-               "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
+       seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);
+
+       if (CPUID == bfin_cpuid())
+               seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
+       else
+               seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
+                       CPUID, bfin_cpuid());
+
+       seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
                "stepping\t: %d\n",
-               0,
-               vendor,
-               (bfin_read_CHIPID() & CHIPID_FAMILY),
                cpu, cclk/1000000, sclk/1000000,
 #ifdef CONFIG_MPU
                "mpu on",
@@ -1006,12 +1103,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                sclk/1000000, sclk%1000000);
        seq_printf(m, "bogomips\t: %lu.%02lu\n"
                "Calibration\t: %lu loops\n",
-               (loops_per_jiffy * HZ) / 500000,
-               ((loops_per_jiffy * HZ) / 5000) % 100,
-               (loops_per_jiffy * HZ));
+               (cpudata->loops_per_jiffy * HZ) / 500000,
+               ((cpudata->loops_per_jiffy * HZ) / 5000) % 100,
+               (cpudata->loops_per_jiffy * HZ));
 
        /* Check Cache configutation */
-       switch (bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) {
+       switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
        case ACACHE_BSRAM:
                cache = "dbank-A/B\t: cache/sram";
                dcache_size = 16;
@@ -1035,10 +1132,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        }
 
        /* Is it turned on? */
-       if ((bfin_read_DMEM_CONTROL() & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
+       if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
                dcache_size = 0;
 
-       if ((bfin_read_IMEM_CONTROL() & (IMC | ENICPLB)) == (IMC | ENICPLB))
+       if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
                icache_size = 0;
 
        seq_printf(m, "cache size\t: %d KB(L1 icache) "
@@ -1063,8 +1160,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
                   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
                   BFIN_DLINES);
+#ifdef __ARCH_SYNC_CORE_DCACHE
+       seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
+#endif
 #ifdef CONFIG_BFIN_ICACHE_LOCK
-       switch ((bfin_read_IMEM_CONTROL() >> 3) & WAYALL_L) {
+       switch ((cpudata->imemctl >> 3) & WAYALL_L) {
        case WAY0_L:
                seq_printf(m, "Way0 Locked-Down\n");
                break;
@@ -1114,6 +1214,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "No Ways are locked\n");
        }
 #endif
+
+       if (cpu_num != num_possible_cpus() - 1)
+               return 0;
+
+       if (L2_LENGTH)
+               seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
        seq_printf(m, "board name\t: %s\n", bfin_board_name);
        seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
                 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
@@ -1121,18 +1227,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                ((int)memory_end - (int)_stext) >> 10,
                _stext,
                (void *)memory_end);
+       seq_printf(m, "\n");
 
        return 0;
 }
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-       return *pos < NR_CPUS ? ((void *)0x12345678) : NULL;
+       if (*pos == 0)
+               *pos = first_cpu(cpu_online_map);
+       if (*pos >= num_online_cpus())
+               return NULL;
+
+       return pos;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       ++*pos;
+       *pos = next_cpu(*pos, cpu_online_map);
+
        return c_start(m, pos);
 }