X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fkernel%2Fsetup_64.c;h=6568406b2a3081c0e5c4c51b75129880fc241d73;hb=fbad5f1dfdd097ca282ee3a796986e11eb5a971f;hp=f7ad64acf47e5f70567da0187b8629406c32d9cd;hpb=12d04eef927bf61328af2c7cbe756c96f98ac3bf;p=safe%2Fjmp%2Flinux-2.6

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f7ad64a..6568406 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -20,7 +20,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -34,6 +33,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -56,11 +57,12 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
+#include
+#include
 
 #include "setup.h"
 
@@ -70,7 +72,6 @@
 #define DBG(fmt...)
 #endif
 
-int have_of = 1;
 int boot_cpuid = 0;
 u64 ppc64_pft_size;
 
@@ -110,7 +111,7 @@ static void check_smt_enabled(void)
 
 	dn = of_find_node_by_path("/options");
 	if (dn) {
-		smt_option = get_property(dn, "ibm,smt-enabled", NULL);
+		smt_option = of_get_property(dn, "ibm,smt-enabled", NULL);
 
 		if (smt_option) {
 			if (!strcmp(smt_option, "on"))
@@ -142,11 +143,14 @@ early_param("smt-enabled", early_smt_enabled);
 #define check_smt_enabled()
 #endif /* CONFIG_SMP */
 
-/* Put the paca pointer into r13 and SPRG3 */
+/* Put the paca pointer into r13 and SPRG_PACA */
 void __init setup_paca(int cpu)
 {
 	local_paca = &paca[cpu];
-	mtspr(SPRN_SPRG3, local_paca);
+	mtspr(SPRN_SPRG_PACA, local_paca);
+#ifdef CONFIG_PPC_BOOK3E
+	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
+#endif
 }
 
 /*
@@ -170,21 +174,31 @@ void __init setup_paca(int cpu)
 
 void __init early_setup(unsigned long dt_ptr)
 {
+	/* -------- printk is _NOT_ safe to use here ! ------- */
+
+	/* Fill in any unititialised pacas */
+	initialise_pacas();
+
 	/* Identify CPU type */
-	identify_cpu(0);
+	identify_cpu(0, mfspr(SPRN_PVR));
 
 	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
 	setup_paca(0);
 
+	/* Initialize lockdep early or else spinlocks will blow */
+	lockdep_init();
+
+	/* -------- printk is now safe to use ------- */
+
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
 	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
 
 	/*
-	 * Do early initializations using the flattened device
-	 * tree, like retreiving the physical memory map or
-	 * calculating/retreiving the hash table size
+	 * Do early initialization using the flattened device
+	 * tree, such as retrieving the physical memory map or
+	 * calculating/retrieving the hash table size.
 	 */
 	early_init_devtree(__va(dt_ptr));
 
@@ -193,8 +207,6 @@ void __init early_setup(unsigned long dt_ptr)
 
 	/* Fix up paca fields required for the boot cpu */
 	get_paca()->cpu_start = 1;
-	get_paca()->stab_real = __pa((u64)&initial_stab);
-	get_paca()->stab_addr = (u64)&initial_stab;
 
 	/* Probe the machine type */
 	probe_machine();
@@ -203,20 +215,8 @@ void __init early_setup(unsigned long dt_ptr)
 
 	DBG("Found, Initializing memory management...\n");
 
-	/*
-	 * Initialize the MMU Hash table and create the linear mapping
-	 * of memory. Has to be done before stab/slb initialization as
-	 * this is currently where the page size encoding is obtained
-	 */
-	htab_initialize();
-
-	/*
-	 * Initialize stab / SLB management except on iSeries
-	 */
-	if (cpu_has_feature(CPU_FTR_SLB))
-		slb_initialize();
-	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		stab_initialize(get_paca()->stab_real);
+	/* Initialize the hash table or TLB handling */
+	early_init_mmu();
 
 	DBG(" <- early_setup()\n");
 }
@@ -224,22 +224,11 @@ void __init early_setup(unsigned long dt_ptr)
 
 #ifdef CONFIG_SMP
 void early_setup_secondary(void)
 {
-	struct paca_struct *lpaca = get_paca();
-
 	/* Mark interrupts enabled in PACA */
-	lpaca->soft_enabled = 0;
-
-	/* Initialize hash table for that CPU */
-	htab_initialize_secondary();
+	get_paca()->soft_enabled = 0;
 
-	/* Initialize STAB/SLB. We use a virtual address as it works
-	 * in real mode on pSeries and we want a virutal address on
-	 * iSeries anyway
-	 */
-	if (cpu_has_feature(CPU_FTR_SLB))
-		slb_initialize();
-	else
-		stab_initialize(lpaca->stab_addr);
+	/* Initialize the hash table or TLB handling */
+	early_init_mmu_secondary();
 }
 #endif /* CONFIG_SMP */
@@ -247,7 +236,6 @@ void early_setup_secondary(void)
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
 void smp_release_cpus(void)
 {
-	extern unsigned long __secondary_hold_spinloop;
 	unsigned long *ptr;
 
 	DBG(" -> smp_release_cpus()\n");
@@ -256,12 +244,11 @@ void smp_release_cpus(void)
 	 * all now so they can start to spin on their individual paca
 	 * spinloops. For non SMP kernels, the secondary cpus never get out
 	 * of the common spinloop.
-	 * This is useless but harmless on iSeries, secondaries are already
-	 * waiting on their paca spinloops. */
+	 */
 	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
 			- PHYSICAL_START);
-	*ptr = 1;
+	*ptr = __pa(generic_secondary_smp_init);
 	mb();
 
 	DBG(" <- smp_release_cpus()\n");
@@ -292,23 +279,16 @@ static void __init initialize_cache_info(void)
 		if ( num_cpus == 1 ) {
 			const u32 *sizep, *lsizep;
 			u32 size, lsize;
-			const char *dc, *ic;
-
-			/* Then read cache informations */
-			if (machine_is(powermac)) {
-				dc = "d-cache-block-size";
-				ic = "i-cache-block-size";
-			} else {
-				dc = "d-cache-line-size";
-				ic = "i-cache-line-size";
-			}
 
 			size = 0;
 			lsize = cur_cpu_spec->dcache_bsize;
-			sizep = get_property(np, "d-cache-size", NULL);
+			sizep = of_get_property(np, "d-cache-size", NULL);
 			if (sizep != NULL)
 				size = *sizep;
-			lsizep = get_property(np, dc, NULL);
+			lsizep = of_get_property(np, "d-cache-block-size", NULL);
+			/* fallback if block size missing */
+			if (lsizep == NULL)
+				lsizep = of_get_property(np, "d-cache-line-size", NULL);
 			if (lsizep != NULL)
 				lsize = *lsizep;
 			if (sizep == 0 || lsizep == 0)
@@ -322,10 +302,12 @@ static void __init initialize_cache_info(void)
 
 			size = 0;
 			lsize = cur_cpu_spec->icache_bsize;
-			sizep = get_property(np, "i-cache-size", NULL);
+			sizep = of_get_property(np, "i-cache-size", NULL);
 			if (sizep != NULL)
 				size = *sizep;
-			lsizep = get_property(np, ic, NULL);
+			lsizep = of_get_property(np, "i-cache-block-size", NULL);
+			if (lsizep == NULL)
+				lsizep = of_get_property(np, "i-cache-line-size", NULL);
 			if (lsizep != NULL)
 				lsize = *lsizep;
 			if (sizep == 0 || lsizep == 0)
@@ -356,8 +338,12 @@ void __init setup_system(void)
 	 */
 	do_feature_fixups(cur_cpu_spec->cpu_features,
 			  &__start___ftr_fixup, &__stop___ftr_fixup);
+	do_feature_fixups(cur_cpu_spec->mmu_features,
+			  &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
 	do_feature_fixups(powerpc_firmware_features,
 			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+	do_lwsync_fixups(cur_cpu_spec->cpu_features,
+			 &__start___lwsync_fixup, &__stop___lwsync_fixup);
 
 	/*
 	 * Unflatten the device-tree passed by prom_init or kexec
@@ -370,11 +356,6 @@ void __init setup_system(void)
 	 */
 	initialize_cache_info();
 
-	/*
-	 * Initialize irq remapping subsystem
-	 */
-	irq_early_init();
-
#ifdef CONFIG_PPC_RTAS
 	/*
 	 * Initialize RTAS if available
@@ -425,17 +406,22 @@ void __init setup_system(void)
 	printk("Starting Linux PPC64 %s\n", init_utsname()->version);
 
 	printk("-----------------------------------------------------\n");
-	printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
-	printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
-	printk("ppc64_caches.dcache_line_size = 0x%x\n",
-	       ppc64_caches.dline_size);
-	printk("ppc64_caches.icache_line_size = 0x%x\n",
-	       ppc64_caches.iline_size);
-	printk("htab_address = 0x%p\n", htab_address);
+	printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+	printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
+	if (ppc64_caches.dline_size != 0x80)
+		printk("ppc64_caches.dcache_line_size = 0x%x\n",
+		       ppc64_caches.dline_size);
+	if (ppc64_caches.iline_size != 0x80)
+		printk("ppc64_caches.icache_line_size = 0x%x\n",
+		       ppc64_caches.iline_size);
+#ifdef CONFIG_PPC_STD_MMU_64
+	if (htab_address)
+		printk("htab_address = 0x%p\n", htab_address);
 	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#if PHYSICAL_START > 0
-	printk("physical_start = 0x%x\n", PHYSICAL_START);
-#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
+	if (PHYSICAL_START > 0)
+		printk("physical_start = 0x%llx\n",
+		       (unsigned long long)PHYSICAL_START);
 	printk("-----------------------------------------------------\n");
 
 	DBG(" <- setup_system()\n");
@@ -463,6 +449,24 @@ static void __init irqstack_early_init(void)
 #define irqstack_early_init()
 #endif
 
+#ifdef CONFIG_PPC_BOOK3E
+static void __init exc_lvl_early_init(void)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i) {
+		critirq_ctx[i] = (struct thread_info *)
+			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+		dbgirq_ctx[i] = (struct thread_info *)
+			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+		mcheckirq_ctx[i] = (struct thread_info *)
+			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+	}
+}
+#else
+#define exc_lvl_early_init()
+#endif
+
 /*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled.
@@ -481,11 +485,14 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(0x10000000UL, lmb.rmo_size);
+	limit = min(0x10000000ULL, lmb.rmo_size);
 
-	for_each_possible_cpu(i)
-		paca[i].emergency_sp =
-			__va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
+	for_each_possible_cpu(i) {
+		unsigned long sp;
+		sp  = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+		sp += THREAD_SIZE;
+		paca[i].emergency_sp = __va(sp);
+	}
 }
 
 /*
@@ -513,16 +520,18 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.panic)
 		setup_panic();
 
-	init_mm.start_code = PAGE_OFFSET;
+	init_mm.start_code = (unsigned long)_stext;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = klimit;
 
 	irqstack_early_init();
+	exc_lvl_early_init();
 	emergency_stack_init();
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	stabs_alloc();
-
+#endif
 	/* set up the bootmem stuff with available memory */
 	do_init_bootmem();
 	sparse_init();
@@ -531,9 +540,19 @@ void __init setup_arch(char **cmdline_p)
 	conswitchp = &dummy_con;
 #endif
 
-	ppc_md.setup_arch();
+	if (ppc_md.setup_arch)
+		ppc_md.setup_arch();
+
+#ifdef CONFIG_SWIOTLB
+	if (ppc_swiotlb_enable)
+		swiotlb_init(1);
+#endif
 
 	paging_init();
+
+	/* Initialize the MMU context management stuff */
+	mmu_context_init();
+
 	ppc64_boot_msg(0x15, "Setup Done");
 }
@@ -562,13 +581,6 @@ void ppc64_boot_msg(unsigned int src, const char *msg)
 	printk("[boot]%04x %s\n", src, msg);
 }
 
-/* Print a termination message (print only -- does not stop the kernel) */
-void ppc64_terminate_msg(unsigned int src, const char *msg)
-{
-	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
-	printk("[terminate]%04x %s\n", src, msg);
-}
-
 void cpu_die(void)
 {
 	if (ppc_md.cpu_die)
@@ -576,26 +588,59 @@ void cpu_die(void)
 }
 
 #ifdef CONFIG_SMP
+#define PCPU_DYN_SIZE		()
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
+{
+	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+				    __pa(MAX_DMA_ADDRESS));
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	free_bootmem(__pa(ptr), size);
+}
+
+static int pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+	if (cpu_to_node(from) == cpu_to_node(to))
+		return LOCAL_DISTANCE;
+	else
+		return REMOTE_DISTANCE;
+}
+
 void __init setup_per_cpu_areas(void)
 {
-	int i;
-	unsigned long size;
-	char *ptr;
-
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-#ifdef CONFIG_MODULES
-	if (size < PERCPU_ENOUGH_ROOM)
-		size = PERCPU_ENOUGH_ROOM;
-#endif
+	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
+	size_t atom_size;
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
 
-	for_each_possible_cpu(i) {
-		ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
-		if (!ptr)
-			panic("Cannot allocate cpu data for CPU %d\n", i);
+	/*
+	 * Linear mapping is one of 4K, 1M and 16M.  For 4K, no need
+	 * to group units.  For larger mappings, use 1M atom which
+	 * should be large enough to contain a number of units.
+	 */
+	if (mmu_linear_psize == MMU_PAGE_4K)
+		atom_size = PAGE_SIZE;
+	else
+		atom_size = 1 << 20;
 
-		paca[i].data_offset = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-	}
+	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
+				    pcpu_fc_alloc, pcpu_fc_free);
+	if (rc < 0)
+		panic("cannot initialize percpu area (err=%d)", rc);
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		paca[cpu].data_offset = delta + pcpu_unit_offsets[cpu];
 }
 #endif
+
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+struct ppc_pci_io ppc_pci_io;
+EXPORT_SYMBOL(ppc_pci_io);
+#endif /* CONFIG_PPC_INDIRECT_IO */
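
Note on the early_init_mmu() / early_init_mmu_secondary() calls introduced above: they abstract the MMU bring-up that the old code open-coded in early_setup() and early_setup_secondary(). A minimal sketch of what the hash-MMU (Book3S 64) variant amounts to, reconstructed from the lines this diff removes; the body is illustrative only, not the exact in-tree implementation, and the _sketch name is hypothetical:

	/* Sketch (hypothetical helper): the hash-MMU side of early_init_mmu(),
	 * pieced together from the removed code above. */
	static void __init early_init_mmu_sketch(void)
	{
		/* Build the hash page table and the linear mapping first;
		 * this is where the page size encodings are determined. */
		htab_initialize();

		/* Then set up segment translation: the SLB on CPUs that
		 * have one, otherwise the older segment table (STAB). */
		if (cpu_has_feature(CPU_FTR_SLB))
			slb_initialize();
		else
			stab_initialize(get_paca()->stab_real);
	}

On Book3E parts the same hook instead performs TLB setup (note the SPRN_SPRG_TLB_EXFRAME and exc_lvl_early_init() additions above), which is why the call sites can drop the SLB/iSeries special-casing.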