X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=init%2Fmain.c;h=1a65fdd06318f05c81e68d89fe7b80a3a1fcbbc5;hb=b99b87f70c7785ab1e253c6220f4b0b57ce3a7f7;hp=b5a892c68375287188f62125267b5cf51dbf2770;hpb=d2e3192b6e372a441c18bc8cb32f89ef38f105b7;p=safe%2Fjmp%2Flinux-2.6

diff --git a/init/main.c b/init/main.c
index b5a892c..1a65fdd 100644
--- a/init/main.c
+++ b/init/main.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -55,6 +56,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -62,6 +64,9 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
 
@@ -96,7 +101,7 @@ static inline void mark_rodata_ro(void) { }
 extern void tc_init(void);
 #endif
 
-enum system_states system_state;
+enum system_states system_state __read_mostly;
 EXPORT_SYMBOL(system_state);
 
 /*
@@ -134,14 +139,14 @@ unsigned int __initdata setup_max_cpus = NR_CPUS;
  * greater than 0, limits the maximum number of CPUs activated in
  * SMP mode to <NUM>.
  */
-#ifndef CONFIG_X86_IO_APIC
-static inline void disable_ioapic_setup(void) {};
-#endif
+
+void __weak arch_disable_smp_support(void) { }
 
 static int __init nosmp(char *str)
 {
 	setup_max_cpus = 0;
-	disable_ioapic_setup();
+	arch_disable_smp_support();
+
 	return 0;
 }
 
@@ -151,14 +156,14 @@ static int __init maxcpus(char *str)
 {
 	get_option(&str, &setup_max_cpus);
 	if (setup_max_cpus == 0)
-		disable_ioapic_setup();
+		arch_disable_smp_support();
 
 	return 0;
 }
 
 early_param("maxcpus", maxcpus);
 #else
-#define setup_max_cpus NR_CPUS
+const unsigned int setup_max_cpus = NR_CPUS;
 #endif
 
 /*
@@ -405,8 +410,7 @@ static void __init smp_init(void)
 	 * Set up the current CPU as possible to migrate to.
 	 * The other ones will be done by cpu_up/cpu_down()
 	 */
-	cpu = smp_processor_id();
-	cpu_set(cpu, cpu_active_map);
+	set_cpu_active(smp_processor_id(), true);
 
 	/* FIXME: This should be done in userspace --RR */
 	for_each_present_cpu(cpu) {
@@ -462,6 +466,7 @@ static noinline void __init_refok rest_init(void)
 	 * at least once to get things moving:
 	 */
 	init_idle_bootup_task(current);
+	rcu_scheduler_starting();
 	preempt_enable_no_resched();
 	schedule();
 	preempt_disable();
@@ -489,6 +494,11 @@ static int __init do_early_param(char *param, char *val)
 	return 0;
 }
 
+void __init parse_early_options(char *cmdline)
+{
+	parse_args("early options", cmdline, NULL, 0, do_early_param);
+}
+
 /* Arch code calls this early on, or if not, just before other parsing. */
 void __init parse_early_param(void)
 {
@@ -500,7 +510,7 @@ void __init parse_early_param(void)
 
 	/* All fall through to do_early_param. */
 	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-	parse_args("early options", tmp_cmdline, NULL, 0, do_early_param);
+	parse_early_options(tmp_cmdline);
 	done = 1;
 }
 
@@ -525,6 +535,22 @@ void __init __weak thread_info_cache_init(void)
 {
 }
 
+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+	/*
+	 * page_cgroup requires contiguous pages as memmap
+	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
+	 */
+	page_cgroup_init_flatmem();
+	mem_init();
+	kmem_cache_init();
+	pgtable_cache_init();
+	vmalloc_init();
+}
+
 asmlinkage void __init start_kernel(void)
 {
 	char * command_line;
@@ -538,6 +564,12 @@ asmlinkage void __init start_kernel(void)
 	 */
 	lockdep_init();
 	debug_objects_early_init();
+
+	/*
+	 * Set up the initial canary ASAP:
+	 */
+	boot_init_stack_canary();
+
 	cgroup_init_early();
 
 	local_irq_disable();
@@ -552,8 +584,7 @@ asmlinkage void __init start_kernel(void)
 	tick_init();
 	boot_cpu_init();
 	page_address_init();
-	printk(KERN_NOTICE);
-	printk(linux_banner);
+	printk(KERN_NOTICE "%s", linux_banner);
 	setup_arch(&command_line);
 	mm_init_owner(&init_mm, &init_task);
 	setup_command_line(command_line);
@@ -561,6 +592,23 @@ asmlinkage void __init start_kernel(void)
 	setup_nr_cpu_ids();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
+	build_all_zonelists();
+	page_alloc_init();
+
+	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
+	parse_early_param();
+	parse_args("Booting kernel", static_command_line, __start___param,
+		   __stop___param - __start___param,
+		   &unknown_bootoption);
+	/*
+	 * These use large bootmem allocations and must precede
+	 * kmem_cache_init()
+	 */
+	pidhash_init();
+	vfs_caches_init_early();
+	sort_main_extable();
+	trap_init();
+	mm_init();
 	/*
 	 * Set up the scheduler prior starting any interrupts (such as the
 	 * timer interrupt). Full topology setup happens at smp_init()
@@ -572,25 +620,16 @@ asmlinkage void __init start_kernel(void)
 	 * fragile until we cpu_idle() for the first time.
 	 */
 	preempt_disable();
-	build_all_zonelists();
-	page_alloc_init();
-	printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
-	parse_early_param();
-	parse_args("Booting kernel", static_command_line, __start___param,
-		   __stop___param - __start___param,
-		   &unknown_bootoption);
 	if (!irqs_disabled()) {
 		printk(KERN_WARNING "start_kernel(): bug: interrupts were "
 				"enabled *very* early, fixing it\n");
 		local_irq_disable();
 	}
-	sort_main_extable();
-	trap_init();
 	rcu_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
-	pidhash_init();
+	prio_tree_init();
 	init_timers();
 	hrtimers_init();
 	softirq_init();
@@ -599,9 +638,11 @@ asmlinkage void __init start_kernel(void)
 	sched_clock_init();
 	profile_init();
 	if (!irqs_disabled())
-		printk("start_kernel(): bug: interrupts were enabled early\n");
+		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
+		       "enabled early\n");
 	early_boot_irqs_on();
 	local_irq_enable();
+	kmem_cache_init_late();
 
 	/*
 	 * HACK ALERT! This is early. We're enabling the console before
@@ -631,14 +672,11 @@ asmlinkage void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
-	vmalloc_init();
-	vfs_caches_init_early();
-	cpuset_init_early();
 	page_cgroup_init();
-	mem_init();
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
-	kmem_cache_init();
+	kmemtrace_init();
+	kmemleak_init();
 	debug_objects_mem_init();
 	idr_init_cache();
 	setup_per_cpu_pageset();
@@ -647,8 +685,6 @@ asmlinkage void __init start_kernel(void)
 		late_time_init();
 	calibrate_delay();
 	pidmap_init();
-	pgtable_cache_init();
-	prio_tree_init();
 	anon_vma_init();
 #ifdef CONFIG_X86
 	if (efi_enabled)
@@ -684,7 +720,18 @@ asmlinkage void __init start_kernel(void)
 	rest_init();
 }
 
-static int initcall_debug;
+/* Call all constructor functions linked into the kernel. */
+static void __init do_ctors(void)
+{
+#ifdef CONFIG_CONSTRUCTORS
+	ctor_fn_t *call = (ctor_fn_t *) __ctors_start;
+
+	for (; call < (ctor_fn_t *) __ctors_end; call++)
+		(*call)();
+#endif
+}
+
+int initcall_debug;
 core_param(initcall_debug, initcall_debug, bool, 0644);
 
 int do_one_initcall(initcall_t fn)
@@ -760,9 +807,11 @@ static void __init do_basic_setup(void)
 {
 	rcu_init_sched(); /* needed by module_init stage. */
 	init_workqueues();
+	cpuset_init_smp();
 	usermodehelper_init();
 	driver_init();
 	init_irq_proc();
+	do_ctors();
 	do_initcalls();
 }
 
@@ -784,7 +833,10 @@ static void run_init_process(char *init_filename)
  * makes it inline to init() and it becomes part of init.text section
 */
 static noinline int init_post(void)
+	__releases(kernel_lock)
 {
+	/* need to finish all async __init code before freeing the memory */
+	async_synchronize_full();
 	free_initmem();
 	unlock_kernel();
 	mark_rodata_ro();
@@ -827,10 +879,15 @@ static noinline int init_post(void)
 static int __init kernel_init(void * unused)
 {
 	lock_kernel();
+
+	/*
+	 * init can allocate pages on any node
+	 */
+	set_mems_allowed(node_possible_map);
 	/*
 	 * init can run on any cpu.
 	 */
-	set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+	set_cpus_allowed_ptr(current, cpu_all_mask);
 	/*
 	 * Tell the world that we're going to be the grim
 	 * reaper of innocent orphaned children.
@@ -851,8 +908,6 @@ static int __init kernel_init(void * unused)
 	smp_init();
 	sched_init_smp();
 
-	cpuset_init_smp();
-
 	do_basic_setup();
 
 	/*
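The nosmp/maxcpus hunks above replace a CONFIG_X86_IO_APIC #ifdef with a weak symbol: the generic kernel supplies an empty arch_disable_smp_support(), and an architecture that has real work to do (such as x86 disabling IO-APIC setup) links in a strong definition that overrides it. Below is a minimal userspace sketch of the same link-time behavior; it uses GCC's weak attribute directly, which is what the kernel's __weak macro expands to.

#include <stdio.h>

/* Weak default: does nothing, mirroring the generic kernel stub. */
__attribute__((weak)) void arch_disable_smp_support(void)
{
	puts("generic stub: nothing to disable");
}

int main(void)
{
	/*
	 * If another object file in the link defines a non-weak
	 * arch_disable_smp_support(), the linker picks that one and
	 * this call runs the override instead of the stub above.
	 */
	arch_disable_smp_support();
	return 0;
}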
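The new do_ctors() walks a linker-built table of function pointers bounded by __ctors_start and __ctors_end. A self-contained sketch of the same table walk follows; an ordinary array stands in for the linker section, so init_foo, init_bar, and ctors are illustrative names, not kernel symbols.

#include <stdio.h>

typedef void (*ctor_fn_t)(void);

static void init_foo(void) { puts("foo constructor"); }
static void init_bar(void) { puts("bar constructor"); }

/*
 * Stand-in for the .ctors section that the kernel's linker script
 * brackets with __ctors_start/__ctors_end.
 */
static ctor_fn_t ctors[] = { init_foo, init_bar };

int main(void)
{
	ctor_fn_t *call;

	/* Same loop shape as do_ctors(): invoke each constructor in link order. */
	for (call = ctors; call < ctors + sizeof(ctors) / sizeof(ctors[0]); call++)
		(*call)();
	return 0;
}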