X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=init%2Fmain.c;h=3bdb152f412f32b8edeeb1036a24f934988b5be2;hb=9940fa80ce0e546646b057958960c5c6fd2bbbb2;hp=6bf83afd654da44b5b52383a4697d4d98e0ff6fa;hpb=a1413c89ae6a4b7a9a43f7768934a81ffb5c629a;p=safe%2Fjmp%2Flinux-2.6
diff --git a/init/main.c b/init/main.c
index 6bf83af..3bdb152 100644
--- a/init/main.c
+++ b/init/main.c
@@ -18,14 +18,13 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
-#include
 #include
 #include
 #include
@@ -56,14 +55,21 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
 #include
 
 #include
@@ -85,11 +91,6 @@ extern void sbus_init(void);
 extern void prio_tree_init(void);
 extern void radix_tree_init(void);
 extern void free_initmem(void);
-#ifdef CONFIG_ACPI
-extern void acpi_early_init(void);
-#else
-static inline void acpi_early_init(void) { }
-#endif
 #ifndef CONFIG_DEBUG_RODATA
 static inline void mark_rodata_ro(void) { }
 #endif
@@ -149,6 +150,20 @@ static int __init nosmp(char *str)
 
 early_param("nosmp", nosmp);
 
+/* this is hard limit */
+static int __init nrcpus(char *str)
+{
+	int nr_cpus;
+
+	get_option(&str, &nr_cpus);
+	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
+		nr_cpu_ids = nr_cpus;
+
+	return 0;
+}
+
+early_param("nr_cpus", nrcpus);
+
 static int __init maxcpus(char *str)
 {
 	get_option(&str, &setup_max_cpus);
@@ -160,7 +175,7 @@ static int __init maxcpus(char *str)
 early_param("maxcpus", maxcpus);
 
 #else
-const unsigned int setup_max_cpus = NR_CPUS;
+static const unsigned int setup_max_cpus = NR_CPUS;
 #endif
 
 /*
@@ -251,7 +266,7 @@ early_param("loglevel", loglevel);
 
 /*
  * Unknown boot options get handed to init, unless they look like
- * failed parameters
+ * unused parameters (modprobe will find them in /proc/cmdline).
  */
 static int __init unknown_bootoption(char *param, char *val)
 {
@@ -272,14 +287,9 @@ static int __init unknown_bootoption(char *param, char *val)
 	if (obsolete_checksetup(param))
 		return 0;
 
-	/*
-	 * Preemptive maintenance for "why didn't my misspelled command
-	 * line work?"
-	 */
-	if (strchr(param, '.') && (!val || strchr(param, '.') < val)) {
-		printk(KERN_ERR "Unknown boot option `%s': ignoring\n", param);
+	/* Unused module parameter. */
+	if (strchr(param, '.') && (!val || strchr(param, '.') < val))
 		return 0;
-	}
 
 	if (panic_later)
 		return 0;
@@ -354,17 +364,11 @@ static void __init smp_init(void)
 #define smp_init()	do { } while (0)
 #endif
 
-static inline void setup_per_cpu_areas(void) { }
 static inline void setup_nr_cpu_ids(void) { }
 static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 
 #else
 
-#if NR_CPUS > BITS_PER_LONG
-cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_mask_all);
-#endif
-
 /* Setup number of possible processor ids */
 int nr_cpu_ids __read_mostly = NR_CPUS;
 EXPORT_SYMBOL(nr_cpu_ids);
@@ -375,41 +379,11 @@ static void __init setup_nr_cpu_ids(void)
 	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
 }
 
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static void __init setup_per_cpu_areas(void)
-{
-	unsigned long size, i;
-	char *ptr;
-	unsigned long nr_possible_cpus = num_possible_cpus();
-
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-	for_each_possible_cpu(i) {
-		__per_cpu_offset[i] = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		ptr += size;
-	}
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
-
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
 	unsigned int cpu;
 
-	/*
-	 * Set up the current CPU as possible to migrate to.
-	 * The other ones will be done by cpu_up/cpu_down()
-	 */
-	cpu = smp_processor_id();
-	cpu_set(cpu, cpu_active_map);
-
 	/* FIXME: This should be done in userspace --RR */
 	for_each_present_cpu(cpu) {
 		if (num_online_cpus() >= setup_max_cpus)
@@ -453,10 +427,13 @@ static noinline void __init_refok rest_init(void)
 {
 	int pid;
 
+	rcu_scheduler_starting();
 	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
 	numa_default_policy();
 	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+	rcu_read_lock();
 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+	rcu_read_unlock();
 	unlock_kernel();
 
 	/*
@@ -464,7 +441,6 @@ static noinline void __init_refok rest_init(void)
 	 * at least once to get things moving:
 	 */
 	init_idle_bootup_task(current);
-	rcu_scheduler_starting();
 	preempt_enable_no_resched();
 	schedule();
 	preempt_disable();
@@ -492,6 +468,11 @@ static int __init do_early_param(char *param, char *val)
 	return 0;
 }
 
+void __init parse_early_options(char *cmdline)
+{
+	parse_args("early options", cmdline, NULL, 0, do_early_param);
+}
+
 /* Arch code calls this early on, or if not, just before other parsing. */
 void __init parse_early_param(void)
 {
@@ -503,7 +484,7 @@ void __init parse_early_param(void)
 
 	/* All fall through to do_early_param. */
 	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-	parse_args("early options", tmp_cmdline, NULL, 0, do_early_param);
+	parse_early_options(tmp_cmdline);
 	done = 1;
 }
 
@@ -516,6 +497,7 @@ static void __init boot_cpu_init(void)
 	int cpu = smp_processor_id();
 	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
 	set_cpu_online(cpu, true);
+	set_cpu_active(cpu, true);
 	set_cpu_present(cpu, true);
 	set_cpu_possible(cpu, true);
 }
@@ -528,6 +510,22 @@ void __init __weak thread_info_cache_init(void)
 {
 }
 
+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+	/*
+	 * page_cgroup requires countinous pages as memmap
+	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
+ */ + page_cgroup_init_flatmem(); + mem_init(); + kmem_cache_init(); + pgtable_cache_init(); + vmalloc_init(); +} + asmlinkage void __init start_kernel(void) { char * command_line; @@ -561,15 +559,31 @@ asmlinkage void __init start_kernel(void) tick_init(); boot_cpu_init(); page_address_init(); - printk(KERN_NOTICE); - printk(linux_banner); + printk(KERN_NOTICE "%s", linux_banner); setup_arch(&command_line); mm_init_owner(&init_mm, &init_task); setup_command_line(command_line); - setup_per_cpu_areas(); setup_nr_cpu_ids(); + setup_per_cpu_areas(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + build_all_zonelists(NULL); + page_alloc_init(); + + printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line); + parse_early_param(); + parse_args("Booting kernel", static_command_line, __start___param, + __stop___param - __start___param, + &unknown_bootoption); + /* + * These use large bootmem allocations and must precede + * kmem_cache_init() + */ + pidhash_init(); + vfs_caches_init_early(); + sort_main_extable(); + trap_init(); + mm_init(); /* * Set up the scheduler prior starting any interrupts (such as the * timer interrupt). Full topology setup happens at smp_init() @@ -581,31 +595,22 @@ asmlinkage void __init start_kernel(void) * fragile until we cpu_idle() for the first time. */ preempt_disable(); - build_all_zonelists(); - page_alloc_init(); - printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line); - parse_early_param(); - parse_args("Booting kernel", static_command_line, __start___param, - __stop___param - __start___param, - &unknown_bootoption); if (!irqs_disabled()) { printk(KERN_WARNING "start_kernel(): bug: interrupts were " "enabled *very* early, fixing it\n"); local_irq_disable(); } - sort_main_extable(); - trap_init(); rcu_init(); + radix_tree_init(); /* init some links before init_ISA_irqs() */ early_irq_init(); init_IRQ(); - pidhash_init(); + prio_tree_init(); init_timers(); hrtimers_init(); softirq_init(); timekeeping_init(); time_init(); - sched_clock_init(); profile_init(); if (!irqs_disabled()) printk(KERN_CRIT "start_kernel(): bug: interrupts were " @@ -613,6 +618,11 @@ asmlinkage void __init start_kernel(void) early_boot_irqs_on(); local_irq_enable(); + /* Interrupts are enabled now so all GFP allocations are safe. */ + gfp_allowed_mask = __GFP_BITS_MASK; + + kmem_cache_init_late(); + /* * HACK ALERT! This is early. 
 	 * we've done PCI setups etc, and console_init() must be aware of
@@ -641,24 +651,19 @@ asmlinkage void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
-	vmalloc_init();
-	vfs_caches_init_early();
-	cpuset_init_early();
 	page_cgroup_init();
-	mem_init();
 	enable_debug_pagealloc();
-	cpu_hotplug_init();
-	kmem_cache_init();
+	kmemtrace_init();
+	kmemleak_init();
 	debug_objects_mem_init();
 	idr_init_cache();
 	setup_per_cpu_pageset();
 	numa_policy_init();
 	if (late_time_init)
 		late_time_init();
+	sched_clock_init();
 	calibrate_delay();
 	pidmap_init();
-	pgtable_cache_init();
-	prio_tree_init();
 	anon_vma_init();
 #ifdef CONFIG_X86
 	if (efi_enabled)
@@ -666,13 +671,13 @@ asmlinkage void __init start_kernel(void)
 #endif
 	thread_info_cache_init();
 	cred_init();
-	fork_init(num_physpages);
+	fork_init(totalram_pages);
 	proc_caches_init();
 	buffer_init();
 	key_init();
 	security_init();
-	vfs_caches_init(num_physpages);
-	radix_tree_init();
+	dbg_late_init();
+	vfs_caches_init(totalram_pages);
 	signals_init();
 	/* rootfs populating might need page-writeback */
 	page_writeback_init();
@@ -687,6 +692,7 @@ asmlinkage void __init start_kernel(void)
 	check_bugs();
 
 	acpi_early_init(); /* before LAPIC and SMP init */
+	sfi_init_late();
 
 	ftrace_init();
 
@@ -694,16 +700,28 @@ asmlinkage void __init start_kernel(void)
 
 	rest_init();
 }
 
+/* Call all constructor functions linked into the kernel. */
+static void __init do_ctors(void)
+{
+#ifdef CONFIG_CONSTRUCTORS
+	ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
+
+	for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+		(*fn)();
+#endif
+}
+
 int initcall_debug;
 core_param(initcall_debug, initcall_debug, bool, 0644);
 
+static char msgbuf[64];
+static struct boot_trace_call call;
+static struct boot_trace_ret ret;
+
 int do_one_initcall(initcall_t fn)
 {
 	int count = preempt_count();
 	ktime_t calltime, delta, rettime;
-	char msgbuf[64];
-	struct boot_trace_call call;
-	struct boot_trace_ret ret;
 
 	if (initcall_debug) {
 		call.caller = task_pid_nr(current);
@@ -750,10 +768,10 @@ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
 
 static void __init do_initcalls(void)
 {
-	initcall_t *call;
+	initcall_t *fn;
 
-	for (call = __early_initcall_end; call < __initcall_end; call++)
-		do_one_initcall(*call);
+	for (fn = __early_initcall_end; fn < __initcall_end; fn++)
+		do_one_initcall(*fn);
 
 	/* Make sure there is no pending stuff from the initcall sequence */
 	flush_scheduled_work();
@@ -768,20 +786,22 @@ static void __init do_initcalls(void)
  */
 static void __init do_basic_setup(void)
 {
-	rcu_init_sched(); /* needed by module_init stage. */
 	init_workqueues();
+	cpuset_init_smp();
 	usermodehelper_init();
+	init_tmpfs();
 	driver_init();
 	init_irq_proc();
+	do_ctors();
 	do_initcalls();
 }
 
 static void __init do_pre_smp_initcalls(void)
 {
-	initcall_t *call;
+	initcall_t *fn;
 
-	for (call = __initcall_start; call < __early_initcall_end; call++)
-		do_one_initcall(*call);
+	for (fn = __initcall_start; fn < __early_initcall_end; fn++)
+		do_one_initcall(*fn);
 }
 
 static void run_init_process(char *init_filename)
@@ -794,6 +814,7 @@ static void run_init_process(char *init_filename)
  * makes it inline to init() and it becomes part of init.text section
  */
 static noinline int init_post(void)
+	__releases(kernel_lock)
 {
 	/* need to finish all async __init code before freeing the memory */
 	async_synchronize_full();
@@ -803,11 +824,6 @@ static noinline int init_post(void)
 	system_state = SYSTEM_RUNNING;
 	numa_default_policy();
 
-	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
-		printk(KERN_WARNING "Warning: unable to open an initial console.\n");
-
-	(void) sys_dup(0);
-	(void) sys_dup(0);
 
 	current->signal->flags |= SIGNAL_UNKILLABLE;
 
@@ -833,16 +849,22 @@ static noinline int init_post(void)
 	run_init_process("/bin/init");
 	run_init_process("/bin/sh");
 
-	panic("No init found. Try passing init= option to kernel.");
+	panic("No init found. Try passing init= option to kernel. "
+	      "See Linux Documentation/init.txt for guidance.");
 }
 
 static int __init kernel_init(void * unused)
 {
 	lock_kernel();
+
+	/*
+	 * init can allocate pages on any node
+	 */
+	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 	/*
 	 * init can run on any cpu.
 	 */
-	set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+	set_cpus_allowed_ptr(current, cpu_all_mask);
 	/*
 	 * Tell the world that we're going to be the grim
 	 * reaper of innocent orphaned children.
@@ -863,10 +885,14 @@ static int __init kernel_init(void * unused)
 	smp_init();
 	sched_init_smp();
 
-	cpuset_init_smp();
-
 	do_basic_setup();
 
+	/* Open the /dev/console on the rootfs, this should never fail */
+	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+		printk(KERN_WARNING "Warning: unable to open an initial console.\n");
+
+	(void) sys_dup(0);
+	(void) sys_dup(0);
 	/*
 	 * check if there is an early userspace init. If yes, let it do all
 	 * the work