X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=init%2Fmain.c;h=bb7dc57eee36ed8079e63bdf1a4c725d4c97e45f;hb=862366118026a358882eefc70238dbcc3db37aac;hp=e7939de80f3eb515a3064b1e111bcaa9f87bbb84;hpb=097d036a2f25eecc42435c57e010aaf4a2eed2d9;p=safe%2Fjmp%2Flinux-2.6

diff --git a/init/main.c b/init/main.c
index e7939de..bb7dc57 100644
--- a/init/main.c
+++ b/init/main.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -27,6 +28,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -49,8 +51,8 @@
 #include
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 #include
@@ -61,6 +63,9 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -72,15 +77,6 @@
 #include
 #endif
 
-/*
- * This is one of the first .c files built. Error out early if we have compiler
- * trouble.
- */
-
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0
-#warning gcc-4.1.0 is known to miscompile the kernel. A different compiler version is recommended.
-#endif
-
 static int kernel_init(void *);
 
 extern void init_IRQ(void);
@@ -103,7 +99,7 @@ static inline void mark_rodata_ro(void) { }
 extern void tc_init(void);
 #endif
 
-enum system_states system_state;
+enum system_states system_state __read_mostly;
 EXPORT_SYMBOL(system_state);
 
 /*
@@ -114,7 +110,7 @@ EXPORT_SYMBOL(system_state);
 
 extern void time_init(void);
 /* Default late time init is NULL. archs can override this later. */
-void (*late_time_init)(void);
+void (*__initdata late_time_init)(void);
 extern void softirq_init(void);
 
 /* Untouched command line saved by arch-specific code. */
@@ -141,14 +137,14 @@ unsigned int __initdata setup_max_cpus = NR_CPUS;
  * greater than 0, limits the maximum number of CPUs activated in
  * SMP mode to <NUM>.
  */
-#ifndef CONFIG_X86_IO_APIC
-static inline void disable_ioapic_setup(void) {};
-#endif
+
+void __weak arch_disable_smp_support(void) { }
 
 static int __init nosmp(char *str)
 {
 	setup_max_cpus = 0;
-	disable_ioapic_setup();
+	arch_disable_smp_support();
+
 	return 0;
 }
 
@@ -158,14 +154,14 @@ static int __init maxcpus(char *str)
 {
 	get_option(&str, &setup_max_cpus);
 	if (setup_max_cpus == 0)
-		disable_ioapic_setup();
+		arch_disable_smp_support();
 
 	return 0;
 }
 
 early_param("maxcpus", maxcpus);
 #else
-#define setup_max_cpus NR_CPUS
+const unsigned int setup_max_cpus = NR_CPUS;
 #endif
 
 /*
@@ -377,12 +373,7 @@ EXPORT_SYMBOL(nr_cpu_ids);
 /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
 static void __init setup_nr_cpu_ids(void)
 {
-	int cpu, highest_cpu = 0;
-
-	for_each_possible_cpu(cpu)
-		highest_cpu = cpu;
-
-	nr_cpu_ids = highest_cpu + 1;
+	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
 }
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
@@ -417,8 +408,7 @@ static void __init smp_init(void)
 	 * Set up the current CPU as possible to migrate to.
 	 * The other ones will be done by cpu_up/cpu_down()
 	 */
-	cpu = smp_processor_id();
-	cpu_set(cpu, cpu_active_map);
+	set_cpu_active(smp_processor_id(), true);
 
 	/* FIXME: This should be done in userspace --RR */
 	for_each_present_cpu(cpu) {
@@ -458,7 +448,7 @@ static void __init setup_command_line(char *command_line)
  *
  * gcc-3.4 accidentally inlines this function, so use noinline.
  */
-static void noinline __init_refok rest_init(void)
+static noinline void __init_refok rest_init(void)
 	__releases(kernel_lock)
 {
 	int pid;
@@ -474,6 +464,7 @@ static void noinline __init_refok rest_init(void)
 	 * at least once to get things moving:
 	 */
 	init_idle_bootup_task(current);
+	rcu_scheduler_starting();
 	preempt_enable_no_resched();
 	schedule();
 	preempt_disable();
@@ -501,6 +492,11 @@ static int __init do_early_param(char *param, char *val)
 	return 0;
 }
 
+void __init parse_early_options(char *cmdline)
+{
+	parse_args("early options", cmdline, NULL, 0, do_early_param);
+}
+
 /* Arch code calls this early on, or if not, just before other parsing. */
 void __init parse_early_param(void)
 {
@@ -512,7 +508,7 @@ void __init parse_early_param(void)
 
 	/* All fall through to do_early_param. */
 	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-	parse_args("early options", tmp_cmdline, NULL, 0, do_early_param);
+	parse_early_options(tmp_cmdline);
 	done = 1;
 }
 
@@ -524,9 +520,9 @@ static void __init boot_cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
-	cpu_set(cpu, cpu_online_map);
-	cpu_set(cpu, cpu_present_map);
-	cpu_set(cpu, cpu_possible_map);
+	set_cpu_online(cpu, true);
+	set_cpu_present(cpu, true);
+	set_cpu_possible(cpu, true);
 }
 
 void __init __weak smp_setup_processor_id(void)
@@ -548,9 +544,14 @@ asmlinkage void __init start_kernel(void)
 	 * Need to run as early as possible, to initialize the
 	 * lockdep hash:
 	 */
-	unwind_init();
 	lockdep_init();
 	debug_objects_early_init();
+
+	/*
+	 * Set up the the initial canary ASAP:
+	 */
+	boot_init_stack_canary();
+
 	cgroup_init_early();
 
 	local_irq_disable();
@@ -565,12 +566,10 @@ asmlinkage void __init start_kernel(void)
 	tick_init();
 	boot_cpu_init();
 	page_address_init();
-	printk(KERN_NOTICE);
-	printk(linux_banner);
+	printk(KERN_NOTICE "%s", linux_banner);
 	setup_arch(&command_line);
 	mm_init_owner(&init_mm, &init_task);
 	setup_command_line(command_line);
-	unwind_setup();
 	setup_per_cpu_areas();
 	setup_nr_cpu_ids();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
@@ -601,6 +600,8 @@ asmlinkage void __init start_kernel(void)
 	sort_main_extable();
 	trap_init();
 	rcu_init();
+	/* init some links before init_ISA_irqs() */
+	early_irq_init();
 	init_IRQ();
 	pidhash_init();
 	init_timers();
@@ -611,7 +612,8 @@ asmlinkage void __init start_kernel(void)
 	sched_clock_init();
 	profile_init();
 	if (!irqs_disabled())
-		printk("start_kernel(): bug: interrupts were enabled early\n");
+		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
+				 "enabled early\n");
 	early_boot_irqs_on();
 	local_irq_enable();
 
@@ -643,12 +645,15 @@ asmlinkage void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
+	vmalloc_init();
 	vfs_caches_init_early();
 	cpuset_init_early();
+	page_cgroup_init();
 	mem_init();
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
 	kmem_cache_init();
+	kmemtrace_init();
 	debug_objects_mem_init();
 	idr_init_cache();
 	setup_per_cpu_pageset();
@@ -665,10 +670,10 @@ asmlinkage void __init start_kernel(void)
 		efi_enter_virtual_mode();
 #endif
 	thread_info_cache_init();
+	cred_init();
 	fork_init(num_physpages);
 	proc_caches_init();
 	buffer_init();
-	unnamed_dev_init();
 	key_init();
 	security_init();
 	vfs_caches_init(num_physpages);
@@ -694,43 +699,41 @@ asmlinkage void __init start_kernel(void)
 	rest_init();
 }
 
-static int initcall_debug;
-
-static int __init initcall_debug_setup(char *str)
-{
-	initcall_debug = 1;
-	return 1;
-}
-__setup("initcall_debug", initcall_debug_setup);
+int initcall_debug;
+core_param(initcall_debug, initcall_debug, bool, 0644);
 
 int do_one_initcall(initcall_t fn)
 {
 	int count = preempt_count();
-	ktime_t delta;
+	ktime_t calltime, delta, rettime;
 	char msgbuf[64];
-	struct boot_trace it;
+	struct boot_trace_call call;
+	struct boot_trace_ret ret;
 
 	if (initcall_debug) {
-		it.caller = task_pid_nr(current);
-		printk("calling %pF @ %i\n", fn, it.caller);
-		it.calltime = ktime_get();
+		call.caller = task_pid_nr(current);
+		printk("calling %pF @ %i\n", fn, call.caller);
+		calltime = ktime_get();
+		trace_boot_call(&call, fn);
+		enable_boot_trace();
 	}
 
-	it.result = fn();
+	ret.result = fn();
 
 	if (initcall_debug) {
-		it.rettime = ktime_get();
-		delta = ktime_sub(it.rettime, it.calltime);
-		it.duration = (unsigned long long) delta.tv64 >> 20;
-		printk("initcall %pF returned %d after %Ld msecs\n", fn,
-			it.result, it.duration);
-		trace_boot(&it, fn);
+		disable_boot_trace();
+		rettime = ktime_get();
+		delta = ktime_sub(rettime, calltime);
+		ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+		trace_boot_ret(&ret, fn);
+		printk("initcall %pF returned %d after %Ld usecs\n", fn,
+			ret.result, ret.duration);
 	}
 
 	msgbuf[0] = 0;
-	if (it.result && it.result != -ENODEV && initcall_debug)
-		sprintf(msgbuf, "error code %d ", it.result);
+	if (ret.result && ret.result != -ENODEV && initcall_debug)
+		sprintf(msgbuf, "error code %d ", ret.result);
 
 	if (preempt_count() != count) {
 		strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
@@ -744,7 +747,7 @@ int do_one_initcall(initcall_t fn)
 		printk("initcall %pF returned with %s\n", fn, msgbuf);
 	}
 
-	return it.result;
+	return ret.result;
 }
 
@@ -771,8 +774,8 @@ static void __init do_initcalls(void)
 static void __init do_basic_setup(void)
 {
 	rcu_init_sched(); /* needed by module_init stage. */
-	/* drivers will send hotplug events */
 	init_workqueues();
+	cpuset_init_smp();
 	usermodehelper_init();
 	driver_init();
 	init_irq_proc();
@@ -796,8 +799,11 @@ static void run_init_process(char *init_filename)
 /* This is a non __init function. Force it to be noinline otherwise gcc
  * makes it inline to init() and it becomes part of init.text section
  */
-static int noinline init_post(void)
+static noinline int init_post(void)
+	__releases(kernel_lock)
 {
+	/* need to finish all async __init code before freeing the memory */
+	async_synchronize_full();
 	free_initmem();
 	unlock_kernel();
 	mark_rodata_ro();
@@ -843,7 +849,7 @@ static int __init kernel_init(void * unused)
 	/*
 	 * init can run on any cpu.
 	 */
-	set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+	set_cpus_allowed_ptr(current, cpu_all_mask);
 	/*
 	 * Tell the world that we're going to be the grim
 	 * reaper of innocent orphaned children.
@@ -864,8 +870,6 @@ static int __init kernel_init(void * unused)
 	smp_init();
 	sched_init_smp();
 
-	cpuset_init_smp();
-
 	do_basic_setup();
 
 	/*
@@ -886,7 +890,7 @@ static int __init kernel_init(void * unused)
 	 * we're essentially up and running. Get rid of the
 	 * initmem segments and start the user-mode stuff..
 	 */
-	stop_boot_trace();
+
 	init_post();
 	return 0;
 }
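
Note on the __weak hook introduced above: "void __weak arch_disable_smp_support(void) { }" gives the generic nosmp/maxcpus code a no-op default that an architecture can override with an ordinary (strong) definition at link time; the removed disable_ioapic_setup() calls suggest x86 supplies such an override to skip IO-APIC setup. The snippet below is only a minimal user-space sketch of that weak-symbol pattern, assuming GCC/Clang's __attribute__((weak)); the file name and output strings are illustrative, not part of the patch.

/* weak_demo.c -- user-space sketch (not kernel code) of the weak-symbol
 * pattern behind "void __weak arch_disable_smp_support(void) { }" above.
 * Build: cc -o weak_demo weak_demo.c [strong_override.c]
 */
#include <stdio.h>

/* Weak fallback: generic code gets a harmless no-op by default. */
void __attribute__((weak)) arch_disable_smp_support(void)
{
	puts("generic no-op arch_disable_smp_support()");
}

int main(void)
{
	/* If another object file links in a non-weak definition, the linker
	 * silently picks that one and this call runs the override instead. */
	arch_disable_smp_support();
	return 0;
}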