#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
+#include <linux/stackprotector.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <linux/utsname.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
+#include <linux/acpi.h>
#include <linux/tty.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
-#include <linux/unwind.h>
#include <linux/buffer_head.h>
#include <linux/page_cgroup.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
+#include <linux/kmemleak.h>
#include <linux/pid_namespace.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/signal.h>
#include <linux/idr.h>
#include <linux/ftrace.h>
+#include <linux/async.h>
+#include <linux/kmemcheck.h>
+#include <linux/kmemtrace.h>
+#include <linux/sfi.h>
+#include <linux/shmem_fs.h>
#include <trace/boot.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
-#include <trace/kmemtrace.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#endif
extern void prio_tree_init(void);
extern void radix_tree_init(void);
extern void free_initmem(void);
-#ifdef CONFIG_ACPI
-extern void acpi_early_init(void);
-#else
-static inline void acpi_early_init(void) { }
-#endif
#ifndef CONFIG_DEBUG_RODATA
static inline void mark_rodata_ro(void) { }
#endif
#ifdef CONFIG_TC
extern void tc_init(void);
#endif
-enum system_states system_state;
+enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);
/*
 * Boot command-line arguments
 */
extern void time_init(void);
/* Default late time init is NULL. archs can override this later. */
-void (*late_time_init)(void);
+void (*__initdata late_time_init)(void);
extern void softirq_init(void);
/* Untouched command line saved by arch-specific code. */
* greater than 0, limits the maximum number of CPUs activated in
* SMP mode to <NUM>.
*/
-#ifndef CONFIG_X86_IO_APIC
-static inline void disable_ioapic_setup(void) {};
-#endif
+
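+/*
+ * Weak default: an architecture that must react to "nosmp"/"maxcpus=0"
+ * provides its own definition, roughly what x86 does in place of the
+ * old disable_ioapic_setup():
+ *
+ *	void arch_disable_smp_support(void)
+ *	{
+ *		disable_ioapic_setup();
+ *	}
+ *
+ * Every other architecture silently falls back to this empty stub,
+ * with no #ifdef needed.
+ */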
+void __weak arch_disable_smp_support(void) { }
static int __init nosmp(char *str)
{
setup_max_cpus = 0;
- disable_ioapic_setup();
+ arch_disable_smp_support();
+
return 0;
}
early_param("nosmp", nosmp);

static int __init maxcpus(char *str)
{
get_option(&str, &setup_max_cpus);
if (setup_max_cpus == 0)
- disable_ioapic_setup();
+ arch_disable_smp_support();
return 0;
}
early_param("maxcpus", maxcpus);
#else
-#define setup_max_cpus NR_CPUS
+const unsigned int setup_max_cpus = NR_CPUS;
#endif
#define smp_init() do { } while (0)
#endif
-static inline void setup_per_cpu_areas(void) { }
static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#else
-#if NR_CPUS > BITS_PER_LONG
-cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_mask_all);
-#endif
-
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static void __init setup_per_cpu_areas(void)
-{
- unsigned long size, i;
- char *ptr;
- unsigned long nr_possible_cpus = num_possible_cpus();
-
- /* Copy section for each CPU (we discard the original) */
- size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
- ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
- for_each_possible_cpu(i) {
- __per_cpu_offset[i] = ptr - __per_cpu_start;
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
- ptr += size;
- }
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
-
/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
	 * Mark the current CPU active so tasks can migrate to it;
	 * the rest are brought up and down via cpu_up()/cpu_down().
	 */
- cpu = smp_processor_id();
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(smp_processor_id(), true);
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
* gcc-3.4 accidentally inlines this function, so use noinline.
*/
-static void noinline __init_refok rest_init(void)
+static noinline void __init_refok rest_init(void)
__releases(kernel_lock)
{
int pid;
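+	/*
+	 * The first kernel threads are about to be created, so tell
+	 * RCU to stop assuming that the boot task is the only task.
+	 */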
+ rcu_scheduler_starting();
kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
numa_default_policy();
pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
return 0;
}
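+
+/*
+ * Run the registered early-param handlers over an arbitrary command
+ * line, for callers (arch or early platform code, say) that need to
+ * parse something other than boot_command_line.
+ */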
+void __init parse_early_options(char *cmdline)
+{
+ parse_args("early options", cmdline, NULL, 0, do_early_param);
+}
+
/* Arch code calls this early on, or if not, just before other parsing. */
void __init parse_early_param(void)
{
/* All fall through to do_early_param. */
strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
- parse_args("early options", tmp_cmdline, NULL, 0, do_early_param);
+ parse_early_options(tmp_cmdline);
done = 1;
}
{
}
+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+ /*
+	 * page_cgroup requires contiguous pages for its memmap,
+	 * which is bigger than MAX_ORDER unless SPARSEMEM is used.
+ */
+ page_cgroup_init_flatmem();
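+	/*
+	 * Order matters here: mem_init() releases the remaining bootmem
+	 * pages to the page allocator, kmem_cache_init() bootstraps the
+	 * slab allocator on top of it, and vmalloc_init() already
+	 * kmallocs its vmap_area bookkeeping.
+	 */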
+ mem_init();
+ kmem_cache_init();
+ pgtable_cache_init();
+ vmalloc_init();
+}
+
asmlinkage void __init start_kernel(void)
{
	char *command_line;
* Need to run as early as possible, to initialize the
* lockdep hash:
*/
- unwind_init();
lockdep_init();
debug_objects_early_init();
+
+ /*
+	 * Set up the initial canary ASAP:
+ */
+ boot_init_stack_canary();
+
cgroup_init_early();
local_irq_disable();
tick_init();
boot_cpu_init();
page_address_init();
- printk(KERN_NOTICE);
- printk(linux_banner);
+ printk(KERN_NOTICE "%s", linux_banner);
setup_arch(&command_line);
mm_init_owner(&init_mm, &init_task);
setup_command_line(command_line);
- unwind_setup();
- setup_per_cpu_areas();
setup_nr_cpu_ids();
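+	/* the per-cpu areas may be sized from nr_cpu_ids, so keep this order */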
+ setup_per_cpu_areas();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+ build_all_zonelists();
+ page_alloc_init();
+
+ printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
+ parse_early_param();
+ parse_args("Booting kernel", static_command_line, __start___param,
+ __stop___param - __start___param,
+ &unknown_bootoption);
+ /*
+ * These use large bootmem allocations and must precede
+ * kmem_cache_init()
+ */
+ pidhash_init();
+ vfs_caches_init_early();
+ sort_main_extable();
+ trap_init();
+ mm_init();
/*
	 * Set up the scheduler prior to starting any interrupts (such as the
* timer interrupt). Full topology setup happens at smp_init()
* fragile until we cpu_idle() for the first time.
*/
preempt_disable();
- build_all_zonelists();
- page_alloc_init();
- printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
- parse_early_param();
- parse_args("Booting kernel", static_command_line, __start___param,
- __stop___param - __start___param,
- &unknown_bootoption);
if (!irqs_disabled()) {
printk(KERN_WARNING "start_kernel(): bug: interrupts were "
"enabled *very* early, fixing it\n");
local_irq_disable();
}
- sort_main_extable();
- trap_init();
rcu_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();
- pidhash_init();
+ prio_tree_init();
init_timers();
hrtimers_init();
softirq_init();
timekeeping_init();
time_init();
- sched_clock_init();
profile_init();
if (!irqs_disabled())
- printk("start_kernel(): bug: interrupts were enabled early\n");
+ printk(KERN_CRIT "start_kernel(): bug: interrupts were "
+ "enabled early\n");
early_boot_irqs_on();
local_irq_enable();
+ /* Interrupts are enabled now so all GFP allocations are safe. */
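+	/*
+	 * (Until now gfp_allowed_mask has been GFP_BOOT_MASK, which
+	 * masks off __GFP_WAIT, __GFP_IO and __GFP_FS so that early
+	 * allocations could not sleep or touch I/O.)
+	 */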
+ set_gfp_allowed_mask(__GFP_BITS_MASK);
+
+ kmem_cache_init_late();
+
/*
* HACK ALERT! This is early. We're enabling the console before
* we've done PCI setups etc, and console_init() must be aware of
initrd_start = 0;
}
#endif
- vmalloc_init();
- vfs_caches_init_early();
- cpuset_init_early();
page_cgroup_init();
- mem_init();
enable_debug_pagealloc();
- cpu_hotplug_init();
- kmem_cache_init();
kmemtrace_init();
+ kmemleak_init();
debug_objects_mem_init();
idr_init_cache();
setup_per_cpu_pageset();
numa_policy_init();
if (late_time_init)
late_time_init();
+ sched_clock_init();
calibrate_delay();
pidmap_init();
- pgtable_cache_init();
- prio_tree_init();
anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled)
		efi_enter_virtual_mode();
#endif
thread_info_cache_init();
cred_init();
- fork_init(num_physpages);
+ fork_init(totalram_pages);
proc_caches_init();
buffer_init();
key_init();
security_init();
- vfs_caches_init(num_physpages);
+ vfs_caches_init(totalram_pages);
radix_tree_init();
signals_init();
/* rootfs populating might need page-writeback */
check_bugs();
acpi_early_init(); /* before LAPIC and SMP init */
+ sfi_init_late();
ftrace_init();
rest_init();
}
-static int initcall_debug;
+/* Call all constructor functions linked into the kernel. */
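+/*
+ * gcc emits a pointer into the .ctors section for every function
+ * marked __attribute__((constructor)) -- the kernel's gcov support
+ * depends on this -- and the linker script collects those pointers
+ * between __ctors_start and __ctors_end.  A sketch of one:
+ *
+ *	static void __attribute__((constructor)) note_object(void)
+ *	{
+ *		... runs from do_ctors(), before any initcall ...
+ *	}
+ */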
+static void __init do_ctors(void)
+{
+#ifdef CONFIG_CONSTRUCTORS
+ ctor_fn_t *call = (ctor_fn_t *) __ctors_start;
+
+ for (; call < (ctor_fn_t *) __ctors_end; call++)
+ (*call)();
+#endif
+}
+
+int initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
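+/*
+ * Static rather than on-stack: initcalls run strictly one at a time,
+ * so sharing one set of buffers is safe, and it keeps the stack
+ * footprint of do_one_initcall() small.
+ */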
+static char msgbuf[64];
+static struct boot_trace_call call;
+static struct boot_trace_ret ret;
+
int do_one_initcall(initcall_t fn)
{
int count = preempt_count();
ktime_t calltime, delta, rettime;
- char msgbuf[64];
- struct boot_trace_call call;
- struct boot_trace_ret ret;
if (initcall_debug) {
call.caller = task_pid_nr(current);
/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
- rcu_init_sched(); /* needed by module_init stage. */
init_workqueues();
+ cpuset_init_smp();
usermodehelper_init();
+ init_tmpfs();
driver_init();
init_irq_proc();
+ do_ctors();
do_initcalls();
}
/* This is a non-__init function. Force it to be noinline, otherwise gcc
 * inlines it into kernel_init() and it becomes part of the init.text
 * section.
 */
-static int noinline init_post(void)
+static noinline int init_post(void)
+ __releases(kernel_lock)
{
+ /* need to finish all async __init code before freeing the memory */
+ async_synchronize_full();
free_initmem();
unlock_kernel();
mark_rodata_ro();
static int __init kernel_init(void *unused)
{
lock_kernel();
+
+ /*
+ * init can allocate pages on any node
+ */
+ set_mems_allowed(node_possible_map);
/*
* init can run on any cpu.
*/
- set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(current, cpu_all_mask);
/*
* Tell the world that we're going to be the grim
* reaper of innocent orphaned children.
smp_init();
sched_init_smp();
- cpuset_init_smp();
-
do_basic_setup();
/*