Merge branch 'core/xen' into x86/urgent
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 9fc6f7c..8a6d7b0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/kexec.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
@@ -30,6 +30,7 @@
 #include <linux/pm.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
+#include <linux/vmalloc.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -753,8 +754,14 @@ static struct page *kimage_alloc_page(struct kimage *image,
                        *old = addr | (*old & ~PAGE_MASK);
 
                        /* The old page I have found cannot be a
-                        * destination page, so return it.
+                        * destination page, so return it if its
+                        * location honors the gfp_mask passed in.
                         */
+                       if (!(gfp_mask & __GFP_HIGHMEM) &&
+                           PageHighMem(old_page)) {
+                               kimage_free_pages(old_page);
+                               continue;
+                       }
                        addr = old_addr;
                        page = old_page;
                        break;
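
The check added above matters because a caller that did not pass
__GFP_HIGHMEM expects a page with a permanent kernel mapping, and a
recycled highmem page has none. A minimal sketch of the distinction
(hypothetical, outside kexec; alloc_pages()/page_address() shown for
illustration only):

    struct page *low  = alloc_pages(GFP_KERNEL, 0);
    void *ok    = page_address(low);   /* lowmem: always mapped */

    struct page *high = alloc_pages(GFP_HIGHUSER, 0);
    void *maybe = page_address(high);  /* NULL when the page sits in
                                          highmem and is not kmap()ed */
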
@@ -924,19 +931,13 @@ static int kimage_load_segment(struct kimage *image,
  */
 struct kimage *kexec_image;
 struct kimage *kexec_crash_image;
-/*
- * A home grown binary mutex.
- * Nothing can wait so this mutex is safe to use
- * in interrupt context :)
- */
-static int kexec_lock;
 
-asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
-                               struct kexec_segment __user *segments,
-                               unsigned long flags)
+static DEFINE_MUTEX(kexec_mutex);
+
+SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+               struct kexec_segment __user *, segments, unsigned long, flags)
 {
        struct kimage **dest_image, *image;
-       int locked;
        int result;
 
        /* We only trust the superuser with rebooting the system. */
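
SYSCALL_DEFINE4() produces the same asmlinkage entry point that the
removed open-coded definition declared; the macro form additionally
lets architectures that select CONFIG_HAVE_SYSCALL_WRAPPERS emit
wrappers that sign-extend 32-bit arguments. In the plain case the
definition above expands to roughly this (simplified sketch):

    asmlinkage long sys_kexec_load(unsigned long entry,
                                   unsigned long nr_segments,
                                   struct kexec_segment __user *segments,
                                   unsigned long flags)
    {
            ...
    }
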
@@ -972,8 +973,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
         *
         * KISS: always take the mutex.
         */
-       locked = xchg(&kexec_lock, 1);
-       if (locked)
+       if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;
 
        dest_image = &kexec_image;
@@ -1015,8 +1015,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
        image = xchg(dest_image, image);
 
 out:
-       locked = xchg(&kexec_lock, 0); /* Release the mutex */
-       BUG_ON(!locked);
+       mutex_unlock(&kexec_mutex);
        kimage_free(image);
 
        return result;
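
The conversion is behavior-preserving: the old xchg() pair open-coded a
test-and-set lock, and mutex_trylock() keeps the same non-blocking
"return -EBUSY when contended" semantics while gaining lockdep
coverage. Side by side (sketch):

    /* before: home-grown binary lock */
    if (xchg(&kexec_lock, 1))
            return -EBUSY;
    /* ... critical section ... */
    BUG_ON(!xchg(&kexec_lock, 0));

    /* after: a real mutex with identical non-blocking behavior */
    if (!mutex_trylock(&kexec_mutex))
            return -EBUSY;
    /* ... critical section ... */
    mutex_unlock(&kexec_mutex);
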
@@ -1063,10 +1062,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 
 void crash_kexec(struct pt_regs *regs)
 {
-       int locked;
-
-
-       /* Take the kexec_lock here to prevent sys_kexec_load
+       /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
@@ -1074,8 +1070,7 @@ void crash_kexec(struct pt_regs *regs)
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
-       locked = xchg(&kexec_lock, 1);
-       if (!locked) {
+       if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;
                        crash_setup_regs(&fixed_regs, regs);
@@ -1083,8 +1078,7 @@ void crash_kexec(struct pt_regs *regs)
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
-               locked = xchg(&kexec_lock, 0);
-               BUG_ON(!locked);
+               mutex_unlock(&kexec_mutex);
        }
 }
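
In the panic path the trylock semantics are essential: crash_kexec()
may be called from atomic context and must never sleep, so when another
CPU holds the mutex it simply skips the crash kernel instead of
blocking. A hypothetical timeline of the race the comment describes:

    /*
     * CPU0: sys_kexec_load()          CPU1: panic() -> crash_kexec()
     * mutex_trylock() succeeds        mutex_trylock() fails
     * replaces kexec_crash_image ...  ... so CPU1 returns instead of
     *                                 jumping into a half-loaded image
     */
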
 
@@ -1121,7 +1115,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
        struct elf_prstatus prstatus;
        u32 *buf;
 
-       if ((cpu < 0) || (cpu >= NR_CPUS))
+       if ((cpu < 0) || (cpu >= nr_cpu_ids))
                return;
 
        /* Using ELF notes here is opportunistic.
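
nr_cpu_ids is the runtime bound (highest possible CPU id plus one),
while NR_CPUS is only the compile-time maximum, so the tighter check
rejects ids for which no per-cpu crash-note buffer can exist. A
hypothetical example:

    /* Kernel built with NR_CPUS=512 booted on a 4-way box:
     * nr_cpu_ids == 4, so crash_save_cpu(regs, 100) now bails out
     * instead of passing the old NR_CPUS test and poking at a
     * per-cpu slot that was never set up. */
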
@@ -1377,6 +1371,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_SYMBOL(node_online_map);
        VMCOREINFO_SYMBOL(swapper_pg_dir);
        VMCOREINFO_SYMBOL(_stext);
+       VMCOREINFO_SYMBOL(vmlist);
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
        VMCOREINFO_SYMBOL(mem_map);
@@ -1412,6 +1407,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_OFFSET(free_area, free_list);
        VMCOREINFO_OFFSET(list_head, next);
        VMCOREINFO_OFFSET(list_head, prev);
+       VMCOREINFO_OFFSET(vm_struct, addr);
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
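
The VMCOREINFO_* helpers append "name=value" strings to the vmcoreinfo
ELF note that dump tools such as makedumpfile parse; exporting vmlist
and vm_struct.addr presumably gives those tools what they need to
translate vmalloc addresses in a dump, and is why linux/vmalloc.h is
now included above. Roughly what the two helpers used here expand to
(simplified sketch of the include/linux/kexec.h definitions):

    #define VMCOREINFO_SYMBOL(name) \
            vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, \
                                  (unsigned long)&name)
    #define VMCOREINFO_OFFSET(name, field) \
            vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
                                  (unsigned long)offsetof(struct name, field))
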
@@ -1434,7 +1430,7 @@ int kernel_kexec(void)
 {
        int error = 0;
 
-       if (xchg(&kexec_lock, 1))
+       if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
@@ -1498,8 +1494,6 @@ int kernel_kexec(void)
 #endif
 
  Unlock:
-       if (!xchg(&kexec_lock, 0))
-               BUG();
-
+       mutex_unlock(&kexec_mutex);
        return error;
 }
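
kernel_kexec() is the ordinary (non-crash) reboot-into-new-kernel path;
userspace reaches it through reboot(2) after staging an image with
kexec_load(2). A minimal hypothetical trigger:

    #include <unistd.h>
    #include <sys/reboot.h>
    #include <linux/reboot.h>

    int main(void)
    {
            /* Needs CAP_SYS_BOOT.  Fails with EINVAL if no image was
             * loaded; on success it does not return - the machine
             * jumps straight into the new kernel. */
            return reboot(LINUX_REBOOT_CMD_KEXEC);
    }
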