x86: clean up arch/x86/mm/mmap_32/64.c
author Thomas Gleixner <tglx@linutronix.de>
Wed, 30 Jan 2008 12:30:25 +0000 (13:30 +0100)
committer Ingo Molnar <mingo@elte.hu>
Wed, 30 Jan 2008 12:30:25 +0000 (13:30 +0100)
Whitespace and coding style cleanup.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/mm/mmap_32.c
arch/x86/mm/mmap_64.c

diff --git a/arch/x86/mm/mmap_32.c b/arch/x86/mm/mmap_32.c
index 552e084..d7dd096 100644
--- a/arch/x86/mm/mmap_32.c
+++ b/arch/x86/mm/mmap_32.c
@@ -64,8 +64,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (sysctl_legacy_va_layout ||
-                       (current->personality & ADDR_COMPAT_LAYOUT) ||
-                       current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
+           (current->personality & ADDR_COMPAT_LAYOUT) ||
+           current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
diff --git a/arch/x86/mm/mmap_64.c b/arch/x86/mm/mmap_64.c
index 80bba0d..ffb71a3 100644
--- a/arch/x86/mm/mmap_64.c
+++ b/arch/x86/mm/mmap_64.c
@@ -16,11 +16,14 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 #endif
        mm->mmap_base = TASK_UNMAPPED_BASE;
        if (current->flags & PF_RANDOMIZE) {
-               /* Add 28bit randomness which is about 40bits of address space
-                  because mmap base has to be page aligned.
-                  or ~1/128 of the total user VM
-                  (total user address space is 47bits) */
+               /*
+                * Add 28bit randomness which is about 40bits of
+                * address space because mmap base has to be page
+                * aligned.  or ~1/128 of the total user VM (total
+                * user address space is 47bits)
+                */
                unsigned rnd = get_random_int() & 0xfffffff;
+
                mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
        }
        mm->get_unmapped_area = arch_get_unmapped_area;
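
For reference, the arithmetic in the reflowed comment above can be checked with a
small userspace sketch. This is illustrative only, not kernel code; the PAGE_SHIFT
of 12 and the 47-bit user address space are assumptions taken from the x86-64
context of the hunk:

	#include <stdio.h>

	int main(void)
	{
		/* 28 bits of randomness, as masked in the hunk above */
		unsigned long rnd_max = 0xfffffff;	/* 2^28 - 1 */
		unsigned int page_shift = 12;		/* x86-64 PAGE_SHIFT (assumed) */

		/*
		 * Page alignment shifts the 28 random bits up by PAGE_SHIFT,
		 * so mmap_base can move over roughly 2^(28+12) = 2^40 bytes.
		 */
		unsigned long max_offset = rnd_max << page_shift;

		/* 2^40 out of a 2^47 user address space is 2^-7, i.e. ~1/128 */
		printf("max mmap_base offset: %#lx (~2^40 bytes)\n", max_offset);
		printf("fraction of 47-bit user VM: 1/%lu\n",
		       (1UL << 47) / (1UL << 40));
		return 0;
	}

This is where the "about 40bits of address space" and "~1/128 of the total user
VM" figures in the comment come from.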