x86: clean up formatting of __switch_to
author	Jeremy Fitzhardinge <jeremy@goop.org>
Tue, 8 Jul 2008 22:06:26 +0000 (15:06 -0700)
committer	Ingo Molnar <mingo@elte.hu>
Wed, 16 Jul 2008 08:54:25 +0000 (10:54 +0200)
process_64.c:__switch_to has some very old strange formatting, some of
it dating back to pre-git.  Fix it up.

No functional changes.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
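
For reference while reading the hunk below, here is a minimal, self-contained
user-space sketch of how the FS/GS switch reads after the cleanup.  The
load_fs_selector(), load_gs_selector() and write_msr() helpers are hypothetical
stubs standing in for the kernel's loadsegment(), load_gs_index() and wrmsrl(),
the unlikely() annotations are dropped, and the trimmed thread_struct carries
only the fields this path touches; it illustrates the reformatted control flow,
not the real kernel code.

    /*
     * Illustrative only: user-space sketch of the FS/GS switching flow
     * as it reads after this patch.  Helper names and values are made up.
     */
    #include <stdio.h>

    struct thread_struct {
            unsigned long fs, gs;                   /* 64-bit segment bases */
            unsigned short fsindex, gsindex;        /* segment selectors */
    };

    /* Stubs: the real helpers poke CPU segment registers and MSRs. */
    static void load_fs_selector(unsigned short sel)
    {
            printf("mov %u, %%fs\n", (unsigned)sel);
    }

    static void load_gs_selector(unsigned short sel)
    {
            printf("mov %u, %%gs\n", (unsigned)sel);
    }

    static void write_msr(const char *msr, unsigned long val)
    {
            printf("wrmsr %-18s <- %#lx\n", msr, val);
    }

    static void switch_fs_gs(struct thread_struct *prev,
                             struct thread_struct *next,
                             unsigned short fsindex, unsigned short gsindex)
    {
            /*
             * Segment register != 0 always requires a reload.  Also
             * reload when it has changed.  When prev process used 64bit
             * base always reload to avoid an information leak.
             */
            if (fsindex | next->fsindex | prev->fs) {
                    load_fs_selector(next->fsindex);
                    /* A non-zero selector overrides the saved 64-bit base. */
                    if (fsindex)
                            prev->fs = 0;
            }
            /* when next process has a 64bit base use it */
            if (next->fs)
                    write_msr("MSR_FS_BASE", next->fs);
            prev->fsindex = fsindex;

            if (gsindex | next->gsindex | prev->gs) {
                    load_gs_selector(next->gsindex);
                    if (gsindex)
                            prev->gs = 0;
            }
            if (next->gs)
                    write_msr("MSR_KERNEL_GS_BASE", next->gs);
            prev->gsindex = gsindex;
    }

    int main(void)
    {
            /* Made-up selector/base values, just to exercise both branches. */
            struct thread_struct prev = { .fs = 0x7f0000000000UL, .fsindex = 0 };
            struct thread_struct next = { .gs = 0x7f1000000000UL, .gsindex = 0x33 };

            switch_fs_gs(&prev, &next, /* fsindex */ 0x2b, /* gsindex */ 0);
            return 0;
    }
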
arch/x86/kernel/process_64.c

index a8e5362..e8a8e1b 100644
@@ -537,8 +537,8 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
-       struct thread_struct *prev = &prev_p->thread,
-                                *next = &next_p->thread;
+       struct thread_struct *prev = &prev_p->thread;
+       struct thread_struct *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        unsigned fsindex, gsindex;
@@ -586,35 +586,34 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        /* 
         * Switch FS and GS.
+        *
+        * Segment register != 0 always requires a reload.  Also
+        * reload when it has changed.  When prev process used 64bit
+        * base always reload to avoid an information leak.
         */
-       { 
-               /* segment register != 0 always requires a reload. 
-                  also reload when it has changed. 
-                  when prev process used 64bit base always reload
-                  to avoid an information leak. */
-               if (unlikely(fsindex | next->fsindex | prev->fs)) {
-                       loadsegment(fs, next->fsindex);
-                       /* check if the user used a selector != 0
-                        * if yes clear 64bit base, since overloaded base
-                         * is always mapped to the Null selector
-                         */
-                       if (fsindex)
+       if (unlikely(fsindex | next->fsindex | prev->fs)) {
+               loadsegment(fs, next->fsindex);
+               /* 
+                * Check if the user used a selector != 0; if yes
+                *  clear 64bit base, since overloaded base is always
+                *  mapped to the Null selector
+                */
+               if (fsindex)
                        prev->fs = 0;                           
-               }
-               /* when next process has a 64bit base use it */
-               if (next->fs) 
-                       wrmsrl(MSR_FS_BASE, next->fs); 
-               prev->fsindex = fsindex;
-
-               if (unlikely(gsindex | next->gsindex | prev->gs)) {
-                       load_gs_index(next->gsindex);
-                       if (gsindex)
+       }
+       /* when next process has a 64bit base use it */
+       if (next->fs)
+               wrmsrl(MSR_FS_BASE, next->fs);
+       prev->fsindex = fsindex;
+
+       if (unlikely(gsindex | next->gsindex | prev->gs)) {
+               load_gs_index(next->gsindex);
+               if (gsindex)
                        prev->gs = 0;                           
-               }
-               if (next->gs)
-                       wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 
-               prev->gsindex = gsindex;
        }
+       if (next->gs)
+               wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
+       prev->gsindex = gsindex;
 
        /* Must be after DS reload */
        unlazy_fpu(prev_p);
@@ -627,7 +626,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        write_pda(pcurrent, next_p); 
 
        write_pda(kernelstack,
-       (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
+                 (unsigned long)task_stack_page(next_p) +
+                 THREAD_SIZE - PDA_STACKOFFSET);
 #ifdef CONFIG_CC_STACKPROTECTOR
        write_pda(stack_canary, next_p->stack_canary);
        /*