i386: move kernel/cpu/mtrr

diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 72a1b9c..b87f854 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -9,7 +9,6 @@
  * This file provides all the same external entries as smp.c but uses
  * the voyager hal to provide the functionality
  */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
@@ -17,7 +16,6 @@
 #include <linux/mc146818rtc.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
-#include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/bootmem.h>
@@ -86,8 +84,8 @@ static int ack_QIC_CPI(__u8 cpi);
 static void ack_special_QIC_CPI(__u8 cpi);
 static void ack_VIC_CPI(__u8 cpi);
 static void send_CPI_allbutself(__u8 cpi);
-static void enable_vic_irq(unsigned int irq);
-static void disable_vic_irq(unsigned int irq);
+static void mask_vic_irq(unsigned int irq);
+static void unmask_vic_irq(unsigned int irq);
 static unsigned int startup_vic_irq(unsigned int irq);
 static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
@@ -100,6 +98,7 @@ static void do_boot_cpu(__u8 cpuid);
 static void do_quad_bootstrap(void);
 
 int hard_smp_processor_id(void);
+int safe_smp_processor_id(void);
 
 /* Inline functions */
 static inline void
@@ -126,10 +125,10 @@ send_QIC_CPI(__u32 cpuset, __u8 cpi)
 }
 
 static inline void
-wrapper_smp_local_timer_interrupt(struct pt_regs *regs)
+wrapper_smp_local_timer_interrupt(void)
 {
        irq_enter();
-       smp_local_timer_interrupt(regs);
+       smp_local_timer_interrupt();
        irq_exit();
 }
 
@@ -205,15 +204,12 @@ ack_CPI(__u8 cpi)
 /* The VIC IRQ descriptors -- these look almost identical to the
  * 8259 IRQs except that masks and things must be kept per processor
  */
-static struct hw_interrupt_type vic_irq_type = {
-       .typename = "VIC-level",
-       .startup = startup_vic_irq,
-       .shutdown = disable_vic_irq,
-       .enable = enable_vic_irq,
-       .disable = disable_vic_irq,
-       .ack = before_handle_vic_irq,
-       .end = after_handle_vic_irq,
-       .set_affinity = set_vic_irq_affinity,
+static struct irq_chip vic_chip = {
+       .name           = "VIC",
+       .startup        = startup_vic_irq,
+       .mask           = mask_vic_irq,
+       .unmask         = unmask_vic_irq,
+       .set_affinity   = set_vic_irq_affinity,
 };
 
 /* used to count up as CPUs are brought on line (starts at 0) */
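
Dropping the old .enable/.disable/.shutdown methods is safe because the
2.6.18 genirq core backfills them from mask/unmask, and the old .ack/.end
bracketing reappears as handle_vic_irq() further down. The fixup in
kernel/irq/chip.c of this era looks roughly like this (abridged from
memory, a sketch rather than the exact source):

/* Sketch of irq_chip_set_defaults(): a chip supplying only
 * mask/unmask still gets working enable/disable/shutdown. */
static void default_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	desc->chip->unmask(irq);		/* enable == unmask */
	desc->status &= ~IRQ_MASKED;
}

void irq_chip_set_defaults(struct irq_chip *chip)
{
	if (!chip->enable)
		chip->enable = default_enable;
	if (!chip->shutdown)
		chip->shutdown = chip->disable;	/* lazy shutdown */
}
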
@@ -240,7 +236,7 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_callout_map);
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
@@ -402,6 +398,7 @@ find_smp_config(void)
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
+       cpu_possible_map = phys_cpu_present_map;
        printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
        /* enable the CPIs by writing the base vector to their register */
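
The four voyager_extended_cmos_read() calls above assemble a little-endian
32-bit processor-present mask one byte at a time, which cpu_possible_map
now inherits. Equivalently (a self-contained sketch; read_byte is a
hypothetical stand-in for the CMOS accessor):

#include <stdint.h>

extern uint8_t read_byte(unsigned int offset);	/* stand-in */

/* bit n of the result set => physical CPU n is installed */
uint32_t present_mask(unsigned int base)
{
	uint32_t mask = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		mask |= (uint32_t)read_byte(base + i) << (8 * i);
	return mask;
}
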
@@ -424,6 +421,7 @@ find_smp_config(void)
             VOYAGER_SUS_IN_CONTROL_PORT);
 
        current_thread_info()->cpu = boot_cpu_id;
+       x86_write_percpu(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -436,7 +434,7 @@ smp_store_cpu_info(int id)
 
        *c = boot_cpu_data;
 
-       identify_cpu(c);
+       identify_secondary_cpu(c);
 }
 
 /* set up the trampoline and return the physical address of the code */
@@ -536,15 +534,6 @@ do_boot_cpu(__u8 cpu)
                & ~( voyager_extended_vic_processors
                     & voyager_allowed_boot_processors);
 
-       /* For the 486, we can't use the 4Mb page table trick, so
-        * must map a region of memory */
-#ifdef CONFIG_M486
-       int i;
-       unsigned long *page_table_copies = (unsigned long *)
-               __get_free_page(GFP_KERNEL);
-#endif
-       pgd_t orig_swapper_pg_dir0;
-
        /* This is an area in head.S which was used to set up the
         * initial kernel stack.  We need to alter this to give the
         * booting CPU a new stack (taken from its idle process) */
@@ -573,6 +562,8 @@ do_boot_cpu(__u8 cpu)
        hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
 
        cpucount++;
+       alternatives_smp_switch(1);
+
        idle = fork_idle(cpu);
        if(IS_ERR(idle))
                panic("failed fork for CPU%d", cpu);
@@ -580,30 +571,20 @@ do_boot_cpu(__u8 cpu)
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.esp = (void *) idle->thread.esp;
 
+       init_gdt(cpu);
+       per_cpu(current_task, cpu) = idle;
+       early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        irq_ctx_init(cpu);
 
        /* Note: Don't modify initial ss override */
        VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, 
                (unsigned long)hijack_source.val, hijack_source.idt.Segment,
                hijack_source.idt.Offset, stack_start.esp));
-       /* set the original swapper_pg_dir[0] to map 0 to 4Mb transparently
-        * (so that the booting CPU can find start_32 */
-       orig_swapper_pg_dir0 = swapper_pg_dir[0];
-#ifdef CONFIG_M486
-       if(page_table_copies == NULL)
-               panic("No free memory for 486 page tables\n");
-       for(i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++)
-               page_table_copies[i] = (i * PAGE_SIZE) 
-                       | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-
-       ((unsigned long *)swapper_pg_dir)[0] = 
-               ((virt_to_phys(page_table_copies)) & PAGE_MASK)
-               | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-#else
-       ((unsigned long *)swapper_pg_dir)[0] = 
-               (virt_to_phys(pg0) & PAGE_MASK)
-               | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-#endif
+
+       /* init lowmem identity mapping */
+       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+                       min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
+       flush_tlb_all();
 
        if(quad_boot) {
                printk("CPU %d: non extended Quad boot\n", cpu);
@@ -646,11 +627,7 @@ do_boot_cpu(__u8 cpu)
                udelay(100);
        }
        /* reset the page table */
-       swapper_pg_dir[0] = orig_swapper_pg_dir0;
-       local_flush_tlb();
-#ifdef CONFIG_M486
-       free_page((unsigned long)page_table_copies);
-#endif
+       zap_low_mappings();
          
        if (cpu_booted_map) {
                VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
@@ -660,6 +637,7 @@ do_boot_cpu(__u8 cpu)
                print_cpu_info(&cpu_data[cpu]);
                wmb();
                cpu_set(cpu, cpu_callout_map);
+               cpu_set(cpu, cpu_present_map);
        }
        else {
                printk("CPU%d FAILED TO BOOT: ", cpu);
@@ -784,7 +762,7 @@ fastcall void
 smp_vic_sys_interrupt(struct pt_regs *regs)
 {
        ack_CPI(VIC_SYS_INT);
-       printk("Voyager SYSTEM INTERRUPT\n");
+       printk("Voyager SYSTEM INTERRUPT\n");   
 }
 
 /* Handle a voyager CMN_INT; These interrupts occur either because of
@@ -882,8 +860,8 @@ smp_invalidate_interrupt(void)
 
 /* This routine is called with a physical cpu mask */
 static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
-                                               unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+                         unsigned long va)
 {
        int stuck = 50000;
 
@@ -935,7 +913,7 @@ flush_tlb_current_task(void)
        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        local_flush_tlb();
        if (cpu_mask)
-               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+               voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
        preempt_enable();
 }
@@ -957,7 +935,7 @@ flush_tlb_mm (struct mm_struct * mm)
                        leave_mm(smp_processor_id());
        }
        if (cpu_mask)
-               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+               voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
        preempt_enable();
 }
@@ -978,7 +956,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
        }
 
        if (cpu_mask)
-               flush_tlb_others(cpu_mask, mm, va);
+               voyager_flush_tlb_others(cpu_mask, mm, va);
 
        preempt_enable();
 }
@@ -1066,20 +1044,13 @@ smp_call_function_interrupt(void)
        }
 }
 
-/* Call this function on all CPUs using the function_interrupt above 
-    <func> The function to run. This must be fast and non-blocking.
-    <info> An arbitrary pointer to pass to the function.
-    <retry> If true, keep retrying until ready.
-    <wait> If true, wait until function has completed on other CPUs.
-    [RETURNS] 0 on success, else a negative status code. Does not return until
-    remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function (void (*func) (void *info), void *info, int retry,
-                  int wait)
+static int
+voyager_smp_call_function_mask (cpumask_t cpumask,
+                               void (*func) (void *info), void *info,
+                               int wait)
 {
        struct call_data_struct data;
-       __u32 mask = cpus_addr(cpu_online_map)[0];
+       u32 mask = cpus_addr(cpumask)[0];
 
        mask &= ~(1<<smp_processor_id());
 
@@ -1100,7 +1071,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
-       send_CPI_allbutself(VIC_CALL_FUNCTION_CPI);
+       send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 
        /* Wait for response */
        while (data.started)
@@ -1114,7 +1085,6 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
 
        return 0;
 }
-EXPORT_SYMBOL(smp_call_function);
 
 /* Sorry about the name.  In an APIC based system, the APICs
  * themselves are programmed to send a timer interrupt.  This is used
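
smp_call_function() and its export can leave this file because, with the
smp_ops indirection this series introduces, the generic i386 code supplies
them on top of the new mask-based hook. A sketch of the assumed common-code
wrapper (the nonatomic/retry argument was already ignored):

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
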
@@ -1133,15 +1103,19 @@ EXPORT_SYMBOL(smp_call_function);
 fastcall void 
 smp_apic_timer_interrupt(struct pt_regs *regs)
 {
-       wrapper_smp_local_timer_interrupt(regs);
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       wrapper_smp_local_timer_interrupt();
+       set_irq_regs(old_regs);
 }
 
 /* All of the QUAD interrupt GATES */
 fastcall void
 smp_qic_timer_interrupt(struct pt_regs *regs)
 {
+       struct pt_regs *old_regs = set_irq_regs(regs);
        ack_QIC_CPI(QIC_TIMER_CPI);
-       wrapper_smp_local_timer_interrupt(regs);
+       wrapper_smp_local_timer_interrupt();
+       set_irq_regs(old_regs);
 }
 
 fastcall void
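
The pt_regs parameter threading disappears because the 2.6.19 irq_regs
infrastructure parks the interrupted context's registers in a per-CPU
variable; smp_local_timer_interrupt() below fetches them again with
get_irq_regs(). Abridged from include/asm-generic/irq_regs.h:

DECLARE_PER_CPU(struct pt_regs *, __irq_regs);

static inline struct pt_regs *get_irq_regs(void)
{
	return __get_cpu_var(__irq_regs);
}

static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
	struct pt_regs *old_regs = __get_cpu_var(__irq_regs);

	__get_cpu_var(__irq_regs) = new_regs;	/* caller restores on exit */
	return old_regs;
}
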
@@ -1175,6 +1149,7 @@ smp_qic_call_function_interrupt(struct pt_regs *regs)
 fastcall void
 smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
+       struct pt_regs *old_regs = set_irq_regs(regs);
        __u8 cpu = smp_processor_id();
 
        if(is_cpu_quad())
@@ -1183,7 +1158,7 @@ smp_vic_cpi_interrupt(struct pt_regs *regs)
                ack_VIC_CPI(VIC_CPI_LEVEL0);
 
        if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
-               wrapper_smp_local_timer_interrupt(regs);
+               wrapper_smp_local_timer_interrupt();
        if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
                smp_invalidate_interrupt();
        if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
@@ -1192,6 +1167,7 @@ smp_vic_cpi_interrupt(struct pt_regs *regs)
                smp_enable_irq_interrupt();
        if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                smp_call_function_interrupt();
+       set_irq_regs(old_regs);
 }
 
 static void
@@ -1223,8 +1199,8 @@ smp_alloc_memory(void)
 }
 
 /* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
 {
        send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
 }
@@ -1246,9 +1222,15 @@ hard_smp_processor_id(void)
        return 0;
 }
 
+int
+safe_smp_processor_id(void)
+{
+       return hard_smp_processor_id();
+}
+
 /* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
 {
        smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
 }
@@ -1256,10 +1238,10 @@ smp_send_stop(void)
 /* this function is triggered in time.c when a clock tick fires
  * we need to re-broadcast the tick to all CPUs */
 void
-smp_vic_timer_interrupt(struct pt_regs *regs)
+smp_vic_timer_interrupt(void)
 {
        send_CPI_allbutself(VIC_TIMER_CPI);
-       smp_local_timer_interrupt(regs);
+       smp_local_timer_interrupt();
 }
 
 /* local (per CPU) timer interrupt.  It does both profiling and
@@ -1271,12 +1253,12 @@ smp_vic_timer_interrupt(struct pt_regs *regs)
  * value into /proc/profile.
  */
 void
-smp_local_timer_interrupt(struct pt_regs * regs)
+smp_local_timer_interrupt(void)
 {
        int cpu = smp_processor_id();
        long weight;
 
-       profile_tick(CPU_PROFILING, regs);
+       profile_tick(CPU_PROFILING);
        if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
@@ -1294,7 +1276,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
                                                per_cpu(prof_counter, cpu);
                }
 
-               update_process_times(user_mode_vm(regs));
+               update_process_times(user_mode_vm(get_irq_regs()));
        }
 
        if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
@@ -1381,6 +1363,17 @@ setup_profiling_timer(unsigned int multiplier)
        return 0;
 }
 
+/* This is a bit of a mess, but forced on us by the genirq changes:
+ * there's no genirq handler that really does what voyager wants, so
+ * hack it up with the simple IRQ handler */
+static void fastcall
+handle_vic_irq(unsigned int irq, struct irq_desc *desc)
+{
+       before_handle_vic_irq(irq);
+       handle_simple_irq(irq, desc);
+       after_handle_vic_irq(irq);
+}
+
 
 /*  The CPIs are handled in the per cpu 8259s, so they must be
  *  enabled to be received: FIX: enabling the CPIs in the early
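
handle_simple_irq() performs no hardware ack or end of its own, which is
why the VIC-specific before/after work is bracketed around it here rather
than expressed as chip methods. Its core is roughly this (a paraphrase of
kernel/irq/chip.c, not the exact source):

static void sketch_handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	spin_lock(&desc->lock);
	desc->status |= IRQ_INPROGRESS;		/* bar re-entry */
	spin_unlock(&desc->lock);

	handle_IRQ_event(irq, desc->action);	/* run registered handlers */

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
	spin_unlock(&desc->lock);
}
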
@@ -1417,7 +1410,7 @@ smp_intr_init(void)
         * This is for later: first 16 correspond to PC IRQs; next 16
         * are Primary MC IRQs and final 16 are Secondary MC IRQs */
        for(i = 0; i < 48; i++)
-               irq_desc[i].handler = &vic_irq_type;
+               set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
 }
 
 /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
@@ -1515,7 +1508,7 @@ ack_VIC_CPI(__u8 cpi)
 static unsigned int
 startup_vic_irq(unsigned int irq)
 {
-       enable_vic_irq(irq);
+       unmask_vic_irq(irq);
 
        return 0;
 }
@@ -1542,7 +1535,7 @@ startup_vic_irq(unsigned int irq)
  *    adjust their masks accordingly.  */
 
 static void
-enable_vic_irq(unsigned int irq)
+unmask_vic_irq(unsigned int irq)
 {
        /* linux doesn't do processor-irq affinity, so enable on
         * all CPUs we know about */
@@ -1551,7 +1544,7 @@ enable_vic_irq(unsigned int irq)
        __u32 processorList = 0;
        unsigned long flags;
 
-       VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
+       VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
                irq, cpu, cpu_irq_affinity[cpu]));
        spin_lock_irqsave(&vic_irq_lock, flags);
        for_each_online_cpu(real_cpu) {
@@ -1575,7 +1568,7 @@ enable_vic_irq(unsigned int irq)
 }
 
 static void
-disable_vic_irq(unsigned int irq)
+mask_vic_irq(unsigned int irq)
 {
        /* lazy disable, do nothing */
 }
@@ -1699,7 +1692,7 @@ after_handle_vic_irq(unsigned int irq)
 
                        printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
                               cpu, irq);
-                       for_each_cpu(real_cpu, mask) {
+                       for_each_possible_cpu(real_cpu) {
 
                                outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
                                     VIC_PROCESSOR_ID);
@@ -1803,7 +1796,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
         * disabled again as it comes in (voyager lazy disable).  If
         * the affinity map is tightened to disable the interrupt on a
         * cpu, it will be pushed off when it comes in */
-       enable_vic_irq(irq);
+       unmask_vic_irq(irq);
 }
 
 static void
@@ -1899,22 +1892,26 @@ smp_voyager_power_off(void *dummy)
                smp_stop_cpu_function(NULL);
 }
 
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
 {
        /* FIXME: ignore max_cpus for now */
        smp_boot_cpus();
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+static void __devinit voyager_smp_prepare_boot_cpu(void)
 {
+       init_gdt(smp_processor_id());
+       switch_to_new_gdt();
+
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callout_map);
        cpu_set(smp_processor_id(), cpu_possible_map);
+       cpu_set(smp_processor_id(), cpu_present_map);
 }
 
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
 {
        /* This only works at boot for x86.  See "rewrite" above. */
        if (cpu_isset(cpu, smp_commenced_mask))
@@ -1930,8 +1927,26 @@ __cpu_up(unsigned int cpu)
        return 0;
 }
 
-void __init 
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
 {
        zap_low_mappings();
 }
+
+void __init
+smp_setup_processor_id(void)
+{
+       current_thread_info()->cpu = hard_smp_processor_id();
+       x86_write_percpu(cpu_number, hard_smp_processor_id());
+}
+
+struct smp_ops smp_ops = {
+       .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+       .smp_prepare_cpus = voyager_smp_prepare_cpus,
+       .cpu_up = voyager_cpu_up,
+       .smp_cpus_done = voyager_smp_cpus_done,
+
+       .smp_send_stop = voyager_smp_send_stop,
+       .smp_send_reschedule = voyager_smp_send_reschedule,
+       .smp_call_function_mask = voyager_smp_call_function_mask,
+};
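
With the table in place, the generic entry points dispatch through it; the
asm-i386/smp.h side of this series is assumed to provide thin wrappers
along these lines:

static inline void smp_send_stop(void)
{
	smp_ops.smp_send_stop();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline int __cpu_up(unsigned int cpu)
{
	return smp_ops.cpu_up(cpu);
}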