X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=arch%2Fsparc64%2Fkernel%2Fsmp.c;h=c099d96f12397c00f2f8d7ef45b9bfff9530c4c0;hb=8691e5a8f691cc2a4fda0651e8d307aaba0e7d68;hp=d637168ce37dc4f700a1cba42d9c077b999f9916;hpb=b5a37e96b8dc067b979e44c4e109c9bc49c2f4d8;p=safe%2Fjmp%2Flinux-2.6 diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index d637168..c099d96 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -1,6 +1,6 @@ /* smp.c: Sparc64 SMP support. * - * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -21,7 +20,7 @@ #include #include #include -#include +#include #include #include @@ -29,115 +28,82 @@ #include #include #include +#include +#include +#include #include +#include #include #include #include #include -#include #include #include #include +#include +#include +#include +#include -extern void calibrate_delay(void); - -/* Please don't make this stuff initdata!!! --DaveM */ -static unsigned char boot_cpu_id; +int sparc64_multi_core __read_mostly; +cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE; cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; -cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; +DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; +cpumask_t cpu_core_map[NR_CPUS] __read_mostly = + { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; + +EXPORT_SYMBOL(cpu_possible_map); +EXPORT_SYMBOL(cpu_online_map); +EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); +EXPORT_SYMBOL(cpu_core_map); + static cpumask_t smp_commenced_mask; -static cpumask_t cpu_callout_map; void smp_info(struct seq_file *m) { int i; seq_printf(m, "State:\n"); - for (i = 0; i < NR_CPUS; i++) { - if (cpu_online(i)) - seq_printf(m, - "CPU%d:\t\tonline\n", i); - } + for_each_online_cpu(i) + seq_printf(m, "CPU%d:\t\tonline\n", i); } void smp_bogo(struct seq_file *m) { int i; - for (i = 0; i < NR_CPUS; i++) - if (cpu_online(i)) - seq_printf(m, - "Cpu%dBogo\t: %lu.%02lu\n" - "Cpu%dClkTck\t: %016lx\n", - i, cpu_data(i).udelay_val / (500000/HZ), - (cpu_data(i).udelay_val / (5000/HZ)) % 100, - i, cpu_data(i).clock_tick); + for_each_online_cpu(i) + seq_printf(m, + "Cpu%dClkTck\t: %016lx\n", + i, cpu_data(i).clock_tick); } -void __init smp_store_cpu_info(int id) -{ - int cpu_node; - - /* multiplier and counter set by - smp_setup_percpu_timer() */ - cpu_data(id).udelay_val = loops_per_jiffy; - - cpu_find_by_mid(id, &cpu_node); - cpu_data(id).clock_tick = prom_getintdefault(cpu_node, - "clock-frequency", 0); - - cpu_data(id).idle_volume = 1; - - cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", - 16 * 1024); - cpu_data(id).dcache_line_size = - prom_getintdefault(cpu_node, "dcache-line-size", 32); - cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", - 16 * 1024); - cpu_data(id).icache_line_size = - prom_getintdefault(cpu_node, "icache-line-size", 32); - cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", - 4 * 1024 * 1024); - cpu_data(id).ecache_line_size = - prom_getintdefault(cpu_node, "ecache-line-size", 64); - printk("CPU[%d]: Caches " - "D[sz(%d):line_sz(%d)] " - "I[sz(%d):line_sz(%d)] " - "E[sz(%d):line_sz(%d)]\n", - id, - cpu_data(id).dcache_size, cpu_data(id).dcache_line_size, - cpu_data(id).icache_size, cpu_data(id).icache_line_size, - cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); -} +static __cacheline_aligned_in_smp 
DEFINE_SPINLOCK(call_lock); -static void smp_setup_percpu_timer(void); +extern void setup_sparc64_timer(void); static volatile unsigned long callin_flag = 0; -void __init smp_callin(void) +void __cpuinit smp_callin(void) { int cpuid = hard_smp_processor_id(); __local_per_cpu_offset = __per_cpu_offset(cpuid); - if (tlb_type == hypervisor) { - sun4v_register_fault_status(); + if (tlb_type == hypervisor) sun4v_ktsb_register(); - } __flush_tlb_all(); - smp_setup_percpu_timer(); + setup_sparc64_timer(); if (cheetah_pcache_forced_on) cheetah_enable_pcache(); local_irq_enable(); - calibrate_delay(); - smp_store_cpu_info(cpuid); callin_flag = 1; __asm__ __volatile__("membar #Sync\n\t" "flush %%g6" : : : "memory"); @@ -154,7 +120,9 @@ void __init smp_callin(void) while (!cpu_isset(cpuid, smp_commenced_mask)) rmb(); + spin_lock(&call_lock); cpu_set(cpuid, cpu_online_map); + spin_unlock(&call_lock); /* idle thread is expected to have preempt disabled */ preempt_disable(); @@ -166,8 +134,6 @@ void cpu_panic(void) panic("SMP bolixed\n"); } -static unsigned long current_tick_offset __read_mostly; - /* This tick register synchronization scheme is taken entirely from * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. * @@ -250,7 +216,7 @@ void smp_synchronize_tick_client(void) } else adj = -delta; - tick_ops->add_tick(adj, current_tick_offset); + tick_ops->add_tick(adj); } #if DEBUG_TICK_SYNC t[i].rt = rt; @@ -268,8 +234,9 @@ void smp_synchronize_tick_client(void) t[i].rt, t[i].master, t[i].diff, t[i].lat); #endif - printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles," - "maxerr %lu cycles)\n", smp_processor_id(), delta, rt); + printk(KERN_INFO "CPU %d: synchronized TICK with master CPU " + "(last diff %ld cycles, maxerr %lu cycles)\n", + smp_processor_id(), delta, rt); } static void smp_start_sync_tick_client(int cpu); @@ -304,6 +271,68 @@ static void smp_synchronize_one_tick(int cpu) spin_unlock_irqrestore(&itc_sync_lock, flags); } +#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) +/* XXX Put this in some common place. 
XXX */ +static unsigned long kimage_addr_to_ra(void *p) +{ + unsigned long val = (unsigned long) p; + + return kern_base + (val - KERNBASE); +} + +static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg) +{ + extern unsigned long sparc64_ttable_tl0; + extern unsigned long kern_locked_tte_data; + struct hvtramp_descr *hdesc; + unsigned long trampoline_ra; + struct trap_per_cpu *tb; + u64 tte_vaddr, tte_data; + unsigned long hv_err; + int i; + + hdesc = kzalloc(sizeof(*hdesc) + + (sizeof(struct hvtramp_mapping) * + num_kernel_image_mappings - 1), + GFP_KERNEL); + if (!hdesc) { + printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " + "hvtramp_descr.\n"); + return; + } + + hdesc->cpu = cpu; + hdesc->num_mappings = num_kernel_image_mappings; + + tb = &trap_block[cpu]; + tb->hdesc = hdesc; + + hdesc->fault_info_va = (unsigned long) &tb->fault_info; + hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); + + hdesc->thread_reg = thread_reg; + + tte_vaddr = (unsigned long) KERNBASE; + tte_data = kern_locked_tte_data; + + for (i = 0; i < hdesc->num_mappings; i++) { + hdesc->maps[i].vaddr = tte_vaddr; + hdesc->maps[i].tte = tte_data; + tte_vaddr += 0x400000; + tte_data += 0x400000; + } + + trampoline_ra = kimage_addr_to_ra(hv_cpu_startup); + + hv_err = sun4v_cpu_start(cpu, trampoline_ra, + kimage_addr_to_ra(&sparc64_ttable_tl0), + __pa(hdesc)); + if (hv_err) + printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() " + "gives error %lu\n", hv_err); +} +#endif + extern unsigned long sparc64_cpu_startup; /* The OBP cpu startup callback truncates the 3rd arg cookie to @@ -314,35 +343,53 @@ static struct thread_info *cpu_new_thread = NULL; static int __devinit smp_boot_one_cpu(unsigned int cpu) { + struct trap_per_cpu *tb = &trap_block[cpu]; unsigned long entry = (unsigned long)(&sparc64_cpu_startup); unsigned long cookie = (unsigned long)(&cpu_new_thread); struct task_struct *p; - int timeout, ret, cpu_node; + int timeout, ret; p = fork_idle(cpu); + if (IS_ERR(p)) + return PTR_ERR(p); callin_flag = 0; cpu_new_thread = task_thread_info(p); - cpu_set(cpu, cpu_callout_map); - cpu_find_by_mid(cpu, &cpu_node); - prom_startcpu(cpu_node, entry, cookie); + if (tlb_type == hypervisor) { +#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) + if (ldom_domaining_enabled) + ldom_startcpu_cpuid(cpu, + (unsigned long) cpu_new_thread); + else +#endif + prom_startcpu_cpuid(cpu, entry, cookie); + } else { + struct device_node *dp = of_find_node_by_cpuid(cpu); + + prom_startcpu(dp->node, entry, cookie); + } - for (timeout = 0; timeout < 5000000; timeout++) { + for (timeout = 0; timeout < 50000; timeout++) { if (callin_flag) break; udelay(100); } + if (callin_flag) { ret = 0; } else { printk("Processor %d is stuck.\n", cpu); - cpu_clear(cpu, cpu_callout_map); ret = -ENODEV; } cpu_new_thread = NULL; + if (tb->hdesc) { + kfree(tb->hdesc); + tb->hdesc = NULL; + } + return ret; } @@ -412,7 +459,7 @@ again: } } -static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) +static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) { u64 pstate; int i; @@ -428,8 +475,8 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c */ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) { - u64 pstate, ver; - int nack_busy_id, is_jbus; + u64 pstate, ver, busy_mask; + int nack_busy_id, is_jbus, need_more; if (cpus_empty(mask)) return; @@ -445,6 +492,7 @@ static void 
cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); retry: + need_more = 0; __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" : : "r" (pstate), "i" (PSTATE_IE)); @@ -459,46 +507,67 @@ retry: "i" (ASI_INTR_W)); nack_busy_id = 0; + busy_mask = 0; { int i; for_each_cpu_mask(i, mask) { u64 target = (i << 14) | 0x70; - if (!is_jbus) + if (is_jbus) { + busy_mask |= (0x1UL << (i * 2)); + } else { target |= (nack_busy_id << 24); + busy_mask |= (0x1UL << + (nack_busy_id * 2)); + } __asm__ __volatile__( "stxa %%g0, [%0] %1\n\t" "membar #Sync\n\t" : /* no outputs */ : "r" (target), "i" (ASI_INTR_W)); nack_busy_id++; + if (nack_busy_id == 32) { + need_more = 1; + break; + } } } /* Now, poll for completion. */ { - u64 dispatch_stat; + u64 dispatch_stat, nack_mask; long stuck; stuck = 100000 * nack_busy_id; + nack_mask = busy_mask << 1; do { __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (dispatch_stat) : "i" (ASI_INTR_DISPATCH_STAT)); - if (dispatch_stat == 0UL) { + if (!(dispatch_stat & (busy_mask | nack_mask))) { __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); + if (unlikely(need_more)) { + int i, cnt = 0; + for_each_cpu_mask(i, mask) { + cpu_clear(i, mask); + cnt++; + if (cnt == 32) + break; + } + goto retry; + } return; } if (!--stuck) break; - } while (dispatch_stat & 0x5555555555555555UL); + } while (dispatch_stat & busy_mask); __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); - if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) { + if (dispatch_stat & busy_mask) { /* Busy bits will not clear, continue instead * of freezing up on this cpu. */ @@ -526,6 +595,8 @@ retry: if ((dispatch_stat & check_mask) == 0) cpu_clear(i, mask); this_busy_nack += 2; + if (this_busy_nack == 64) + break; } goto retry; @@ -533,133 +604,157 @@ retry: } } -#if 0 /* Multi-cpu list version. */ -static int init_cpu_list(u16 *list, cpumask_t mask) +static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) { - int i, cnt; - - cnt = 0; - for_each_cpu_mask(i, mask) - list[cnt++] = i; + struct trap_per_cpu *tb; + u16 *cpu_list; + u64 *mondo; + cpumask_t error_mask; + unsigned long flags, status; + int cnt, retries, this_cpu, prev_sent, i; - return cnt; -} - -static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask) -{ - int i; + if (cpus_empty(mask)) + return; - for (i = 0; i < orig_cnt; i++) { - if (list[i] == 0xffff) - cpu_clear(i, mask); - } + /* We have to do this whole thing with interrupts fully disabled. + * Otherwise if we send an xcall from interrupt context it will + * corrupt both our mondo block and cpu list state. + * + * One consequence of this is that we cannot use timeout mechanisms + * that depend upon interrupts being delivered locally. So, for + * example, we cannot sample jiffies and expect it to advance. + * + * Fortunately, udelay() uses %stick/%tick so we can use that. + */ + local_irq_save(flags); - return init_cpu_list(list, mask); -} - -static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) -{ - int this_cpu = get_cpu(); - struct trap_per_cpu *tb = &trap_block[this_cpu]; - u64 *mondo = __va(tb->cpu_mondo_block_pa); - u16 *cpu_list = __va(tb->cpu_list_pa); - int cnt, retries; + this_cpu = smp_processor_id(); + tb = &trap_block[this_cpu]; + mondo = __va(tb->cpu_mondo_block_pa); mondo[0] = data0; mondo[1] = data1; mondo[2] = data2; wmb(); + cpu_list = __va(tb->cpu_list_pa); + + /* Setup the initial cpu list. 
*/ + cnt = 0; + for_each_cpu_mask(i, mask) + cpu_list[cnt++] = i; + + cpus_clear(error_mask); retries = 0; - cnt = init_cpu_list(cpu_list, mask); + prev_sent = 0; do { - register unsigned long func __asm__("%o5"); - register unsigned long arg0 __asm__("%o0"); - register unsigned long arg1 __asm__("%o1"); - register unsigned long arg2 __asm__("%o2"); - - func = HV_FAST_CPU_MONDO_SEND; - arg0 = cnt; - arg1 = tb->cpu_list_pa; - arg2 = tb->cpu_mondo_block_pa; - - __asm__ __volatile__("ta %8" - : "=&r" (func), "=&r" (arg0), - "=&r" (arg1), "=&r" (arg2) - : "0" (func), "1" (arg0), - "2" (arg1), "3" (arg2), - "i" (HV_FAST_TRAP) - : "memory"); - if (likely(arg0 == HV_EOK)) - break; + int forward_progress, n_sent; - if (unlikely(++retries > 100)) { - printk("CPU[%d]: sun4v mondo error %lu\n", - this_cpu, func); + status = sun4v_cpu_mondo_send(cnt, + tb->cpu_list_pa, + tb->cpu_mondo_block_pa); + + /* HV_EOK means all cpus received the xcall, we're done. */ + if (likely(status == HV_EOK)) break; + + /* First, see if we made any forward progress. + * + * The hypervisor indicates successful sends by setting + * cpu list entries to the value 0xffff. + */ + n_sent = 0; + for (i = 0; i < cnt; i++) { + if (likely(cpu_list[i] == 0xffff)) + n_sent++; } - cnt = update_cpu_list(cpu_list, cnt, mask); + forward_progress = 0; + if (n_sent > prev_sent) + forward_progress = 1; - udelay(2 * cnt); - } while (1); + prev_sent = n_sent; - put_cpu(); -} -#else -/* Single-cpu list version. */ -static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) -{ - int this_cpu = get_cpu(); - struct trap_per_cpu *tb = &trap_block[this_cpu]; - u64 *mondo = __va(tb->cpu_mondo_block_pa); - u16 *cpu_list = __va(tb->cpu_list_pa); - int i; + /* If we get a HV_ECPUERROR, then one or more of the cpus + * in the list are in error state. Use the cpu_state() + * hypervisor call to find out which cpus are in error state. + */ + if (unlikely(status == HV_ECPUERROR)) { + for (i = 0; i < cnt; i++) { + long err; + u16 cpu; + + cpu = cpu_list[i]; + if (cpu == 0xffff) + continue; + + err = sun4v_cpu_state(cpu); + if (err >= 0 && + err == HV_CPU_STATE_ERROR) { + cpu_list[i] = 0xffff; + cpu_set(cpu, error_mask); + } + } + } else if (unlikely(status != HV_EWOULDBLOCK)) + goto fatal_mondo_error; + + /* Don't bother rewriting the CPU list, just leave the + * 0xffff and non-0xffff entries in there and the + * hypervisor will do the right thing. + * + * Only advance timeout state if we didn't make any + * forward progress. + */ + if (unlikely(!forward_progress)) { + if (unlikely(++retries > 10000)) + goto fatal_mondo_timeout; - mondo[0] = data0; - mondo[1] = data1; - mondo[2] = data2; - wmb(); + /* Delay a little bit to let other cpus catch up + * on their cpu mondo queue work. 
+ */ + udelay(2 * cnt); + } + } while (1); - for_each_cpu_mask(i, mask) { - int retries = 0; + local_irq_restore(flags); - do { - register unsigned long func __asm__("%o5"); - register unsigned long arg0 __asm__("%o0"); - register unsigned long arg1 __asm__("%o1"); - register unsigned long arg2 __asm__("%o2"); - - cpu_list[0] = i; - func = HV_FAST_CPU_MONDO_SEND; - arg0 = 1; - arg1 = tb->cpu_list_pa; - arg2 = tb->cpu_mondo_block_pa; - - __asm__ __volatile__("ta %8" - : "=&r" (func), "=&r" (arg0), - "=&r" (arg1), "=&r" (arg2) - : "0" (func), "1" (arg0), - "2" (arg1), "3" (arg2), - "i" (HV_FAST_TRAP) - : "memory"); - if (likely(arg0 == HV_EOK)) - break; + if (unlikely(!cpus_empty(error_mask))) + goto fatal_mondo_cpu_error; - if (unlikely(++retries > 100)) { - printk("CPU[%d]: sun4v mondo error %lu\n", - this_cpu, func); - break; - } + return; - udelay(2 * i); - } while (1); - } +fatal_mondo_cpu_error: + printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " + "were in error state\n", + this_cpu); + printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu); + for_each_cpu_mask(i, error_mask) + printk("%d ", i); + printk("]\n"); + return; - put_cpu(); +fatal_mondo_timeout: + local_irq_restore(flags); + printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " + " progress after %d retries.\n", + this_cpu, retries); + goto dump_cpu_list_and_out; + +fatal_mondo_error: + local_irq_restore(flags); + printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", + this_cpu, status); + printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " + "mondo_block_pa(%lx)\n", + this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); + +dump_cpu_list_and_out: + printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); + for (i = 0; i < cnt; i++) + printk("%u ", cpu_list[i]); + printk("]\n"); } -#endif /* Send cross call to all processors mentioned in MASK * except self. @@ -704,24 +799,27 @@ struct call_data_struct { int wait; }; -static DEFINE_SPINLOCK(call_lock); static struct call_data_struct *call_data; extern unsigned long xcall_call_function; -/* +/** + * smp_call_function(): Run a function on all other CPUs. + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @wait: If true, wait (atomically) until function has completed on other CPUs. + * + * Returns 0 on success, else a negative status code. Does not return until + * remote CPUs are nearly ready to execute <> or are or have executed. + * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ -static int smp_call_function_mask(void (*func)(void *info), void *info, - int nonatomic, int wait, cpumask_t mask) +static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info, + int wait, cpumask_t mask) { struct call_data_struct data; - int cpus = cpus_weight(mask) - 1; - long timeout; - - if (!cpus) - return 0; + int cpus; /* Can deadlock when called with interrupts disabled */ WARN_ON(irqs_disabled()); @@ -733,39 +831,29 @@ static int smp_call_function_mask(void (*func)(void *info), void *info, spin_lock(&call_lock); + cpu_clear(smp_processor_id(), mask); + cpus = cpus_weight(mask); + if (!cpus) + goto out_unlock; + call_data = &data; + mb(); smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask); - /* - * Wait for other cpus to complete function or at - * least snap the call data. 
- */ - timeout = 1000000; - while (atomic_read(&data.finished) != cpus) { - if (--timeout <= 0) - goto out_timeout; - barrier(); - udelay(1); - } + /* Wait for response */ + while (atomic_read(&data.finished) != cpus) + cpu_relax(); +out_unlock: spin_unlock(&call_lock); return 0; - -out_timeout: - spin_unlock(&call_lock); - printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n", - (long) num_online_cpus() - 1L, - (long) atomic_read(&data.finished)); - return 0; } -int smp_call_function(void (*func)(void *info), void *info, - int nonatomic, int wait) +int smp_call_function(void (*func)(void *info), void *info, int wait) { - return smp_call_function_mask(func, info, nonatomic, wait, - cpu_online_map); + return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map); } void smp_call_function_client(int irq, struct pt_regs *regs) @@ -787,22 +875,36 @@ void smp_call_function_client(int irq, struct pt_regs *regs) static void tsb_sync(void *info) { + struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; struct mm_struct *mm = info; - if (current->active_mm == mm) + /* It is not valid to test "currrent->active_mm == mm" here. + * + * The value of "current" is not changed atomically with + * switch_mm(). But that's OK, we just need to check the + * current cpu's trap block PGD physical address. + */ + if (tp->pgd_paddr == __pa(mm->pgd)) tsb_context_switch(mm); } void smp_tsb_sync(struct mm_struct *mm) { - smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); + sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask); } extern unsigned long xcall_flush_tlb_mm; extern unsigned long xcall_flush_tlb_pending; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_report_regs; +#ifdef CONFIG_MAGIC_SYSRQ +extern unsigned long xcall_fetch_glob_regs; +#endif extern unsigned long xcall_receive_signal; +extern unsigned long xcall_new_mmu_context_version; +#ifdef CONFIG_KGDB +extern unsigned long xcall_kgdb_capture; +#endif #ifdef DCACHE_ALIASING_POSSIBLE extern unsigned long xcall_flush_dcache_page_cheetah; @@ -814,7 +916,7 @@ extern atomic_t dcpage_flushes; extern atomic_t dcpage_flushes_xcall; #endif -static __inline__ void __local_flush_dcache_page(struct page *page) +static inline void __local_flush_dcache_page(struct page *page) { #ifdef DCACHE_ALIASING_POSSIBLE __flush_dcache_page(page_address(page), @@ -917,33 +1019,74 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) put_cpu(); } +static void __smp_receive_signal_mask(cpumask_t mask) +{ + smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask); +} + void smp_receive_signal(int cpu) { cpumask_t mask = cpumask_of_cpu(cpu); - if (cpu_online(cpu)) { - u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff); - - if (tlb_type == spitfire) - spitfire_xcall_deliver(data0, 0, 0, mask); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_xcall_deliver(data0, 0, 0, mask); - else if (tlb_type == hypervisor) - hypervisor_xcall_deliver(data0, 0, 0, mask); - } + if (cpu_online(cpu)) + __smp_receive_signal_mask(mask); } void smp_receive_signal_client(int irq, struct pt_regs *regs) { - /* Just return, rtrap takes care of the rest. */ clear_softint(1 << irq); } +void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) +{ + struct mm_struct *mm; + unsigned long flags; + + clear_softint(1 << irq); + + /* See if we need to allocate a new TLB context because + * the version of the one we are using is now out of date. 
+ */ + mm = current->active_mm; + if (unlikely(!mm || (mm == &init_mm))) + return; + + spin_lock_irqsave(&mm->context.lock, flags); + + if (unlikely(!CTX_VALID(mm->context))) + get_new_mmu_context(mm); + + spin_unlock_irqrestore(&mm->context.lock, flags); + + load_secondary_context(mm); + __flush_tlb_mm(CTX_HWBITS(mm->context), + SECONDARY_CONTEXT); +} + +void smp_new_mmu_context_version(void) +{ + smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); +} + +#ifdef CONFIG_KGDB +void kgdb_roundup_cpus(unsigned long flags) +{ + smp_cross_call(&xcall_kgdb_capture, 0, 0, 0); +} +#endif + void smp_report_regs(void) { smp_cross_call(&xcall_report_regs, 0, 0, 0); } +#ifdef CONFIG_MAGIC_SYSRQ +void smp_fetch_global_regs(void) +{ + smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); +} +#endif + /* We know that the window frames of the user have been flushed * to the stack before we get here because all callers of us * are flush_tlb_*() routines, and these run after flush_cache_*() @@ -1104,200 +1247,184 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs) preempt_enable(); } -#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier -#define prof_counter(__cpu) cpu_data(__cpu).counter +/* /proc/profile writes can call this, don't __init it please. */ +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} -void smp_percpu_timer_interrupt(struct pt_regs *regs) +void __init smp_prepare_cpus(unsigned int max_cpus) { - unsigned long compare, tick, pstate; - int cpu = smp_processor_id(); - int user = user_mode(regs); +} - /* - * Check for level 14 softint. - */ - { - unsigned long tick_mask = tick_ops->softint_mask; +void __devinit smp_prepare_boot_cpu(void) +{ +} - if (!(get_softint() & tick_mask)) { - extern void handler_irq(int, struct pt_regs *); +void __devinit smp_fill_in_sib_core_maps(void) +{ + unsigned int i; - handler_irq(14, regs); - return; + for_each_present_cpu(i) { + unsigned int j; + + cpus_clear(cpu_core_map[i]); + if (cpu_data(i).core_id == 0) { + cpu_set(i, cpu_core_map[i]); + continue; + } + + for_each_present_cpu(j) { + if (cpu_data(i).core_id == + cpu_data(j).core_id) + cpu_set(j, cpu_core_map[i]); } - clear_softint(tick_mask); } - do { - profile_tick(CPU_PROFILING, regs); - if (!--prof_counter(cpu)) { - irq_enter(); + for_each_present_cpu(i) { + unsigned int j; - if (cpu == boot_cpu_id) { - kstat_this_cpu.irqs[0]++; - timer_tick_interrupt(regs); - } + cpus_clear(per_cpu(cpu_sibling_map, i)); + if (cpu_data(i).proc_id == -1) { + cpu_set(i, per_cpu(cpu_sibling_map, i)); + continue; + } - update_process_times(user); + for_each_present_cpu(j) { + if (cpu_data(i).proc_id == + cpu_data(j).proc_id) + cpu_set(j, per_cpu(cpu_sibling_map, i)); + } + } +} - irq_exit(); +int __cpuinit __cpu_up(unsigned int cpu) +{ + int ret = smp_boot_one_cpu(cpu); - prof_counter(cpu) = prof_multiplier(cpu); + if (!ret) { + cpu_set(cpu, smp_commenced_mask); + while (!cpu_isset(cpu, cpu_online_map)) + mb(); + if (!cpu_isset(cpu, cpu_online_map)) { + ret = -ENODEV; + } else { + /* On SUN4V, writes to %tick and %stick are + * not allowed. + */ + if (tlb_type != hypervisor) + smp_synchronize_one_tick(cpu); } - - /* Guarantee that the following sequences execute - * uninterrupted. - */ - __asm__ __volatile__("rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - - compare = tick_ops->add_compare(current_tick_offset); - tick = tick_ops->get_tick(); - - /* Restore PSTATE_IE. 
*/ - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" - : /* no outputs */ - : "r" (pstate)); - } while (time_after_eq(tick, compare)); + } + return ret; } -static void __init smp_setup_percpu_timer(void) +#ifdef CONFIG_HOTPLUG_CPU +void cpu_play_dead(void) { int cpu = smp_processor_id(); unsigned long pstate; - prof_counter(cpu) = prof_multiplier(cpu) = 1; + idle_task_exit(); - /* Guarantee that the following sequences execute - * uninterrupted. - */ - __asm__ __volatile__("rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); + if (tlb_type == hypervisor) { + struct trap_per_cpu *tb = &trap_block[cpu]; + + sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, + tb->cpu_mondo_pa, 0); + sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO, + tb->dev_mondo_pa, 0); + sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR, + tb->resum_mondo_pa, 0); + sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR, + tb->nonresum_mondo_pa, 0); + } - tick_ops->init_tick(current_tick_offset); + cpu_clear(cpu, smp_commenced_mask); + membar_safe("#Sync"); - /* Restore PSTATE_IE. */ - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" - : /* no outputs */ - : "r" (pstate)); -} + local_irq_disable(); -void __init smp_tick_init(void) -{ - boot_cpu_id = hard_smp_processor_id(); - current_tick_offset = timer_tick_offset; + __asm__ __volatile__( + "rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); - cpu_set(boot_cpu_id, cpu_online_map); - prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1; + while (1) + barrier(); } -/* /proc/profile writes can call this, don't __init it please. */ -static DEFINE_SPINLOCK(prof_setup_lock); - -int setup_profiling_timer(unsigned int multiplier) +int __cpu_disable(void) { - unsigned long flags; + int cpu = smp_processor_id(); + cpuinfo_sparc *c; int i; - if ((!multiplier) || (timer_tick_offset / multiplier) < 1000) - return -EINVAL; + for_each_cpu_mask(i, cpu_core_map[cpu]) + cpu_clear(cpu, cpu_core_map[i]); + cpus_clear(cpu_core_map[cpu]); - spin_lock_irqsave(&prof_setup_lock, flags); - for (i = 0; i < NR_CPUS; i++) - prof_multiplier(i) = multiplier; - current_tick_offset = (timer_tick_offset / multiplier); - spin_unlock_irqrestore(&prof_setup_lock, flags); + for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) + cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); + cpus_clear(per_cpu(cpu_sibling_map, cpu)); - return 0; -} + c = &cpu_data(cpu); -/* Constrain the number of cpus to max_cpus. */ -void __init smp_prepare_cpus(unsigned int max_cpus) -{ - if (num_possible_cpus() > max_cpus) { - int instance, mid; - - instance = 0; - while (!cpu_find_by_instance(instance, NULL, &mid)) { - if (mid != boot_cpu_id) { - cpu_clear(mid, phys_cpu_present_map); - if (num_possible_cpus() <= max_cpus) - break; - } - instance++; - } - } - - smp_store_cpu_info(boot_cpu_id); -} + c->core_id = 0; + c->proc_id = -1; -/* Set this up early so that things like the scheduler can init - * properly. We use the same cpu mask for both the present and - * possible cpu map. - */ -void __init smp_setup_cpu_possible_map(void) -{ - int instance, mid; + spin_lock(&call_lock); + cpu_clear(cpu, cpu_online_map); + spin_unlock(&call_lock); - instance = 0; - while (!cpu_find_by_instance(instance, NULL, &mid)) { - if (mid < NR_CPUS) - cpu_set(mid, phys_cpu_present_map); - instance++; - } -} + smp_wmb(); -void __devinit smp_prepare_boot_cpu(void) -{ - int cpu = hard_smp_processor_id(); + /* Make sure no interrupts point to this cpu. 
*/ + fixup_irqs(); - if (cpu >= NR_CPUS) { - prom_printf("Serious problem, boot cpu id >= NR_CPUS\n"); - prom_halt(); - } - - current_thread_info()->cpu = cpu; - __local_per_cpu_offset = __per_cpu_offset(cpu); + local_irq_enable(); + mdelay(1); + local_irq_disable(); - cpu_set(smp_processor_id(), cpu_online_map); - cpu_set(smp_processor_id(), phys_cpu_present_map); + return 0; } -int __devinit __cpu_up(unsigned int cpu) +void __cpu_die(unsigned int cpu) { - int ret = smp_boot_one_cpu(cpu); + int i; - if (!ret) { - cpu_set(cpu, smp_commenced_mask); - while (!cpu_isset(cpu, cpu_online_map)) - mb(); - if (!cpu_isset(cpu, cpu_online_map)) { - ret = -ENODEV; - } else { - smp_synchronize_one_tick(cpu); + for (i = 0; i < 100; i++) { + smp_rmb(); + if (!cpu_isset(cpu, smp_commenced_mask)) + break; + msleep(100); + } + if (cpu_isset(cpu, smp_commenced_mask)) { + printk(KERN_ERR "CPU %u didn't die...\n", cpu); + } else { +#if defined(CONFIG_SUN_LDOMS) + unsigned long hv_err; + int limit = 100; + + do { + hv_err = sun4v_cpu_stop(cpu); + if (hv_err == HV_EOK) { + cpu_clear(cpu, cpu_present_map); + break; + } + } while (--limit > 0); + if (limit <= 0) { + printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n", + hv_err); } +#endif } - return ret; } +#endif void __init smp_cpus_done(unsigned int max_cpus) { - unsigned long bogosum = 0; - int i; - - for (i = 0; i < NR_CPUS; i++) { - if (cpu_online(i)) - bogosum += cpu_data(i).udelay_val; - } - printk("Total of %ld processors activated " - "(%lu.%02lu BogoMIPS).\n", - (long) num_online_cpus(), - bogosum/(500000/HZ), - (bogosum/(5000/HZ))%100); } void smp_send_reschedule(int cpu) @@ -1318,25 +1445,30 @@ unsigned long __per_cpu_shift __read_mostly; EXPORT_SYMBOL(__per_cpu_base); EXPORT_SYMBOL(__per_cpu_shift); -void __init setup_per_cpu_areas(void) +void __init real_setup_per_cpu_areas(void) { - unsigned long goal, size, i; + unsigned long paddr, goal, size, i; char *ptr; /* Copy section for each CPU (we discard the original) */ - goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); -#ifdef CONFIG_MODULES - if (goal < PERCPU_ENOUGH_ROOM) - goal = PERCPU_ENOUGH_ROOM; -#endif - __per_cpu_shift = 0; - for (size = 1UL; size < goal; size <<= 1UL) + goal = PERCPU_ENOUGH_ROOM; + + __per_cpu_shift = PAGE_SHIFT; + for (size = PAGE_SIZE; size < goal; size <<= 1UL) __per_cpu_shift++; - ptr = alloc_bootmem(size * NR_CPUS); + paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE); + if (!paddr) { + prom_printf("Cannot allocate per-cpu memory.\n"); + prom_halt(); + } + ptr = __va(paddr); __per_cpu_base = ptr - __per_cpu_start; for (i = 0; i < NR_CPUS; i++, ptr += size) memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + + /* Setup %g5 for the boot cpu. */ + __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); }
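
Note on the sun4v cross-call rework above: the new hypervisor_xcall_deliver() keeps resubmitting the same cpu list to sun4v_cpu_mondo_send() and relies on the hypervisor marking each successfully delivered entry with 0xffff; the retry/timeout counter is only advanced on a pass that made no forward progress at all. The following standalone C sketch shows that retry policy in isolation. It is illustrative only: simulate_mondo_send(), MAX_RETRIES and the fixed cpu list are hypothetical stand-ins for sun4v_cpu_mondo_send(), the kernel's retry limit and the real per-cpu mondo list, not kernel APIs.

/*
 * Sketch of the "only count retries without forward progress" policy
 * used by hypervisor_xcall_deliver() in the patch above.  Delivered
 * entries in the cpu list are marked 0xffff; a pass that delivers
 * nothing new burns one retry, a pass that delivers anything does not.
 */
#include <stdio.h>
#include <stdint.h>

#define SENT		0xffffu
#define MAX_RETRIES	10000	/* hypothetical stand-in for the kernel limit */

/* Pretend the hypervisor delivers at most one mondo per call. */
static int simulate_mondo_send(uint16_t *cpu_list, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (cpu_list[i] != SENT) {
			cpu_list[i] = SENT;	/* mark as delivered */
			return 1;		/* partial delivery, try again */
		}
	}
	return 0;				/* everything delivered */
}

int main(void)
{
	uint16_t cpu_list[] = { 1, 2, 5, 9 };
	int cnt = sizeof(cpu_list) / sizeof(cpu_list[0]);
	int retries = 0, prev_sent = 0;

	while (simulate_mondo_send(cpu_list, cnt) != 0) {
		int i, n_sent = 0;

		/* Count entries the "hypervisor" has marked delivered. */
		for (i = 0; i < cnt; i++)
			if (cpu_list[i] == SENT)
				n_sent++;

		/* Only burn a retry when this pass made no progress. */
		if (n_sent <= prev_sent && ++retries > MAX_RETRIES) {
			fprintf(stderr, "mondo timeout after %d retries\n",
				retries);
			return 1;
		}
		prev_sent = n_sent;
	}

	printf("all %d mondos delivered, %d no-progress retries\n",
	       cnt, retries);
	return 0;
}

The same shape appears in the patch: n_sent is recounted after every hypervisor call, forward_progress is derived from the comparison with prev_sent, and only the no-progress path increments retries before the udelay() back-off.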
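
The new smp_fill_in_sib_core_maps() builds cpu_core_map[] (and, analogously, cpu_sibling_map) by putting every present cpu into the mask of each cpu that shares its core_id (respectively proc_id), falling back to a singleton mask when the id is unknown. A minimal userspace sketch of that grouping follows; struct cpu_info and the uint64_t masks are invented stand-ins for cpuinfo_sparc and cpumask_t, used only to show the pairing logic.

/*
 * Sketch of the core-map grouping in smp_fill_in_sib_core_maps():
 * each cpu's mask contains every cpu sharing its core_id; a cpu whose
 * core_id is 0 (unknown) only maps to itself, as in the patch.
 */
#include <stdio.h>
#include <stdint.h>

struct cpu_info {
	int core_id;	/* 0 means "unknown", like the kernel default */
};

#define NCPUS 8

int main(void)
{
	struct cpu_info cpu[NCPUS] = {
		{ 1 }, { 1 }, { 2 }, { 2 }, { 0 }, { 3 }, { 3 }, { 3 },
	};
	uint64_t core_map[NCPUS] = { 0 };
	int i, j;

	for (i = 0; i < NCPUS; i++) {
		if (cpu[i].core_id == 0) {
			core_map[i] = 1ULL << i;	/* singleton mask */
			continue;
		}
		for (j = 0; j < NCPUS; j++)
			if (cpu[i].core_id == cpu[j].core_id)
				core_map[i] |= 1ULL << j;
	}

	for (i = 0; i < NCPUS; i++)
		printf("cpu%d core_map = %#llx\n",
		       i, (unsigned long long)core_map[i]);
	return 0;
}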