diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index e9d37bf..d7ad42b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -21,7 +21,6 @@
  * 05/01/30 Suresh Siddha
  *	Setup cpu_sibling_map and cpu_core_map
  */
-#include
 
 #include
 #include
@@ -36,7 +35,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -60,6 +58,7 @@
 #include
 #include
 #include
+#include
 
 #define SMP_DEBUG 0
 
@@ -121,11 +120,10 @@ static volatile unsigned long go[SLAVE + 1];
 
 #define DEBUG_ITC_SYNC	0
 
-extern void __devinit calibrate_delay (void);
 extern void start_ap (void);
 extern unsigned long ia64_iobase;
 
-task_t *task_for_booting_cpu;
+struct task_struct *task_for_booting_cpu;
 
 /*
  * State for each CPU
  */
@@ -135,13 +133,14 @@ DEFINE_PER_CPU(int, cpu_state);
 /* Bitmasks of currently online, and possible CPUs */
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);
 
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+
 int smp_num_siblings = 1;
-int smp_num_cpucores = 1;
 
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
@@ -372,10 +371,11 @@ smp_setup_percpu_timer (void)
 {
 }
 
-static void __devinit
+static void __cpuinit
 smp_callin (void)
 {
 	int cpuid, phys_id, itc_master;
+	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
 	extern void ia64_init_itm(void);
 	extern volatile int time_keeper_id;
 
@@ -396,9 +396,13 @@ smp_callin (void)
 	fix_b0_for_bsp();
 
 	lock_ipi_calllock();
+	spin_lock(&vector_lock);
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
-	unlock_ipi_calllock();
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
+	spin_unlock(&vector_lock);
+	unlock_ipi_calllock();
 
 	smp_setup_percpu_timer();
 
@@ -425,7 +429,21 @@ smp_callin (void)
 	 * Get our bogomips.
 	 */
 	ia64_init_itm();
-	calibrate_delay();
+
+	/*
+	 * Delay calibration can be skipped if new processor is identical to the
+	 * previous processor.
+	 */
+	last_cpuinfo = cpu_data(cpuid - 1);
+	this_cpuinfo = local_cpu_data;
+	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
+	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
+	    last_cpuinfo->features != this_cpuinfo->features ||
+	    last_cpuinfo->revision != this_cpuinfo->revision ||
+	    last_cpuinfo->family != this_cpuinfo->family ||
+	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
+	    last_cpuinfo->model != this_cpuinfo->model)
+		calibrate_delay();
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
 
 #ifdef CONFIG_IA32_SUPPORT
@@ -443,7 +461,7 @@ smp_callin (void)
 /*
  * Activate a secondary processor.  head.S calls this.
  */
-int __devinit
+int __cpuinit
 start_secondary (void *unused)
 {
 	/* Early console may use I/O ports */
@@ -458,35 +476,37 @@ start_secondary (void *unused)
 	return 0;
 }
 
-struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
 	return NULL;
 }
 
 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };
 
-void
-do_fork_idle(void *_c_idle)
+void __cpuinit
+do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
 }
 
-static int __devinit
+static int __cpuinit
 do_boot_cpu (int sapicid, int cpu)
 {
 	int timeout;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu	= cpu,
 		.done	= COMPLETION_INITIALIZER(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
 	c_idle.idle = get_idle_for_cpu(cpu);
 	if (c_idle.idle) {
@@ -498,9 +518,9 @@ do_boot_cpu (int sapicid, int cpu)
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}
 
@@ -558,9 +578,6 @@ smp_build_cpu_map (void)
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		ia64_cpu_to_sapicid[cpu] = -1;
-#ifdef CONFIG_HOTPLUG_CPU
-		cpu_set(cpu, cpu_possible_map);
-#endif
 	}
 
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -627,42 +644,18 @@ void __devinit smp_prepare_boot_cpu(void)
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 }
 
-/*
- * mt_info[] is a temporary store for all info returned by
- * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
- * specific cpu comes.
- */
-static struct {
-	__u32 socket_id;
-	__u16 core_id;
-	__u16 thread_id;
-	__u16 proc_fixed_addr;
-	__u8 valid;
-} mt_info[NR_CPUS] __devinitdata;
-
 #ifdef CONFIG_HOTPLUG_CPU
 static inline void
-remove_from_mtinfo(int cpu)
-{
-	int i;
-
-	for_each_cpu(i)
-		if (mt_info[i].valid && mt_info[i].socket_id ==
-		    cpu_data(cpu)->socket_id)
-			mt_info[i].valid = 0;
-}
-
-static inline void
 clear_cpu_sibling_map(int cpu)
 {
 	int i;
 
-	for_each_cpu_mask(i, cpu_sibling_map[cpu])
-		cpu_clear(cpu, cpu_sibling_map[i]);
+	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
 	for_each_cpu_mask(i, cpu_core_map[cpu])
 		cpu_clear(cpu, cpu_core_map[i]);
 
-	cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
 
 static void
@@ -673,7 +666,7 @@ remove_siblinginfo(int cpu)
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
 		cpu_clear(cpu, cpu_core_map[cpu]);
-		cpu_clear(cpu, cpu_sibling_map[cpu]);
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
 		return;
 	}
 
@@ -681,12 +674,6 @@ remove_siblinginfo(int cpu)
 
 	/* remove it from all sibling map's */
 	clear_cpu_sibling_map(cpu);
-
-	/* if this cpu is the last in the core group, remove all its info
-	 * from mt_info structure
-	 */
-	if (last)
-		remove_from_mtinfo(cpu);
 }
 
 extern void fixup_irqs(void);
@@ -710,16 +697,16 @@ int migrate_platform_irqs(unsigned int cpu)
 		new_cpei_cpu = any_online_cpu(cpu_online_map);
 		mask = cpumask_of_cpu(new_cpei_cpu);
 		set_cpei_target_cpu(new_cpei_cpu);
-		desc = irq_descp(ia64_cpe_irq);
+		desc = irq_desc + ia64_cpe_irq;
 		/*
-		 * Switch for now, immediatly, we need to do fake intr
+		 * Switch for now, immediately, we need to do fake intr
		 * as other interrupts, but need to study CPEI behaviour with
 		 * polling before making changes.
 		 */
 		if (desc) {
-			desc->handler->disable(ia64_cpe_irq);
-			desc->handler->set_affinity(ia64_cpe_irq, mask);
-			desc->handler->enable(ia64_cpe_irq);
+			desc->chip->disable(ia64_cpe_irq);
+			desc->chip->set_affinity(ia64_cpe_irq, mask);
+			desc->chip->enable(ia64_cpe_irq);
 			printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
 		}
 	}
@@ -744,6 +731,11 @@ int __cpu_disable(void)
 		return (-EBUSY);
 	}
 
+	if (ia64_platform_is("sn2")) {
+		if (!sn_cpu_disable_allowed(cpu))
+			return -EBUSY;
+	}
+
 	cpu_clear(cpu, cpu_online_map);
 
 	if (migrate_platform_irqs(cpu)) {
@@ -774,17 +766,6 @@ void __cpu_die(unsigned int cpu)
 	}
 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
-#else /* !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We said "no" in __cpu_disable */
-	BUG();
-}
 #endif /* CONFIG_HOTPLUG_CPU */
 
 void
@@ -815,14 +796,14 @@ set_cpu_sibling_map(int cpu)
 			cpu_set(i, cpu_core_map[cpu]);
 			cpu_set(cpu, cpu_core_map[i]);
 			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 			}
 		}
 	}
 }
 
-int __devinit
+int __cpuinit
 __cpu_up (unsigned int cpu)
 {
 	int ret;
@@ -847,7 +828,7 @@ __cpu_up (unsigned int cpu)
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 		cpu_set(cpu, cpu_core_map[cpu]);
 		return 0;
 	}
@@ -858,7 +839,7 @@ __cpu_up (unsigned int cpu)
 }
 
 /*
- * Assume that CPU's have been discovered by some platform-dependent interface.  For
+ * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
@@ -872,7 +853,7 @@ init_smp_config(void)
 	} *ap_startup;
 	long sal_ret;
 
-	/* Tell SAL where to drop the AP's.  */
+	/* Tell SAL where to drop the APs.  */
 	ap_startup = (struct fptr *) start_ap;
 	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
 				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
@@ -881,40 +862,6 @@ init_smp_config(void)
 		       ia64_sal_strerror(sal_ret));
 }
 
-static inline int __devinit
-check_for_mtinfo_index(void)
-{
-	int i;
-
-	for_each_cpu(i)
-		if (!mt_info[i].valid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Search the mt_info to find out if this socket's cid/tid information is
- * cached or not. If the socket exists, fill in the core_id and thread_id
- * in cpuinfo
- */
-static int __devinit
-check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
-{
-	int i;
-	__u32 sid = c->socket_id;
-
-	for_each_cpu(i) {
-		if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
-		    && mt_info[i].socket_id == sid) {
-			c->core_id = mt_info[i].core_id;
-			c->thread_id = mt_info[i].thread_id;
-			return 1; /* not a new socket */
-		}
-	}
-	return 0;
-}
-
 /*
 * identify_siblings(cpu) gets called from identify_cpu. This populates the
 * information related to logical execution units in per_cpu_data structure.
@@ -924,63 +871,64 @@ identify_siblings(struct cpuinfo_ia64 *c)
 {
 	s64 status;
 	u16 pltid;
-	u64 proc_fixed_addr;
-	int count, i;
 	pal_logical_to_physical_t info;
 
-	if (smp_num_cpucores == 1 && smp_num_siblings == 1)
-		return;
+	status = ia64_pal_logical_to_phys(-1, &info);
+	if (status != PAL_STATUS_SUCCESS) {
+		if (status != PAL_STATUS_UNIMPLEMENTED) {
+			printk(KERN_ERR
+				"ia64_pal_logical_to_phys failed with %ld\n",
+				status);
+			return;
+		}
 
-	if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
-		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
-		       status);
-		return;
-	}
-	if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
-		printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
-		return;
+		info.overview_ppid = 0;
+		info.overview_cpp = 1;
+		info.overview_tpc = 1;
 	}
-	if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
-		printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
+
+	status = ia64_sal_physical_id_info(&pltid);
+	if (status != PAL_STATUS_SUCCESS) {
+		if (status != PAL_STATUS_UNIMPLEMENTED)
+			printk(KERN_ERR
+				"ia64_sal_pltid failed with %ld\n",
+				status);
 		return;
 	}
 
 	c->socket_id = (pltid << 8) | info.overview_ppid;
-	c->cores_per_socket = info.overview_cpp;
-	c->threads_per_core = info.overview_tpc;
-	count = c->num_log = info.overview_num_log;
 
-	/* If the thread and core id information is already cached, then
-	 * we will simply update cpu_info and return. Otherwise, we will
-	 * do the PAL calls and cache core and thread id's of all the siblings.
-	 */
-	if (check_for_new_socket(proc_fixed_addr, c))
+	if (info.overview_cpp == 1 && info.overview_tpc == 1)
 		return;
 
-	for (i = 0; i < count; i++) {
-		int index;
-
-		if (i && (status = ia64_pal_logical_to_phys(i, &info))
-			  != PAL_STATUS_SUCCESS) {
-			printk(KERN_ERR "ia64_pal_logical_to_phys failed"
-			       " with %ld\n", status);
-			return;
-		}
-		if (info.log2_la == proc_fixed_addr) {
-			c->core_id = info.log1_cid;
-			c->thread_id = info.log1_tid;
-		}
+	c->cores_per_socket = info.overview_cpp;
+	c->threads_per_core = info.overview_tpc;
+	c->num_log = info.overview_num_log;
 
-		index = check_for_mtinfo_index();
-		/* We will not do the mt_info caching optimization in this case.
-		 */
-		if (index < 0)
-			continue;
+	c->core_id = info.log1_cid;
+	c->thread_id = info.log1_tid;
+}
 
-		mt_info[index].valid = 1;
-		mt_info[index].socket_id = c->socket_id;
-		mt_info[index].core_id = info.log1_cid;
-		mt_info[index].thread_id = info.log1_tid;
-		mt_info[index].proc_fixed_addr = info.log2_la;
+/*
+ * returns non zero, if multi-threading is enabled
+ * on at least one physical package. Due to hotplug cpu
+ * and (maxcpus=), all threads may not necessarily be enabled
+ * even though the processor supports multi-threading.
+ */
+int is_multithreading_enabled(void)
+{
+	int i, j;
+
+	for_each_present_cpu(i) {
+		for_each_present_cpu(j) {
+			if (j == i)
+				continue;
+			if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
+				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
+					return 1;
+			}
+		}
 	}
+	return 0;
 }
+EXPORT_SYMBOL_GPL(is_multithreading_enabled);