X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=arch%2Fs390%2Fkernel%2Fsmp.c;h=a8e6199755d4ecc86197aa0f95cfad26314df31b;hb=a077c1a075473910928676ed95acb393259d9310;hp=d0a2745aec7f5f3987adc6bc022aec7bbd536e3b;hpb=37a3302618a51520e2056494715ea6b4776dd8ab;p=safe%2Fjmp%2Flinux-2.6

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d0a2745..a8e6199 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,14 +59,11 @@ static struct task_struct *current_set[NR_CPUS];
 extern char vmhalt_cmd[];
 extern char vmpoff_cmd[];
 
-extern void reipl(unsigned long devno);
-extern void reipl_diag(void);
-
 static void smp_ext_bitcall(int, ec_bit_sig);
 static void smp_ext_bitcall_others(ec_bit_sig);
 
 /*
- * Structure and data for smp_call_function(). This is designed to minimise
+5B * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
  */
 static DEFINE_SPINLOCK(call_lock);
@@ -279,12 +276,7 @@ static void do_machine_restart(void * __unused)
 	 * interrupted by an external interrupt and s390irq
 	 * locks are always held disabled).
 	 */
-	reipl_diag();
-
-	if (MACHINE_IS_VM)
-		cpcmd ("IPL", NULL, 0, NULL);
-	else
-		reipl (0x10000 | S390_lowcore.ipl_device);
+	do_reipl();
 }
 
 void machine_restart_smp(char * __unused)
@@ -426,59 +418,49 @@ void smp_send_reschedule(int cpu)
 /*
  * parameter area for the set/clear control bit callbacks
  */
-typedef struct
-{
-	__u16 start_ctl;
-	__u16 end_ctl;
+struct ec_creg_mask_parms {
 	unsigned long orvals[16];
 	unsigned long andvals[16];
-} ec_creg_mask_parms;
+};
 
 /*
  * callback for setting/clearing control bits
  */
 void smp_ctl_bit_callback(void *info) {
-	ec_creg_mask_parms *pp;
+	struct ec_creg_mask_parms *pp = info;
 	unsigned long cregs[16];
 	int i;
 
-	pp = (ec_creg_mask_parms *) info;
-	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
-	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
+	__ctl_store(cregs, 0, 15);
+	for (i = 0; i <= 15; i++)
 		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
-	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
+	__ctl_load(cregs, 0, 15);
 }
 
 /*
  * Set a bit in a control register of all cpus
  */
-void smp_ctl_set_bit(int cr, int bit) {
-	ec_creg_mask_parms parms;
+void smp_ctl_set_bit(int cr, int bit)
+{
+	struct ec_creg_mask_parms parms;
 
-	parms.start_ctl = cr;
-	parms.end_ctl = cr;
+	memset(&parms.orvals, 0, sizeof(parms.orvals));
+	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.orvals[cr] = 1 << bit;
-	parms.andvals[cr] = -1L;
-	preempt_disable();
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_set_bit(cr, bit);
-	preempt_enable();
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
  * Clear a bit in a control register of all cpus
  */
-void smp_ctl_clear_bit(int cr, int bit) {
-	ec_creg_mask_parms parms;
+void smp_ctl_clear_bit(int cr, int bit)
+{
+	struct ec_creg_mask_parms parms;
 
-	parms.start_ctl = cr;
-	parms.end_ctl = cr;
-	parms.orvals[cr] = 0;
+	memset(&parms.orvals, 0, sizeof(parms.orvals));
+	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.andvals[cr] = ~(1L << bit);
-	preempt_disable();
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_clear_bit(cr, bit);
-	preempt_enable();
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
@@ -658,14 +640,16 @@ __cpu_up(unsigned int cpu)
 	sf->gprs[9] = (unsigned long) sf;
 	cpu_lowcore->save_area[15] = (unsigned long) sf;
 	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
-	__asm__ __volatile__("stam 0,15,0(%0)"
-			     : : "a" (&cpu_lowcore->access_regs_save_area)
-			     : "memory");
+	asm volatile(
+		"	stam 0,15,0(%0)"
+		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
 	eieio();
-	signal_processor(cpu,sigp_restart);
+
+	while (signal_processor(cpu,sigp_restart) == sigp_busy)
+		udelay(10);
 
 	while (!cpu_online(cpu))
 		cpu_relax();
@@ -677,17 +661,21 @@ static unsigned int __initdata possible_cpus;
 
 void __init smp_setup_cpu_possible_map(void)
 {
-	unsigned int pcpus, cpu;
+	unsigned int phy_cpus, pos_cpus, cpu;
 
-	pcpus = min(smp_count_cpus() + additional_cpus, (unsigned int) NR_CPUS);
+	phy_cpus = smp_count_cpus();
+	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
 
 	if (possible_cpus)
-		pcpus = min(possible_cpus, (unsigned int) NR_CPUS);
+		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
 
-	for (cpu = 0; cpu < pcpus; cpu++)
+	for (cpu = 0; cpu < pos_cpus; cpu++)
 		cpu_set(cpu, cpu_possible_map);
 
-	cpu_present_map = cpu_possible_map;
+	phy_cpus = min(phy_cpus, pos_cpus);
+
+	for (cpu = 0; cpu < phy_cpus; cpu++)
+		cpu_set(cpu, cpu_present_map);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -710,7 +698,7 @@ int
 __cpu_disable(void)
 {
 	unsigned long flags;
-	ec_creg_mask_parms cr_parms;
+	struct ec_creg_mask_parms cr_parms;
 	int cpu = smp_processor_id();
 
 	spin_lock_irqsave(&smp_reserve_lock, flags);
@@ -726,30 +714,21 @@ __cpu_disable(void)
 		pfault_fini();
 #endif
 
-	/* disable all external interrupts */
+	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
+	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
 
-	cr_parms.start_ctl = 0;
-	cr_parms.end_ctl = 0;
+	/* disable all external interrupts */
 	cr_parms.orvals[0] = 0;
 	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
 				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
-	smp_ctl_bit_callback(&cr_parms);
-
 	/* disable all I/O interrupts */
-
-	cr_parms.start_ctl = 6;
-	cr_parms.end_ctl = 6;
 	cr_parms.orvals[6] = 0;
 	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
 				1<<27 | 1<<26 | 1<<25 | 1<<24);
-	smp_ctl_bit_callback(&cr_parms);
-
 	/* disable most machine checks */
-
-	cr_parms.start_ctl = 14;
-	cr_parms.end_ctl = 14;
 	cr_parms.orvals[14] = 0;
 	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+
 	smp_ctl_bit_callback(&cr_parms);
 
 	spin_unlock_irqrestore(&smp_reserve_lock, flags);
@@ -795,9 +774,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 */
 	print_cpu_info(&S390_lowcore.cpu_data);
 
-	for(i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_possible_cpu(i) {
 		lowcore_ptr[i] = (struct _lowcore *)
 			__get_free_pages(GFP_KERNEL|GFP_DMA,
 					sizeof(void*) == 8 ? 1 : 0);
@@ -827,7 +804,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		if (cpu != smp_processor_id())
 			smp_create_idle(cpu);
 }
@@ -843,6 +820,7 @@ void __devinit smp_prepare_boot_cpu(void)
 
 void smp_cpus_done(unsigned int max_cpus)
 {
+	cpu_present_map = cpu_possible_map;
 }
 
 /*
@@ -863,8 +841,8 @@ static int __init topology_init(void)
 	int cpu;
 	int ret;
 
-	for_each_cpu(cpu) {
-		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+	for_each_possible_cpu(cpu) {
+		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
 		if (ret)
 			printk(KERN_WARNING "topology_init: register_cpu %d "
 			       "failed (%d)\n", cpu, ret);
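For readers who want to experiment with the reworked interface: the patch drops the start_ctl/end_ctl range from ec_creg_mask_parms in favour of full 16-entry orvals/andvals mask arrays, and smp_ctl_set_bit()/smp_ctl_clear_bit() now hand one such mask pair to on_each_cpu(smp_ctl_bit_callback, ...) instead of pairing smp_call_function() with a local __ctl_set_bit()/__ctl_clear_bit(). The standalone C sketch below is not part of the patch; it mirrors the mask building and the callback's update loop on an ordinary array, and the register contents and bit number are made up for illustration:

/*
 * Illustrative sketch only -- mimics the orvals/andvals masking done by
 * the new smp_ctl_bit_callback(), but on a local array instead of the
 * real s390 control registers.
 */
#include <stdio.h>
#include <string.h>

struct ec_creg_mask_parms {
	unsigned long orvals[16];	/* bits to set in each control register */
	unsigned long andvals[16];	/* bits to keep in each control register */
};

static void apply_parms(unsigned long cregs[16], struct ec_creg_mask_parms *pp)
{
	int i;

	/* the same update the callback performs on every CPU */
	for (i = 0; i < 16; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
}

int main(void)
{
	struct ec_creg_mask_parms parms;
	unsigned long cregs[16] = { 0 };
	int cr = 0, bit = 13;	/* hypothetical register/bit, for illustration only */

	/* neutral masks: OR in nothing, AND away nothing ... */
	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	/* ... then request a single bit, as smp_ctl_set_bit(cr, bit) does */
	parms.orvals[cr] = 1UL << bit;

	apply_parms(cregs, &parms);
	printf("cr%d = %#lx\n", cr, cregs[cr]);
	return 0;
}

The neutral masks (all-zero orvals, all-ones andvals) are what make a single callback invocation safe for every control register: only the entries explicitly overridden afterwards, as smp_ctl_set_bit(), smp_ctl_clear_bit() and __cpu_disable() do in the patch, have any effect.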