/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
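
/*
 * Illustrative sketch (not part of the original file): the logical to
 * physical translation happens only inside the sigp wrappers. Simplified,
 * and assuming a hypothetical raw_sigp() helper for the actual sigp
 * instruction, signal_processor() amounts to:
 *
 *	static inline sigp_ccode
 *	signal_processor(__u16 cpu_addr, sigp_order_code order_code)
 *	{
 *		return raw_sigp(__cpu_logical_map[cpu_addr], order_code);
 *	}
 *
 * Everything else in this file indexes per-cpu arrays by the logical
 * cpu number directly.
 */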

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}

static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context,
	 * caller must disable preemption
	 */
	WARN_ON(irqs_disabled() || in_irq() || preemptible());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock_bh(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();

	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();

	spin_unlock_bh(&call_lock);

out:
	local_irq_disable();
	if (local)
		func(info);
	local_irq_enable();
}

/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. Must be called with preemption disabled.
 * You may call it from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
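
/*
 * Usage sketch (illustrative, not part of the original file): run a fast
 * handler on every other online cpu and wait for completion. The names
 * are hypothetical; the handler must neither block nor enable interrupts.
 *
 *	static atomic_t hypothetical_counter = ATOMIC_INIT(0);
 *
 *	static void hypothetical_handler(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	preempt_disable();
 *	smp_call_function(hypothetical_handler, &hypothetical_counter, 0, 1);
 *	preempt_enable();
 */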

/*
 * smp_call_function_on:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 * @cpu: the CPU where func should run
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. Must be called with preemption disabled.
 * You may call it from a bottom half.
 */
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
			 int wait, int cpu)
{
	cpumask_t map = CPU_MASK_NONE;

	cpu_set(cpu, map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
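
/*
 * Usage sketch (illustrative): the same hypothetical handler, but on one
 * specific cpu. If @cpu is the current cpu, __smp_call_function_map()
 * simply runs the handler locally with interrupts disabled.
 *
 *	preempt_disable();
 *	smp_call_function_on(hypothetical_handler, &hypothetical_counter,
 *			     0, 1, cpu);
 *	preempt_enable();
 */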

static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * This function sends a 'stop' sigp to all other CPUs in the system.
 * It goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char * __unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

#ifndef CONFIG_64BIT
/*
 * This function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * Parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * Callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
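
/*
 * Worked example (illustrative, not from the original source):
 * smp_ctl_set_bit(0, 13) sets parms.orvals[0] = 0x2000 and leaves every
 * andvals entry at ~0UL, so each cpu executes
 * cregs[0] = (cregs[0] & ~0UL) | 0x2000 in smp_ctl_bit_callback(),
 * switching on only bit 13 of control register 0. The inverse,
 * smp_ctl_clear_bit(0, 13), works via andvals[0] = ~0x2000L instead.
 */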

/*
 * Let's check how many CPUs we have.
 */
static unsigned int
__init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) ==
		    sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * Don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
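
/*
 * Usage sketch (illustrative): reserve any online cpu, run a handler
 * there, then drop the reservation. smp_get_cpu() returns -ENODEV when
 * no cpu in the mask is online, so the caller must check the result.
 *
 *	int cpu = smp_get_cpu(CPU_MASK_ALL);
 *
 *	if (cpu >= 0) {
 *		preempt_disable();
 *		smp_call_function_on(hypothetical_handler, NULL, 0, 1, cpu);
 *		preempt_enable();
 *		smp_put_cpu(cpu);
 *	}
 */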

static int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}
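
/*
 * Worked example (illustrative): if smp_count_cpus() detects two
 * physical cpus and the kernel is booted with "additional_cpus=2",
 * cpus 0-3 become possible and cpus 0-1 present. Booting with
 * "possible_cpus=3" instead overrides the sum, making cpus 0-2 possible.
 */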

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int
__cpu_disable(void)
{
	unsigned long flags;
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);

	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * The frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * Usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		c->hotpluggable = 1;
		ret = register_cpu(c, cpu);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);