* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
+#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
-
-/*
- * bitmask of present and online CPUs.
- * The present bitmask indicates that the CPU is physically present.
- * The online bitmask indicates that the CPU is up and running.
- */
-cpumask_t cpu_possible_map;
-cpumask_t cpu_online_map;
+#include <asm/localtimer.h>
+#include <asm/smp_plat.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
IPI_TIMER,
IPI_RESCHEDULE,
IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
};
-struct smp_call_struct {
- void (*func)(void *info);
- void *info;
- int wait;
- cpumask_t pending;
- cpumask_t unfinished;
-};
-
-static struct smp_call_struct * volatile smp_call_function_data;
-static DEFINE_SPINLOCK(smp_call_function_lock);
-
int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
* a 1:1 mapping for the physical address of the kernel.
*/
pgd = pgd_alloc(&init_mm);
- pmd = pmd_offset(pgd, PHYS_OFFSET);
+ pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
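+	/*
+	 * Clean the new page table entry out of the D-cache and the outer
+	 * (L2) cache so that the incoming CPU, whose caches may still be
+	 * off, sees the 1:1 mapping when it enables its MMU.
+	 */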
+ flush_pmd_entry(pmd);
+ outer_clean_range(__pa(pmd), __pa(pmd + 1));
/*
* We need to tell the secondary core where to find
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd);
- wmb();
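+	/*
+	 * A write barrier is no longer enough: the secondary core reads
+	 * secondary_data before it has joined the coherency domain, so the
+	 * structure must be cleaned out to main memory (L1 and outer cache).
+	 */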
+ __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
+ outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
/*
* Now bring the CPU into our world.
secondary_data.stack = NULL;
secondary_data.pgdir = 0;
- *pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
- pgd_free(pgd);
+ *pmd = __pmd(0);
+ clean_pmd_entry(pmd);
+ pgd_free(&init_mm, pgd);
if (ret) {
printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
/*
* __cpu_disable runs on the processor to be shutdown.
*/
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
struct task_struct *p;
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
*/
- cpu_clear(cpu, cpu_online_map);
+ set_cpu_online(cpu, false);
/*
* OK - migrate IRQs away from this CPU
/*
* Stop the local timer for this CPU.
*/
- local_timer_stop(cpu);
+ local_timer_stop();
/*
* Flush user cache and TLB mappings, and then remove this CPU
read_lock(&tasklist_lock);
for_each_process(p) {
if (p->mm)
- cpu_clear(cpu, p->mm->cpu_vm_mask);
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
}
read_unlock(&tasklist_lock);
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
{
if (!platform_cpu_kill(cpu))
printk("CPU%u: unable to kill\n", cpu);
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
-void __cpuexit cpu_die(void)
+void __ref cpu_die(void)
{
unsigned int cpu = smp_processor_id();
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- cpu_set(cpu, mm->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu_switch_mm(mm->pgd, mm);
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
/*
* Enable local interrupts.
*/
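+	/* Let CPU_STARTING notifiers know this CPU is about to go online. */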
+ notify_cpu_starting(cpu);
local_irq_enable();
local_fiq_enable();
+ /*
+	 * Set up the per-CPU timer for this CPU.
+ */
+ percpu_timer_setup();
+
calibrate_delay();
smp_store_cpu_info(cpu);
/*
* OK, now it's safe to let the boot CPU continue
*/
- cpu_set(cpu, cpu_online_map);
-
- /*
- * Setup local timer for this CPU.
- */
- local_timer_setup(cpu);
+ set_cpu_online(cpu, true);
/*
* OK, it's off to the idle thread for us
per_cpu(cpu_data, cpu).idle = current;
}
-static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
+static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
unsigned long flags;
unsigned int cpu;
local_irq_save(flags);
- for_each_cpu_mask(cpu, callmap) {
+ for_each_cpu(cpu, mask) {
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
spin_lock(&ipi->lock);
/*
* Call the platform specific cross-CPU call function.
*/
- smp_cross_call(callmap);
+ smp_cross_call(mask);
local_irq_restore(flags);
}
-/*
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler, nor from a bottom half handler.
- */
-static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
- int retry, int wait, cpumask_t callmap)
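+/*
+ * Hooks used by the generic cross-call code in kernel/smp.c: the call
+ * queues are managed generically, the architecture only raises the IPIs.
+ */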
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- struct smp_call_struct data;
- unsigned long timeout;
- int ret = 0;
-
- data.func = func;
- data.info = info;
- data.wait = wait;
-
- cpu_clear(smp_processor_id(), callmap);
- if (cpus_empty(callmap))
- goto out;
-
- data.pending = callmap;
- if (wait)
- data.unfinished = callmap;
-
- /*
- * try to get the mutex on smp_call_function_data
- */
- spin_lock(&smp_call_function_lock);
- smp_call_function_data = &data;
-
- send_ipi_message(callmap, IPI_CALL_FUNC);
-
- timeout = jiffies + HZ;
- while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
- barrier();
-
- /*
- * did we time out?
- */
- if (!cpus_empty(data.pending)) {
- /*
- * this may be causing our panic - report it
- */
- printk(KERN_CRIT
- "CPU%u: smp_call_function timeout for %p(%p)\n"
- " callmap %lx pending %lx, %swait\n",
- smp_processor_id(), func, info, *cpus_addr(callmap),
- *cpus_addr(data.pending), wait ? "" : "no ");
-
- /*
- * TRACE
- */
- timeout = jiffies + (5 * HZ);
- while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
- barrier();
-
- if (cpus_empty(data.pending))
- printk(KERN_CRIT " RESOLVED\n");
- else
- printk(KERN_CRIT " STILL STUCK\n");
- }
-
- /*
- * whatever happened, we're done with the data, so release it
- */
- smp_call_function_data = NULL;
- spin_unlock(&smp_call_function_lock);
-
- if (!cpus_empty(data.pending)) {
- ret = -ETIMEDOUT;
- goto out;
- }
-
- if (wait)
- while (!cpus_empty(data.unfinished))
- barrier();
- out:
-
- return 0;
+ send_ipi_message(mask, IPI_CALL_FUNC);
}
-int smp_call_function(void (*func)(void *info), void *info, int retry,
- int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- return smp_call_function_on_cpu(func, info, retry, wait,
- cpu_online_map);
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
void show_ipi_list(struct seq_file *p)
seq_putc(p, '\n');
}
-static void ipi_timer(struct pt_regs *regs)
-{
- int user = user_mode(regs);
+/*
+ * Timer (local or broadcast) support
+ */
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+static void ipi_timer(void)
+{
+ struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
irq_enter();
- profile_tick(CPU_PROFILING, regs);
- update_process_times(user);
+ evt->event_handler(evt);
irq_exit();
}
#ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
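+	/* Save the interrupted register state so get_irq_regs() works here. */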
+ struct pt_regs *old_regs = set_irq_regs(regs);
int cpu = smp_processor_id();
if (local_timer_ack()) {
irq_stat[cpu].local_timer_irqs++;
- ipi_timer(regs);
+ ipi_timer();
}
+
+ set_irq_regs(old_regs);
}
#endif
-/*
- * ipi_call_function - handle IPI from smp_call_function()
- *
- * Note that we copy data out of the cross-call structure and then
- * let the caller know that we're here and have done with their data
- */
-static void ipi_call_function(unsigned int cpu)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void smp_timer_broadcast(const struct cpumask *mask)
{
- struct smp_call_struct *data = smp_call_function_data;
- void (*func)(void *info) = data->func;
- void *info = data->info;
- int wait = data->wait;
+ send_ipi_message(mask, IPI_TIMER);
+}
- cpu_clear(cpu, data->pending);
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+}
+
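+/*
+ * Register a dummy clockevent device so that, in the absence of a real
+ * per-CPU timer, the clockevents broadcast code delivers ticks to this
+ * CPU via IPI_TIMER.
+ */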
+static void local_timer_setup(struct clock_event_device *evt)
+{
+ evt->name = "dummy_timer";
+ evt->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DUMMY;
+ evt->rating = 400;
+ evt->mult = 1;
+ evt->set_mode = broadcast_timer_set_mode;
+ evt->broadcast = smp_timer_broadcast;
+
+ clockevents_register_device(evt);
+}
+#endif
- func(info);
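+/*
+ * Set up the clock event device for the calling CPU, using either the
+ * platform's local timer or the dummy broadcast device above.
+ */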
+void __cpuinit percpu_timer_setup(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
- if (wait)
- cpu_clear(cpu, data->unfinished);
+ evt->cpumask = cpumask_of(cpu);
+
+ local_timer_setup(evt);
}
static DEFINE_SPINLOCK(stop_lock);
dump_stack();
spin_unlock(&stop_lock);
- cpu_clear(cpu, cpu_online_map);
+ set_cpu_online(cpu, false);
local_fiq_disable();
local_irq_disable();
*
* Bit 0 - Inter-processor function call
*/
-asmlinkage void do_IPI(struct pt_regs *regs)
+asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ struct pt_regs *old_regs = set_irq_regs(regs);
ipi->ipi_count++;
switch (nextmsg) {
case IPI_TIMER:
- ipi_timer(regs);
+ ipi_timer();
break;
case IPI_RESCHEDULE:
break;
case IPI_CALL_FUNC:
- ipi_call_function(cpu);
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
break;
case IPI_CPU_STOP:
}
} while (msgs);
}
-}
-void smp_send_reschedule(int cpu)
-{
- send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+ set_irq_regs(old_regs);
}
-void smp_send_timer(void)
+void smp_send_reschedule(int cpu)
{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
- send_ipi_message(mask, IPI_TIMER);
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
- send_ipi_message(mask, IPI_CPU_STOP);
+ send_ipi_message(&mask, IPI_CPU_STOP);
}
/*
* not supported here
*/
-int __init setup_profiling_timer(unsigned int multiplier)
+int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
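+/*
+ * Run func on each CPU in mask.  smp_call_function_many() skips the
+ * calling CPU, so invoke func directly here if this CPU is in the mask.
+ */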
-static int
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
- cpumask_t mask)
+static void
+on_each_cpu_mask(void (*func)(void *), void *info, int wait,
+ const struct cpumask *mask)
{
- int ret = 0;
-
preempt_disable();
- ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
- if (cpu_isset(smp_processor_id(), mask))
+ smp_call_function_many(mask, func, info, wait);
+ if (cpumask_test_cpu(smp_processor_id(), mask))
func(info);
preempt_enable();
-
- return ret;
}
/**********************************************************************/
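+/*
+ * TLB maintenance only needs to be broadcast to other CPUs by IPI when
+ * tlb_ops_need_broadcast() says the hardware cannot do it itself;
+ * otherwise a purely local operation is sufficient.
+ */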
void flush_tlb_all(void)
{
- on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+ if (tlb_ops_need_broadcast())
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+ else
+ local_flush_tlb_all();
}
void flush_tlb_mm(struct mm_struct *mm)
{
- cpumask_t mask = mm->cpu_vm_mask;
-
- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+ if (tlb_ops_need_broadcast())
+ on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
+ else
+ local_flush_tlb_mm(mm);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
- cpumask_t mask = vma->vm_mm->cpu_vm_mask;
- struct tlb_args ta;
-
- ta.ta_vma = vma;
- ta.ta_start = uaddr;
-
- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+ if (tlb_ops_need_broadcast()) {
+ struct tlb_args ta;
+ ta.ta_vma = vma;
+ ta.ta_start = uaddr;
+ on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
+ } else
+ local_flush_tlb_page(vma, uaddr);
}
void flush_tlb_kernel_page(unsigned long kaddr)
{
- struct tlb_args ta;
-
- ta.ta_start = kaddr;
-
- on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+ if (tlb_ops_need_broadcast()) {
+ struct tlb_args ta;
+ ta.ta_start = kaddr;
+ on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+ } else
+ local_flush_tlb_kernel_page(kaddr);
}
void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- cpumask_t mask = vma->vm_mm->cpu_vm_mask;
- struct tlb_args ta;
-
- ta.ta_vma = vma;
- ta.ta_start = start;
- ta.ta_end = end;
-
- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+ if (tlb_ops_need_broadcast()) {
+ struct tlb_args ta;
+ ta.ta_vma = vma;
+ ta.ta_start = start;
+ ta.ta_end = end;
+ on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
+ } else
+ local_flush_tlb_range(vma, start, end);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- struct tlb_args ta;
-
- ta.ta_start = start;
- ta.ta_end = end;
-
- on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+ if (tlb_ops_need_broadcast()) {
+ struct tlb_args ta;
+ ta.ta_start = start;
+ ta.ta_end = end;
+ on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+ } else
+ local_flush_tlb_kernel_range(start, end);
}