* 2005-10-07 Keith Owens <kaos@sgi.com>
* Add notify_die() hooks.
*/
-#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
-#include <linux/config.h>
-
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/utsname.h>
+#include <linux/tracehook.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
-#include <asm/ia32.h>
#include <asm/irq.h>
-#include <asm/kdebug.h>
+#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include "sigframe.h"
void (*ia64_mark_idle)(int);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
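+/* state of the "idle=" boot-option overrides (halt / nomwait), assuming the usual idle= parsing */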
+unsigned long idle_halt;
+EXPORT_SYMBOL(idle_halt);
+unsigned long idle_nomwait;
+EXPORT_SYMBOL(idle_nomwait);
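+/* hook for the current idle routine; may be repointed at run time (see cpu_idle_wait below) */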
+void (*pm_idle) (void);
+EXPORT_SYMBOL(pm_idle);
+void (*pm_power_off) (void);
+EXPORT_SYMBOL(pm_power_off);
void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
print_modules();
- printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
- printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
- regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
+ printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
+ smp_processor_id(), current->comm);
+ printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
+ regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
+ init_utsname()->release);
print_symbol("ip is at %s\n", ip);
printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
show_stack(NULL, NULL);
}
+/* local support for deprecated console_print */
+void
+console_print(const char *s)
+{
+ printk(KERN_EMERG "%s", s);
+}
+
void
-do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
+do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
if (fsys_mode(current, &scr->pt)) {
- /* defer signal-handling etc. until we return to privilege-level 0. */
+ /*
+ * defer signal-handling etc. until we return to
+ * privilege-level 0.
+ */
if (!ia64_psr(&scr->pt)->lp)
ia64_psr(&scr->pt)->lp = 1;
return;
#ifdef CONFIG_PERFMON
if (current->thread.pfm_needs_checking)
+ /*
+		 * Note: pfm_handle_work() may be called with interrupts
+		 * disabled; it may enable interrupts within the function.
+ */
pfm_handle_work();
#endif
/* deal with pending signal delivery */
- if (test_thread_flag(TIF_SIGPENDING))
- ia64_do_signal(oldset, scr, in_syscall);
+ if (test_thread_flag(TIF_SIGPENDING)) {
+ local_irq_enable(); /* force interrupt enable */
+ ia64_do_signal(scr, in_syscall);
+ }
+
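+	/* handle deferred notifications (tracehook callback, session keyring replacement) */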
+ if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(&scr->pt);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+
+ /* copy user rbs to kernel rbs */
+ if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
+ local_irq_enable(); /* force interrupt enable */
+ ia64_sync_krbs();
+ }
+
+ local_irq_disable(); /* force interrupt disable */
}
static int pal_halt = 1;
{
local_irq_enable();
while (!need_resched()) {
- if (can_do_pal_halt)
- safe_halt();
- else
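+		/*
+		 * Re-check need_resched() with interrupts off so a wake-up
+		 * arriving between the check and the halt is not missed.
+		 */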
+ if (can_do_pal_halt) {
+ local_irq_disable();
+ if (!need_resched()) {
+ safe_halt();
+ }
+ local_irq_enable();
+ } else
cpu_relax();
}
}
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
- extern void ia64_cpu_local_tick (void);
unsigned int this_cpu = smp_processor_id();
/* Ack it */
}
#endif /* CONFIG_HOTPLUG_CPU */
-void cpu_idle_wait(void)
+static void do_nothing(void *unused)
{
- unsigned int cpu, this_cpu = get_cpu();
- cpumask_t map;
-
- set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
- put_cpu();
-
- cpus_clear(map);
- for_each_online_cpu(cpu) {
- per_cpu(cpu_idle_state, cpu) = 1;
- cpu_set(cpu, map);
- }
-
- __get_cpu_var(cpu_idle_state) = 0;
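+	/* empty IPI handler: the interrupt alone kicks the CPU out of pm_idle */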
+}
- wmb();
- do {
- ssleep(1);
- for_each_online_cpu(cpu) {
- if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
- cpu_clear(cpu, map);
- }
- cpus_and(map, map, cpu_online_map);
- } while (!cpus_empty(map));
+/*
+ * cpu_idle_wait - ensure that all CPUs discard the old value of pm_idle
+ * and pick up the new one. Required when changing the pm_idle handler on
+ * SMP systems.
+ *
+ * The caller must have changed pm_idle to the new value before the call.
+ * The old pm_idle value will not be used by any CPU after this function
+ * returns.
+ */
+void cpu_idle_wait(void)
+{
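+	/* ensure the caller's new pm_idle value is visible before kicking CPUs */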
+ smp_mb();
+ /* kick all the CPUs so that they exit out of pm_idle */
+ smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/* endless idle loop with no priority at all */
while (1) {
- if (can_do_pal_halt)
- clear_thread_flag(TIF_POLLING_NRFLAG);
- else
- set_thread_flag(TIF_POLLING_NRFLAG);
+ if (can_do_pal_halt) {
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we
+ * test NEED_RESCHED:
+ */
+ smp_mb();
+ } else {
+ current_thread_info()->status |= TS_POLLING;
+ }
if (!need_resched()) {
void (*idle)(void);
#ifdef CONFIG_SMP
min_xtp();
#endif
- if (__get_cpu_var(cpu_idle_state))
- __get_cpu_var(cpu_idle_state) = 0;
-
rmb();
if (mark_idle)
(*mark_idle)(1);
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 0);
#endif
-
-#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(task)))
- ia32_save_state(task);
-#endif
}
void
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 1);
#endif
-
-#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(task)))
- ia32_load_state(task);
-#endif
}
/*
* so there is nothing to worry about.
*/
int
-copy_thread (int nr, unsigned long clone_flags,
+copy_thread(unsigned long clone_flags,
unsigned long user_stack_base, unsigned long user_stack_size,
struct task_struct *p, struct pt_regs *regs)
{
- extern char ia64_ret_from_clone, ia32_ret_from_clone;
+ extern char ia64_ret_from_clone;
struct switch_stack *child_stack, *stack;
unsigned long rbs, child_rbs, rbs_size;
struct pt_regs *child_ptregs;
memcpy((void *) child_rbs, (void *) rbs, rbs_size);
if (likely(user_mode(child_ptregs))) {
- if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
+ if (clone_flags & CLONE_SETTLS)
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
if (user_stack_base) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16;
child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */
}
child_stack->ar_bspstore = child_rbs + rbs_size;
- if (IS_IA32_PROCESS(regs))
- child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
- else
- child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
+ child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
/* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16;
p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
| THREAD_FLAGS_TO_SET);
ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
-#ifdef CONFIG_IA32_SUPPORT
- /*
- * If we're cloning an IA32 task then save the IA32 extra
- * state from the current task to the new task
- */
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
- ia32_save_state(p);
- if (clone_flags & CLONE_SETTLS)
- retval = ia32_clone_tls(p, child_ptregs);
-
- /* Copy partially mapped page list */
- if (!retval)
- retval = ia32_copy_partial_page_list(p, clone_flags);
- }
-#endif
#ifdef CONFIG_PERFMON
if (current->thread.pfm_context)
static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
- unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
+ unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
+ unsigned long uninitialized_var(ip); /* GCC be quiet */
elf_greg_t *dst = arg;
struct pt_regs *pt;
char nat;
do_dump_task_fpu(current, info, arg);
}
-int
-dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
-{
- struct unw_frame_info tcore_info;
-
- if (current == task) {
- unw_init_running(do_copy_regs, regs);
- } else {
- memset(&tcore_info, 0, sizeof(tcore_info));
- unw_init_from_blocked_task(&tcore_info, task);
- do_copy_task_regs(task, &tcore_info, regs);
- }
- return 1;
-}
-
void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
}
int
-dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
-{
- struct unw_frame_info tcore_info;
-
- if (current == task) {
- unw_init_running(do_dump_fpu, dst);
- } else {
- memset(&tcore_info, 0, sizeof(tcore_info));
- unw_init_from_blocked_task(&tcore_info, task);
- do_dump_task_fpu(task, &tcore_info, dst);
- }
- return 1;
-}
-
-int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
unw_init_running(do_dump_fpu, dst);
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
-#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
- /* A kernel thread is always a 64-bit process. */
- current->thread.map_base = DEFAULT_MAP_BASE;
- current->thread.task_size = DEFAULT_TASK_SIZE;
- ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
- ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
- }
-#endif
return (*fn)(arg);
}
/* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);
-#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
- ia32_drop_partial_page_list(current);
- current->thread.task_size = IA32_PAGE_OFFSET;
- set_fs(USER_DS);
- }
-#endif
}
/*
exit_thread (void)
{
- /*
- * Remove function-return probe instances associated with this task
- * and put them back on the free list. Do not insert an exit probe for
- * this function, it will be disabled by kprobe_flush_task if you do.
- */
- kprobe_flush_task(current);
-
ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
/* if needed, stop monitoring and flush state to perfmon context */
if (current->thread.flags & IA64_THREAD_DBG_VALID)
pfm_release_debug_registers(current);
#endif
- if (IS_IA32_PROCESS(ia64_task_regs(current)))
- ia32_drop_partial_page_list(current);
}
unsigned long
unsigned long ip;
int count = 0;
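+	/* don't try to unwind current or a runnable task; its stack is unstable */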
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
/*
	 * Note: p may not be a blocked task (it could be current or
	 * another process running on some other CPU). Rather than
*/
unw_init_from_blocked_task(&info, p);
do {
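+		/* bail out if the task has started running; the unwind data is stale then */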
+ if (p->state == TASK_RUNNING)
+ return 0;
if (unw_unwind(&info) < 0)
return 0;
unw_get_ip(&info, &ip);
ia64_pal_halt(min_power_state);
}
+void machine_shutdown(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ int cpu;
+
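+	/* take every CPU except the executing one offline before shutdown */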
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id())
+ cpu_down(cpu);
+ }
+#endif
+#ifdef CONFIG_KEXEC
+ kexec_disable_iosapic();
+#endif
+}
+
void
machine_restart (char *restart_cmd)
{