X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=arch%2Fia64%2Fkernel%2Fprocess.c;h=fabaf08d9a695bbb088951da7e96af4a9b640443;hb=8691e5a8f691cc2a4fda0651e8d307aaba0e7d68;hp=ea914cc6812a0a327cd73802f4b0e2b6beacd42b;hpb=6ab3d5624e172c553004ecc862bfeac16d9d68b7;p=safe%2Fjmp%2Flinux-2.6 diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index ea914cc..fabaf08 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -8,8 +8,6 @@ * 2005-10-07 Keith Owens * Add notify_die() hooks. */ -#define __KERNEL_SYSCALLS__ /* see */ - #include #include #include @@ -22,20 +20,21 @@ #include #include #include -#include #include #include #include #include #include #include +#include +#include #include #include #include #include #include -#include +#include #include #include #include @@ -53,7 +52,6 @@ #include "sigframe.h" void (*ia64_mark_idle)(int); -static DEFINE_PER_CPU(unsigned int, cpu_idle_state); unsigned long boot_option_idle_override = 0; EXPORT_SYMBOL(boot_option_idle_override); @@ -107,9 +105,11 @@ show_regs (struct pt_regs *regs) unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; print_modules(); - printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm); - printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n", - regs->cr_ipsr, regs->cr_ifs, ip, print_tainted()); + printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), + smp_processor_id(), current->comm); + printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", + regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), + init_utsname()->release); print_symbol("ip is at %s\n", ip); printk("unat: %016lx pfs : %016lx rsc : %016lx\n", regs->ar_unat, regs->ar_pfs, regs->ar_rsc); @@ -156,11 +156,29 @@ show_regs (struct pt_regs *regs) show_stack(NULL, NULL); } +void tsk_clear_notify_resume(struct task_struct *tsk) +{ +#ifdef CONFIG_PERFMON + if (tsk->thread.pfm_needs_checking) + return; +#endif + if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE)) + return; + clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME); +} + +/* + * do_notify_resume_user(): + * Called from notify_resume_user at entry.S, with interrupts disabled. + */ void -do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall) +do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) { if (fsys_mode(current, &scr->pt)) { - /* defer signal-handling etc. until we return to privilege-level 0. */ + /* + * defer signal-handling etc. until we return to + * privilege-level 0. + */ if (!ia64_psr(&scr->pt)->lp) ia64_psr(&scr->pt)->lp = 1; return; @@ -168,12 +186,26 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall #ifdef CONFIG_PERFMON if (current->thread.pfm_needs_checking) + /* + * Note: pfm_handle_work() allow us to call it with interrupts + * disabled, and may enable interrupts within the function. 
+ */ pfm_handle_work(); #endif /* deal with pending signal delivery */ - if (test_thread_flag(TIF_SIGPENDING)) - ia64_do_signal(oldset, scr, in_syscall); + if (test_thread_flag(TIF_SIGPENDING)) { + local_irq_enable(); /* force interrupt enable */ + ia64_do_signal(scr, in_syscall); + } + + /* copy user rbs to kernel rbs */ + if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) { + local_irq_enable(); /* force interrupt enable */ + ia64_sync_krbs(); + } + + local_irq_disable(); /* force interrupt disable */ } static int pal_halt = 1; @@ -200,9 +232,13 @@ default_idle (void) { local_irq_enable(); while (!need_resched()) { - if (can_do_pal_halt) - safe_halt(); - else + if (can_do_pal_halt) { + local_irq_disable(); + if (!need_resched()) { + safe_halt(); + } + local_irq_enable(); + } else cpu_relax(); } } @@ -234,31 +270,23 @@ static inline void play_dead(void) } #endif /* CONFIG_HOTPLUG_CPU */ -void cpu_idle_wait(void) +static void do_nothing(void *unused) { - unsigned int cpu, this_cpu = get_cpu(); - cpumask_t map; - - set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); - put_cpu(); - - cpus_clear(map); - for_each_online_cpu(cpu) { - per_cpu(cpu_idle_state, cpu) = 1; - cpu_set(cpu, map); - } - - __get_cpu_var(cpu_idle_state) = 0; +} - wmb(); - do { - ssleep(1); - for_each_online_cpu(cpu) { - if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) - cpu_clear(cpu, map); - } - cpus_and(map, map, cpu_online_map); - } while (!cpus_empty(map)); +/* + * cpu_idle_wait - Used to ensure that all the CPUs discard old value of + * pm_idle and update to new pm_idle value. Required while changing pm_idle + * handler on SMP systems. + * + * Caller must have changed pm_idle to the new value before the call. Old + * pm_idle value will not be used by any CPU after the return of this function. 
+ */ +void cpu_idle_wait(void) +{ + smp_mb(); + /* kick all the CPUs so that they exit out of pm_idle */ + smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(cpu_idle_wait); @@ -270,19 +298,22 @@ cpu_idle (void) /* endless idle loop with no priority at all */ while (1) { - if (can_do_pal_halt) + if (can_do_pal_halt) { current_thread_info()->status &= ~TS_POLLING; - else + /* + * TS_POLLING-cleared state must be visible before we + * test NEED_RESCHED: + */ + smp_mb(); + } else { current_thread_info()->status |= TS_POLLING; + } if (!need_resched()) { void (*idle)(void); #ifdef CONFIG_SMP min_xtp(); #endif - if (__get_cpu_var(cpu_idle_state)) - __get_cpu_var(cpu_idle_state) = 0; - rmb(); if (mark_idle) (*mark_idle)(1); @@ -493,7 +524,8 @@ copy_thread (int nr, unsigned long clone_flags, /* Copy partially mapped page list */ if (!retval) - retval = ia32_copy_partial_page_list(p, clone_flags); + retval = ia32_copy_ia64_partial_page_list(p, + clone_flags); } #endif @@ -507,7 +539,8 @@ copy_thread (int nr, unsigned long clone_flags, static void do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) { - unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm; + unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm; + unsigned long uninitialized_var(ip); /* GCC be quiet */ elf_greg_t *dst = arg; struct pt_regs *pt; char nat; @@ -609,21 +642,6 @@ do_dump_fpu (struct unw_frame_info *info, void *arg) do_dump_task_fpu(current, info, arg); } -int -dump_task_regs(struct task_struct *task, elf_gregset_t *regs) -{ - struct unw_frame_info tcore_info; - - if (current == task) { - unw_init_running(do_copy_regs, regs); - } else { - memset(&tcore_info, 0, sizeof(tcore_info)); - unw_init_from_blocked_task(&tcore_info, task); - do_copy_task_regs(task, &tcore_info, regs); - } - return 1; -} - void ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) { @@ -631,21 +649,6 @@ ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) } int -dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst) -{ - struct unw_frame_info tcore_info; - - if (current == task) { - unw_init_running(do_dump_fpu, dst); - } else { - memset(&tcore_info, 0, sizeof(tcore_info)); - unw_init_from_blocked_task(&tcore_info, task); - do_dump_task_fpu(task, &tcore_info, dst); - } - return 1; -} - -int dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) { unw_init_running(do_dump_fpu, dst); @@ -721,9 +724,10 @@ flush_thread (void) ia64_drop_fpu(current); #ifdef CONFIG_IA32_SUPPORT if (IS_IA32_PROCESS(task_pt_regs(current))) { - ia32_drop_partial_page_list(current); + ia32_drop_ia64_partial_page_list(current); current->thread.task_size = IA32_PAGE_OFFSET; set_fs(USER_DS); + memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array)); } #endif } @@ -747,7 +751,7 @@ exit_thread (void) pfm_release_debug_registers(current); #endif if (IS_IA32_PROCESS(task_pt_regs(current))) - ia32_drop_partial_page_list(current); + ia32_drop_ia64_partial_page_list(current); } unsigned long @@ -757,6 +761,9 @@ get_wchan (struct task_struct *p) unsigned long ip; int count = 0; + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + /* * Note: p may not be a blocked task (it could be current or * another process running on some other CPU. 
Rather than @@ -767,6 +774,8 @@ get_wchan (struct task_struct *p) */ unw_init_from_blocked_task(&info, p); do { + if (p->state == TASK_RUNNING) + return 0; if (unw_unwind(&info) < 0) return 0; unw_get_ip(&info, &ip); @@ -799,6 +808,21 @@ cpu_halt (void) ia64_pal_halt(min_power_state); } +void machine_shutdown(void) +{ +#ifdef CONFIG_HOTPLUG_CPU + int cpu; + + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id()) + cpu_down(cpu); + } +#endif +#ifdef CONFIG_KEXEC + kexec_disable_iosapic(); +#endif +} + void machine_restart (char *restart_cmd) {
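
The block comment above the new cpu_idle_wait() states the contract: the caller stores the new pm_idle handler first, and cpu_idle_wait() then only issues a full memory barrier plus an empty cross-CPU call so every online CPU drops out of whatever idle routine it was executing. A minimal sketch of that calling pattern follows; my_new_idle and switch_idle_handler are hypothetical names used purely for illustration and are not part of this patch.

	#include <asm/processor.h>	/* cpu_relax() */

	/* both symbols exist in the kernel; declared extern here only to keep the sketch self-contained */
	extern void (*pm_idle)(void);
	extern void cpu_idle_wait(void);

	/* hypothetical replacement idle routine, illustration only */
	static void my_new_idle(void)
	{
		cpu_relax();
	}

	/* hypothetical caller switching the idle handler */
	static void switch_idle_handler(void)
	{
		pm_idle = my_new_idle;	/* publish the new handler first ... */
		cpu_idle_wait();	/* ... then wait until no CPU can still be running the old one */
	}

Once cpu_idle_wait() returns, the old handler is no longer referenced by any CPU, so code that unloads or repurposes it can proceed safely.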