/*
- * arch/ppc/kernel/process.c
- *
* Derived from "arch/i386/kernel/process.c"
* Copyright (C) 1995 Linus Torvalds
*
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
-#include <linux/kprobes.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
-#include <asm/time.h>
-#include <asm/machdep.h>
#endif
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
extern unsigned long _get_SP(void);
#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
+struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
*/
BUG_ON(tsk != current);
#endif
- giveup_fpu(current);
+ giveup_fpu(tsk);
}
preempt_enable();
}
}
EXPORT_SYMBOL(enable_kernel_fp);
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
- if (!tsk->thread.regs)
- return 0;
- flush_fp_to_thread(current);
-
- memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
-
- return 1;
-}
-
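Worth noting as it goes: the deleted dump_task_fpu() flushed current but copied from tsk, so it was only correct when the two were the same task. A minimal sketch of the flush-then-copy pattern done consistently; dump_task_fpu_fixed() is a hypothetical name, not part of this patch:

int dump_task_fpu_fixed(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	if (!tsk->thread.regs)
		return 0;
	flush_fp_to_thread(tsk);	/* flush the same task we copy from */
	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
	return 1;
}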
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
- giveup_altivec(current);
+ giveup_altivec(tsk);
}
preempt_enable();
}
}
+#endif /* CONFIG_ALTIVEC */
-int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+#ifdef CONFIG_VSX
+#if 0
+/* not currently used, but some crazy RAID module might want to later */
+void enable_kernel_vsx(void)
{
- flush_altivec_to_thread(current);
- memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
- return 1;
+ WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+ if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
+ giveup_vsx(current);
+ else
+ giveup_vsx(NULL); /* just enable vsx for kernel - force */
+#else
+ giveup_vsx(last_task_used_vsx);
+#endif /* CONFIG_SMP */
}
-#endif /* CONFIG_ALTIVEC */
+EXPORT_SYMBOL(enable_kernel_vsx);
+#endif
+
+void giveup_vsx(struct task_struct *tsk)
+{
+ giveup_fpu(tsk);
+ giveup_altivec(tsk);
+ __giveup_vsx(tsk);
+}
+
+void flush_vsx_to_thread(struct task_struct *tsk)
+{
+ if (tsk->thread.regs) {
+ preempt_disable();
+ if (tsk->thread.regs->msr & MSR_VSX) {
+#ifdef CONFIG_SMP
+ BUG_ON(tsk != current);
+#endif
+ giveup_vsx(tsk);
+ }
+ preempt_enable();
+ }
+}
+#endif /* CONFIG_VSX */
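For context, the pattern enable_kernel_vsx() anticipates (even while it sits under #if 0) mirrors how RAID/XOR code uses enable_kernel_altivec(): pin the task, claim the unit, do the work. A hedged sketch; vsx_xor_block() and its body are hypothetical:

static void vsx_xor_block(unsigned long *a, unsigned long *b, int words)
{
	preempt_disable();	/* lazy CPU state must not migrate mid-use */
	enable_kernel_vsx();	/* take VSX away from its last lazy user */
	/* ... VSX loads, xors and stores over a[] and b[] go here ... */
	preempt_enable();
}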
#ifdef CONFIG_SPE
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
- giveup_spe(current);
+ giveup_spe(tsk);
}
preempt_enable();
}
}
+#endif /* CONFIG_SPE */
-int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+#ifndef CONFIG_SMP
+/*
+ * If we are doing lazy switching of CPU state (FP, altivec, VSX or SPE),
+ * and the current task has some state, discard it.
+ */
+void discard_lazy_cpu_state(void)
{
- flush_spe_to_thread(current);
- /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
- memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
- return 1;
+ preempt_disable();
+ if (last_task_used_math == current)
+ last_task_used_math = NULL;
+#ifdef CONFIG_ALTIVEC
+ if (last_task_used_altivec == current)
+ last_task_used_altivec = NULL;
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+ if (last_task_used_vsx == current)
+ last_task_used_vsx = NULL;
+#endif /* CONFIG_VSX */
+#ifdef CONFIG_SPE
+ if (last_task_used_spe == current)
+ last_task_used_spe = NULL;
+#endif
+ preempt_enable();
}
-#endif /* CONFIG_SPE */
+#endif /* CONFIG_SMP */
+
+void do_dabr(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
+{
+ siginfo_t info;
+
+ if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+ 11, SIGSEGV) == NOTIFY_STOP)
+ return;
+
+ if (debugger_dabr_match(regs))
+ return;
+
+ /* Clear the DAC and struct entries. One-shot trigger */
+#if defined(CONFIG_BOOKE)
+ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
+ | DBCR0_IDM));
+#endif
+
+ /* Clear the DABR */
+ set_dabr(0);
+
+ /* Deliver the signal to userspace */
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_HWBKPT;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGTRAP, &info, current);
+}
+
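From userspace, the signal do_dabr() raises looks like any other SA_SIGINFO signal: si_code is TRAP_HWBKPT and si_addr is the watched data address. A small sketch of the receiving side, assuming the breakpoint was armed elsewhere (e.g. by a ptrace-based debugger):

#include <signal.h>
#include <unistd.h>

static void bkpt_handler(int sig, siginfo_t *info, void *uctx)
{
	if (info->si_code == TRAP_HWBKPT) {
		/* async-signal-safe report; info->si_addr holds the address */
		static const char msg[] = "hw breakpoint hit\n";
		write(2, msg, sizeof(msg) - 1);
	}
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= bkpt_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGTRAP, &sa, NULL);
	/* ... touching the watched location now fires the handler ... */
	return 0;
}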
+static DEFINE_PER_CPU(unsigned long, current_dabr);
int set_dabr(unsigned long dabr)
{
-#ifdef CONFIG_PPC64
+ __get_cpu_var(current_dabr) = dabr;
+
+#ifdef CONFIG_PPC_MERGE /* XXX for now */
if (ppc_md.set_dabr)
return ppc_md.set_dabr(dabr);
#endif
+ /* XXX should we have a CPU_FTR_HAS_DABR ? */
+#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
mtspr(SPRN_DABR, dabr);
+#endif
+
+#if defined(CONFIG_BOOKE)
+ mtspr(SPRN_DAC1, dabr);
+#endif
+
return 0;
}
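A sketch of how a caller composes the value set_dabr() expects: a doubleword-aligned data address with the match-control bits in the low three bits, as the ptrace debugreg path builds it. Assumes the DABR_* bit definitions from <asm/reg.h>; watch_write() is a hypothetical helper:

static int watch_write(unsigned long addr)
{
	unsigned long dabr = (addr & ~7UL)	/* address, 8-byte aligned */
			| DABR_TRANSLATION	/* match translated accesses */
			| DABR_DATA_WRITE;	/* trigger on stores */

	return set_dabr(dabr);
}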
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
-static DEFINE_PER_CPU(unsigned long, current_dabr);
#endif
struct task_struct *__switch_to(struct task_struct *prev,
if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+ if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
+ /* VMX and FPU registers are already saved here */
+ __giveup_vsx(prev);
+#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/*
* If the previous thread used spe in the last quantum
if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_VSX
+ if (new->thread.regs && last_task_used_vsx == new)
+ new->thread.regs->msr |= MSR_VSX;
+#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* Avoid the trap. On SMP this never happens since
* we don't set last_task_used_spe
#endif /* CONFIG_SMP */
-#ifdef CONFIG_PPC64 /* for now */
- if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
+ if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
set_dabr(new->thread.dabr);
- __get_cpu_var(current_dabr) = new->thread.dabr;
- }
- flush_tlb_pending();
+#if defined(CONFIG_BOOKE)
+ /* If the new thread's DAC (HW breakpoint) is the same then leave it */
+ if (new->thread.dabr)
+ set_dabr(new->thread.dabr);
#endif
new_thread = &new->thread;
#endif
local_irq_save(flags);
+
+ account_system_vtime(current);
+ account_process_vtime(current);
+ calculate_steal_time();
+
+ /*
+ * We can't take a PMU exception inside _switch() since there is a
+ * window where the kernel stack SLB and the kernel stack are out
+ * of sync. Hard disable here.
+ */
+ hard_irq_disable();
last = _switch(old_thread, new_thread);
local_irq_restore(flags);
static int instructions_to_print = 16;
-#ifdef CONFIG_PPC64
-#define BAD_PC(pc) ((REGION_ID(pc) != KERNEL_REGION_ID) && \
- (REGION_ID(pc) != VMALLOC_REGION_ID))
-#else
-#define BAD_PC(pc) ((pc) < KERNELBASE)
-#endif
-
static void show_instructions(struct pt_regs *regs)
{
int i;
if (!(i % 8))
printk("\n");
- if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) {
+#if !defined(CONFIG_BOOKE)
+ /* If executing with the IMMU off, adjust pc rather
+ * than print XXXXXXXX.
+ */
+ if (!(regs->msr & MSR_IR))
+ pc = (unsigned long)phys_to_virt(pc);
+#endif
+
+ /* We use __get_user here *only* to avoid an OOPS on a
+ * bad address because the pc *should* only be a
+ * kernel address.
+ */
+ if (!__kernel_text_address(pc) ||
+ __get_user(instr, (unsigned int __user *)pc)) {
printk("XXXXXXXX ");
} else {
if (regs->nip == pc)
{MSR_EE, "EE"},
{MSR_PR, "PR"},
{MSR_FP, "FP"},
+ {MSR_VEC, "VEC"},
+ {MSR_VSX, "VSX"},
{MSR_ME, "ME"},
{MSR_IR, "IR"},
{MSR_DR, "DR"},
}
#ifdef CONFIG_PPC64
-#define REG "%016lX"
+#define REG "%016lx"
#define REGS_PER_LINE 4
#define LAST_VOLATILE 13
#else
-#define REG "%08lX"
+#define REG "%08lx"
#define REGS_PER_LINE 8
#define LAST_VOLATILE 12
#endif
printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
regs->nip, regs->link, regs->ctr);
printk("REGS: %p TRAP: %04lx %s (%s)\n",
- regs, regs->trap, print_tainted(), system_utsname.release);
+ regs, regs->trap, print_tainted(), init_utsname()->release);
printk("MSR: "REG" ", regs->msr);
printbits(regs->msr, msr_bits);
- printk(" CR: %08lX XER: %08lX\n", regs->ccr, regs->xer);
+ printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
if (trap == 0x300 || trap == 0x600)
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+ printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
+#else
printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
+#endif
printk("TASK = %p[%d] '%s' THREAD: %p",
- current, current->pid, current->comm, current->thread_info);
+ current, task_pid_nr(current), current->comm, task_thread_info(current));
#ifdef CONFIG_SMP
- printk(" CPU: %d", smp_processor_id());
+ printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */
for (i = 0; i < 32; i++) {
* Look up the NIP late so we have the best chance of getting the
* above info out without failing
*/
- printk("NIP ["REG"] ", regs->nip);
- print_symbol("%s\n", regs->nip);
- printk("LR ["REG"] ", regs->link);
- print_symbol("%s\n", regs->link);
+ printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+ printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
if (!user_mode(regs))
void exit_thread(void)
{
- kprobe_flush_task(current);
-
-#ifndef CONFIG_SMP
- if (last_task_used_math == current)
- last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+ discard_lazy_cpu_state();
}
void flush_thread(void)
#ifdef CONFIG_PPC64
struct thread_info *t = current_thread_info();
- if (t->flags & _TIF_ABI_PENDING)
- t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
+ clear_ti_thread_flag(t, TIF_ABI_PENDING);
+ if (test_ti_thread_flag(t, TIF_32BIT))
+ clear_ti_thread_flag(t, TIF_32BIT);
+ else
+ set_ti_thread_flag(t, TIF_32BIT);
+ }
#endif
- kprobe_flush_task(current);
-#ifndef CONFIG_SMP
- if (last_task_used_math == current)
- last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
+ discard_lazy_cpu_state();
-#ifdef CONFIG_PPC64 /* for now */
if (current->thread.dabr) {
current->thread.dabr = 0;
set_dabr(0);
- }
+
+#if defined(CONFIG_BOOKE)
+ current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
#endif
+ }
}
void
{
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
+ flush_vsx_to_thread(current);
flush_spe_to_thread(current);
}
{
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
- unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
+ unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
CHECK_FULL_REGS(regs);
/* Copy registers */
#ifdef CONFIG_PPC32
childregs->gpr[2] = (unsigned long) p;
#else
- clear_ti_thread_flag(p->thread_info, TIF_32BIT);
+ clear_tsk_thread_flag(p, TIF_32BIT);
#endif
p->thread.regs = NULL; /* no user register state */
} else {
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
p->thread.ksp = sp;
+ p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
+ _ALIGN_UP(sizeof(struct thread_info), 16);
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SLB)) {
- unsigned long sp_vsid = get_kernel_vsid(sp);
-
- sp_vsid <<= SLB_VSID_SHIFT;
- sp_vsid |= SLB_VSID_KERNEL;
- if (cpu_has_feature(CPU_FTR_16M_PAGE))
- sp_vsid |= SLB_VSID_L;
+ unsigned long sp_vsid;
+ unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
+ if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
+ sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
+ << SLB_VSID_SHIFT_1T;
+ else
+ sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
+ << SLB_VSID_SHIFT;
+ sp_vsid |= SLB_VSID_KERNEL | llp;
p->thread.ksp_vsid = sp_vsid;
}
kregs->nip = *((unsigned long *)ret_from_fork);
#else
kregs->nip = (unsigned long)ret_from_fork;
- p->thread.last_syscall = -1;
#endif
return 0;
* set. Do it now.
*/
if (!current->thread.regs) {
- unsigned long childregs = (unsigned long)current->thread_info +
- THREAD_SIZE;
- childregs -= sizeof(struct pt_regs);
- current->thread.regs = (struct pt_regs *)childregs;
+ struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
+ current->thread.regs = regs - 1;
}
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ccr = 0;
regs->gpr[1] = sp;
+ /*
+ * We have just cleared all the nonvolatile GPRs, so make
+ * FULL_REGS(regs) return true. This is necessary to allow
+ * ptrace to examine the thread immediately after exec.
+ */
+ regs->trap &= ~1UL;
+
#ifdef CONFIG_PPC32
regs->mq = 0;
regs->nip = start;
}
#endif
-#ifndef CONFIG_SMP
- if (last_task_used_math == current)
- last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
+ discard_lazy_cpu_state();
+#ifdef CONFIG_VSX
+ current->thread.used_vsr = 0;
#endif
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
-#endif /* CONFIG_SMP */
memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
* mode (async, precise, disabled) for 'Classic' FP. */
if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
- tsk->thread.fpexc_mode = val &
- (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
- return 0;
+ if (cpu_has_feature(CPU_FTR_SPE)) {
+ tsk->thread.fpexc_mode = val &
+ (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
#else
return -EINVAL;
#endif
if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
- val = tsk->thread.fpexc_mode;
+ if (cpu_has_feature(CPU_FTR_SPE))
+ val = tsk->thread.fpexc_mode;
+ else
+ return -EINVAL;
#else
return -EINVAL;
#endif
return put_user(val, (unsigned int __user *) adr);
}
+int set_endian(struct task_struct *tsk, unsigned int val)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+
+ if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
+ (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
+ return -EINVAL;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ if (val == PR_ENDIAN_BIG)
+ regs->msr &= ~MSR_LE;
+ else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
+ regs->msr |= MSR_LE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int get_endian(struct task_struct *tsk, unsigned long adr)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned int val;
+
+ if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
+ !cpu_has_feature(CPU_FTR_REAL_LE))
+ return -EINVAL;
+
+ if (regs == NULL)
+ return -EINVAL;
+
+ if (regs->msr & MSR_LE) {
+ if (cpu_has_feature(CPU_FTR_REAL_LE))
+ val = PR_ENDIAN_LITTLE;
+ else
+ val = PR_ENDIAN_PPC_LITTLE;
+ } else
+ val = PR_ENDIAN_BIG;
+
+ return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+ tsk->thread.align_ctl = val;
+ return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+ return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
+}
+
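These four hooks back the generic prctl(2) commands PR_SET_ENDIAN/PR_GET_ENDIAN and PR_SET_UNALIGN/PR_GET_UNALIGN. A userspace sketch exercising them:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	unsigned int endian;

	/* Returns PR_ENDIAN_BIG, PR_ENDIAN_LITTLE or PR_ENDIAN_PPC_LITTLE
	 * through the pointer; fails with EINVAL on CPUs with no LE support.
	 */
	if (prctl(PR_GET_ENDIAN, (unsigned long)&endian) == 0)
		printf("endian mode: %u\n", endian);

	/* Ask the kernel to fix up unaligned accesses silently. */
	prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);
	return 0;
}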
#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
int sys_clone(unsigned long clone_flags, unsigned long usp,
flush_spe_to_thread(current);
error = do_execve(filename, (char __user * __user *) a1,
(char __user * __user *) a2, regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- }
putname(filename);
out:
return error;
}
-static int validate_sp(unsigned long sp, struct task_struct *p,
- unsigned long nbytes)
+#ifdef CONFIG_IRQSTACKS
+static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes)
{
- unsigned long stack_page = (unsigned long)p->thread_info;
+ unsigned long stack_page;
+ unsigned long cpu = task_cpu(p);
- if (sp >= stack_page + sizeof(struct thread_struct)
- && sp <= stack_page + THREAD_SIZE - nbytes)
- return 1;
+ /*
+ * Avoid crashing if the stack has overflowed and corrupted
+ * task_cpu(p), which is in the thread_info struct.
+ */
+ if (cpu < NR_CPUS && cpu_possible(cpu)) {
+ stack_page = (unsigned long) hardirq_ctx[cpu];
+ if (sp >= stack_page + sizeof(struct thread_struct)
+ && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+
+ stack_page = (unsigned long) softirq_ctx[cpu];
+ if (sp >= stack_page + sizeof(struct thread_struct)
+ && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+ }
+ return 0;
+}
-#ifdef CONFIG_IRQSTACKS
- stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
- if (sp >= stack_page + sizeof(struct thread_struct)
- && sp <= stack_page + THREAD_SIZE - nbytes)
- return 1;
+#else
+#define valid_irq_stack(sp, p, nb) 0
+#endif /* CONFIG_IRQSTACKS */
+
+int validate_sp(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes)
+{
+ unsigned long stack_page = (unsigned long)task_stack_page(p);
- stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
if (sp >= stack_page + sizeof(struct thread_struct)
&& sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
-#endif
- return 0;
+ return valid_irq_stack(sp, p, nbytes);
}
-#ifdef CONFIG_PPC64
-#define MIN_STACK_FRAME 112 /* same as STACK_FRAME_OVERHEAD, in fact */
-#define FRAME_LR_SAVE 2
-#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD + 288)
-#define REGS_MARKER 0x7265677368657265ul
-#define FRAME_MARKER 12
-#else
-#define MIN_STACK_FRAME 16
-#define FRAME_LR_SAVE 1
-#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
-#define REGS_MARKER 0x72656773ul
-#define FRAME_MARKER 2
-#endif
+EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)
{
return 0;
sp = p->thread.ksp;
- if (!validate_sp(sp, p, MIN_STACK_FRAME))
+ if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
return 0;
do {
sp = *(unsigned long *)sp;
- if (!validate_sp(sp, p, MIN_STACK_FRAME))
+ if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
return 0;
if (count > 0) {
- ip = ((unsigned long *)sp)[FRAME_LR_SAVE];
+ ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
if (!in_sched_functions(ip))
return ip;
}
} while (count++ < 16);
return 0;
}
-EXPORT_SYMBOL(get_wchan);
static int kstack_depth_to_print = 64;
lr = 0;
printk("Call Trace:\n");
do {
- if (!validate_sp(sp, tsk, MIN_STACK_FRAME))
+ if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
return;
stack = (unsigned long *) sp;
newsp = stack[0];
- ip = stack[FRAME_LR_SAVE];
+ ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
- printk("["REG"] ["REG"] ", sp, ip);
- print_symbol("%s", ip);
+ printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
if (firstframe)
printk(" (unreliable)");
printk("\n");
* See if this is an exception frame.
* We look for the "regshere" marker in the current frame.
*/
- if (validate_sp(sp, tsk, INT_FRAME_SIZE)
- && stack[FRAME_MARKER] == REGS_MARKER) {
+ if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
+ && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
- printk("--- Exception: %lx", regs->trap);
- print_symbol(" at %s\n", regs->nip);
lr = regs->link;
- print_symbol(" LR = %s\n", lr);
+ printk("--- Exception: %lx at %pS\n LR = %pS\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
firstframe = 1;
}
show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_PPC64
+void ppc64_runlatch_on(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
+ HMT_medium();
+
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl |= CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+
+ set_thread_flag(TIF_RUNLATCH);
+ }
+}
+
+void ppc64_runlatch_off(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
+ HMT_medium();
+
+ clear_thread_flag(TIF_RUNLATCH);
+
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+}
+#endif
+
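The intended call pattern for the runlatch helpers is the one the powerpc idle loop uses: drop the latch while napping so the hardware can deprioritize the thread, raise it before real work resumes. A sketch; idle_body() is hypothetical:

static void idle_body(void)
{
	ppc64_runlatch_off();		/* this hardware thread is idle */
	while (!need_resched())
		cpu_relax();		/* or a platform nap/cede call */
	ppc64_runlatch_on();		/* back to useful work */
}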
+#if THREAD_SHIFT < PAGE_SHIFT
+
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+ struct thread_info *ti;
+
+ ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+ if (unlikely(ti == NULL))
+ return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ memset(ti, 0, THREAD_SIZE);
+#endif
+ return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+ thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ THREAD_SIZE, 0, NULL);
+ BUG_ON(thread_info_cache == NULL);
+}
+
+#endif /* THREAD_SHIFT < PAGE_SHIFT */