*/
unsigned int VFP_arch;
+/*
+ * Per-thread VFP initialization.
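+ *
+ * Invoked for THREAD_NOTIFY_FLUSH (e.g. from flush_thread() on exec)
+ * to reset a thread's VFP state to clean defaults.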
+ */
+static void vfp_thread_flush(struct thread_info *thread)
+{
+ union vfp_state *vfp = &thread->vfpstate;
+ unsigned int cpu;
+
+ memset(vfp, 0, sizeof(union vfp_state));
+
+ vfp->hard.fpexc = FPEXC_EN;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
+ /*
+ * Disable VFP to ensure we initialize it first. We must ensure
+ * that the modification of last_VFP_context[] and hardware disable
+ * are done for the same CPU and without preemption.
+ */
+ cpu = get_cpu();
+ if (last_VFP_context[cpu] == vfp)
+ last_VFP_context[cpu] = NULL;
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ put_cpu();
+}
+
+static void vfp_thread_exit(struct thread_info *thread)
+{
+ /* Per-thread VFP cleanup for the thread-exit (release) case. */
+ union vfp_state *vfp = &thread->vfpstate;
+ unsigned int cpu = get_cpu();
+
+ if (last_VFP_context[cpu] == vfp)
+ last_VFP_context[cpu] = NULL;
+ put_cpu();
+}
+
+/*
+ * When this function is called with the following 'cmd's, the following
+ * is true while this function is being run:
+ * THREAD_NOTIFY_SWITCH:
+ * - the previously running thread will not be scheduled onto another CPU.
+ * - the next thread to be run (v) will not be running on another CPU.
+ * - thread->cpu is the local CPU number
+ * - not preemptible as we're called in the middle of a thread switch
+ * THREAD_NOTIFY_FLUSH:
+ * - the thread (v) will be running on the local CPU, so
+ * v === current_thread_info()
+ * - thread->cpu is the local CPU number at the time it is accessed,
+ * but may change at any time.
+ * - we could be preempted if tree preempt rcu is enabled, so
+ * it is unsafe to use thread->cpu.
+ * THREAD_NOTIFY_EXIT:
+ * - the thread (v) will be running on the local CPU, so
+ * v === current_thread_info()
+ * - thread->cpu is the local CPU number at the time it is accessed,
+ * but may change at any time.
+ * - we could be preempted if tree preempt rcu is enabled, so
+ * it is unsafe to use thread->cpu.
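+ *
+ * The FLUSH/EXIT constraints are why vfp_thread_flush() and
+ * vfp_thread_exit() above pin the CPU with get_cpu() rather than
+ * trusting thread->cpu.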
+ */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
struct thread_info *thread = v;
- union vfp_state *vfp;
- __u32 cpu = thread->cpu;
if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
u32 fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
+ unsigned int cpu = thread->cpu;
+
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
return NOTIFY_DONE;
}
- vfp = &thread->vfpstate;
- if (cmd == THREAD_NOTIFY_FLUSH) {
- /*
- * Per-thread VFP initialisation.
- */
- memset(vfp, 0, sizeof(union vfp_state));
-
- vfp->hard.fpexc = FPEXC_EN;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
- /*
- * Disable VFP to ensure we initialise it first.
- */
- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
- }
-
- /* flush and release case: Per-thread VFP cleanup. */
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (cmd == THREAD_NOTIFY_FLUSH)
+ vfp_thread_flush(thread);
+ else
+ vfp_thread_exit(thread);
return NOTIFY_DONE;
}
}
/*
- * Update the FPSCR with the additional exception flags.
+ * If any of the NZCV status flags are set, clear them in the FPSCR
+ * first so that the new flags replace rather than accumulate.
* Comparison instructions always return at least one of
* these flags set.
*/
+ if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
+ fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
+
fpscr |= exceptions;
fmxr(FPSCR, fpscr);
}
if (fpexc & FPEXC_EX) {
+#ifndef CONFIG_CPU_FEROCEON
/*
* Asynchronous exception. The instruction is read from FPINST
* and the interrupted instruction has to be restarted.
*/
trigger = fmrx(FPINST);
regs->ARM_pc -= 4;
+#endif
} else if (!(fpexc & FPEXC_DEX)) {
/*
* Illegal combination of bits. It can be caused by an
* on VFP subarch 1.
*/
vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
- return;
+ goto exit;
}
/*
* the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
*/
if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
- return;
+ goto exit;
/*
* The barrier() here prevents fpinst2 being read
exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
if (exceptions)
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ exit:
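+ /* Pairs with the preemption disable done before VFP_bounce is entered. */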
+ preempt_enable();
}
static void vfp_enable(void *unused)
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
+#ifdef CONFIG_PM
+#include <linux/sysdev.h>
+
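+/*
+ * Sysdev class suspend/resume callbacks run late in the suspend
+ * sequence, with interrupts disabled, so it is safe to save and
+ * disable the VFP hardware state directly here.
+ */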
+static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct thread_info *ti = current_thread_info();
+ u32 fpexc = fmrx(FPEXC);
+
+ /* if vfp is on, then save state for resumption */
+ if (fpexc & FPEXC_EN) {
+ printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
+ vfp_save_state(&ti->vfpstate, fpexc);
+
+ /* disable, just in case */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ }
+
+ /* clear any information we had about last context state */
+ memset(last_VFP_context, 0, sizeof(last_VFP_context));
+
+ return 0;
+}
+
+static int vfp_pm_resume(struct sys_device *dev)
+{
+ /* ensure we have access to the vfp */
+ vfp_enable(NULL);
+
+ /* and disable it to ensure the next usage restores the state */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+
+ return 0;
+}
+
+static struct sysdev_class vfp_pm_sysclass = {
+ .name = "vfp",
+ .suspend = vfp_pm_suspend,
+ .resume = vfp_pm_resume,
+};
+
+static struct sys_device vfp_pm_sysdev = {
+ .cls = &vfp_pm_sysclass,
+};
+
+static void vfp_pm_init(void)
+{
+ sysdev_class_register(&vfp_pm_sysclass);
+ sysdev_register(&vfp_pm_sysdev);
+}
+
+#else
+static inline void vfp_pm_init(void) { }
+#endif /* CONFIG_PM */
+
+void vfp_sync_hwstate(struct thread_info *thread)
+{
+ unsigned int cpu = get_cpu();
+
+ /*
+ * If the thread we're interested in is the current owner of the
+ * hardware VFP state, then we need to save its state.
+ */
+ if (last_VFP_context[cpu] == &thread->vfpstate) {
+ u32 fpexc = fmrx(FPEXC);
+
+ /*
+ * Save the last VFP state on this CPU.
+ */
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
+ fmxr(FPEXC, fpexc);
+ }
+
+ put_cpu();
+}
+
+void vfp_flush_hwstate(struct thread_info *thread)
+{
+ unsigned int cpu = get_cpu();
+
+ /*
+ * If the thread we're interested in is the current owner of the
+ * hardware VFP state, then we need to invalidate it so that the
+ * saved state is reloaded the next time the thread uses the VFP.
+ */
+ if (last_VFP_context[cpu] == &thread->vfpstate) {
+ u32 fpexc = fmrx(FPEXC);
+
+ fmxr(FPEXC, fpexc & ~FPEXC_EN);
+
+ /*
+ * Set the context to NULL to force a reload the next time
+ * the thread uses the VFP.
+ */
+ last_VFP_context[cpu] = NULL;
+ }
+
+#ifdef CONFIG_SMP
+ /*
+ * For SMP we still have to take care of the case where the thread
+ * migrates to another CPU and then back to the original CPU on which
+ * the last VFP user is still the same thread. Mark the thread VFP
+ * state as belonging to a non-existent CPU so that the saved one will
+ * be reloaded in the above case.
+ */
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+ put_cpu();
+}
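+
+/*
+ * Illustrative calling pattern for the two helpers above (a sketch,
+ * not actual kernel code; the expected callers are the ptrace VFP
+ * regset handlers):
+ *
+ *	vfp_sync_hwstate(thread);     copy hardware state into vfpstate
+ *	... read or modify thread->vfpstate ...
+ *	vfp_flush_hwstate(thread);    force a reload on next VFP use
+ */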
+
#include <linux/smp.h>
/*
vfp_vector = vfp_support_entry;
thread_register_notifier(&vfp_notifier_block);
+ vfp_pm_init();
/*
* We detected VFP, and the support code is
* in place; report VFP support to userspace.
*/
elf_hwcap |= HWCAP_VFP;
+#ifdef CONFIG_VFPv3
+ if (VFP_arch >= 2) {
+ elf_hwcap |= HWCAP_VFPv3;
+
+ /*
+ * Check for VFPv3 D16. CPUs in this configuration
+ * only have 16 x 64bit registers.
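+ * (The MVFR0 A_SIMD field reads 1 for 16 double registers
+ * and 2 for 32.)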
+ */
+ if ((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) == 1)
+ elf_hwcap |= HWCAP_VFPv3D16;
+ }
+#endif
#ifdef CONFIG_NEON
/*
* Check for the presence of the Advanced SIMD