This is a fix for the following crash observed in 2.6.29-rc3:
http://lkml.org/lkml/2009/1/29/150
On ARM it doesn't make sense to trace a naked function because then
mcount is called without stack and frame pointer being set up and there
is no chance to restore the lr register to the value before mcount was
called.
Reported-by: Matthias Kaehlcke <matthias@kaehlcke.net>
Tested-by: Matthias Kaehlcke <matthias@kaehlcke.net>
Cc: Abhishek Sagar <sagar.abhishek@gmail.com>
Cc: Steven Rostedt <rostedt@home.goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
* disable irqs for the duration. Note - these functions are almost
* entirely coded in assembly.
*/
* disable irqs for the duration. Note - these functions are almost
* entirely coded in assembly.
*/
-void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
+void __naked set_fiq_regs(struct pt_regs *regs)
{
register unsigned long tmp;
asm volatile (
{
register unsigned long tmp;
asm volatile (
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
-void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
+void __naked get_fiq_regs(struct pt_regs *regs)
{
register unsigned long tmp;
asm volatile (
{
register unsigned long tmp;
asm volatile (
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/highmem.h>
-static void __attribute__((naked))
feroceon_copy_user_page(void *kto, const void *kfrom)
{
asm("\
feroceon_copy_user_page(void *kto, const void *kfrom)
{
asm("\
*
* FIXME: do we need to handle cache stuff...
*/
*
* FIXME: do we need to handle cache stuff...
*/
-static void __attribute__((naked))
v3_copy_user_page(void *kto, const void *kfrom)
{
asm("\n\
v3_copy_user_page(void *kto, const void *kfrom)
{
asm("\n\
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
-static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
asm volatile(
mc_copy_user_page(void *from, void *to)
{
asm volatile(
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
-static void __attribute__((naked))
v4wb_copy_user_page(void *kto, const void *kfrom)
{
asm("\
v4wb_copy_user_page(void *kto, const void *kfrom)
{
asm("\
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
-static void __attribute__((naked))
v4wt_copy_user_page(void *kto, const void *kfrom)
{
asm("\
v4wt_copy_user_page(void *kto, const void *kfrom)
{
asm("\
* if we eventually end up using our copied page.
*
*/
* if we eventually end up using our copied page.
*
*/
-static void __attribute__((naked))
xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
asm("\
xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
asm("\
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*/
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*/
-static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
{
/*
mc_copy_user_page(void *from, void *to)
{
/*
#define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
#define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
-#define __naked __attribute__((naked))
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked) to trace
+ * naked functions because then mcount is called without stack and frame pointer
+ * being set up and there is no chance to restore the lr register to the value
+ * before mcount was called.
+ */
+#define __naked __attribute__((naked)) notrace
+
#define __noreturn __attribute__((noreturn))
/*
#define __noreturn __attribute__((noreturn))
/*