* This function is used throughout the kernel (including mm and fs)
* to indicate a major problem.
*/
-#include <linux/config.h>
+#include <linux/debug_locks.h>
+#include <linux/interrupt.h>
+#include <linux/kmsg_dump.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
+#include <linux/random.h>
#include <linux/reboot.h>
-#include <linux/notifier.h>
-#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kexec.h>
+#include <linux/sched.h>
#include <linux/sysrq.h>
-#include <linux/interrupt.h>
+#include <linux/init.h>
#include <linux/nmi.h>
-#include <linux/kexec.h>
+#include <linux/dmi.h>
int panic_on_oops;
-int tainted;
+static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
int panic_timeout;
-EXPORT_SYMBOL(panic_timeout);
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
EXPORT_SYMBOL(panic_notifier_list);
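Subsystems hook this chain with a standard notifier block; the callback is
handed the formatted panic message as its void * argument (see the
atomic_notifier_call_chain() call in panic() below). A minimal sketch, with
my_panic_event() and my_panic_nb as hypothetical names:

	#include <linux/notifier.h>

	static int my_panic_event(struct notifier_block *nb,
				  unsigned long event, void *msg)
	{
		/* msg is the formatted panic string from panic() */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_panic_nb = {
		.notifier_call = my_panic_event,
	};

	/* from a driver's init path: */
	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);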
-static int __init panic_setup(char *str)
-{
- panic_timeout = simple_strtoul(str, NULL, 0);
- return 1;
-}
-__setup("panic=", panic_setup);
-
-static long no_blink(long time)
-{
- return 0;
-}
-
/* Returns how long it waited in ms */
long (*panic_blink)(long time);
EXPORT_SYMBOL(panic_blink);
+static void panic_blink_one_second(void)
+{
+ static long i = 0, end;
+
+ if (panic_blink) {
+ end = i + MSEC_PER_SEC;
+
+ while (i < end) {
+ i += panic_blink(i);
+ mdelay(1);
+ i++;
+ }
+ } else {
+ /*
+ * When running under a hypervisor a small mdelay may get
+ * rounded up to the hypervisor timeslice. For example, with
+ * a timeslice of 1ms in every 10ms we might inflate an
+ * mdelay(1) loop by 10x.
+ *
+ * If we have nothing to blink, spin on 1 second calls to
+ * mdelay to avoid this.
+ */
+ mdelay(MSEC_PER_SEC);
+ }
+}
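The blink callback receives the running millisecond count and returns how
many extra milliseconds it consumed, matching the i += panic_blink(i)
accounting above. A sketch of a handler, where led_toggle() is a
hypothetical board-specific helper:

	static long my_panic_blink(long time)
	{
		/* change LED state twice a second; we burn no time here */
		if ((time % 500) == 0)
			led_toggle();	/* hypothetical helper */
		return 0;
	}

	/* in platform setup code: */
	panic_blink = my_panic_blink;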
+
/**
* panic - halt the system
* @fmt: The text string to print
*
* This function never returns.
*/
-
NORET_TYPE void panic(const char * fmt, ...)
{
- long i;
static char buf[1024];
va_list args;
-#if defined(CONFIG_S390)
- unsigned long caller = (unsigned long) __builtin_return_address(0);
-#endif
+ long i;
/*
- * It's possible to come here directly from a panic-assertion and not
- * have preempt disabled. Some functions called from here want
+ * It's possible to come here directly from a panic-assertion and
+ * not have preempt disabled. Some functions called from here want
* preempt to be disabled. No point enabling it later though...
*/
preempt_disable();
+ console_verbose();
bust_spinlocks(1);
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printk(KERN_EMERG "Kernel panic - not syncing: %s\n", buf);
- bust_spinlocks(0);
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ dump_stack();
+#endif
/*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
*/
crash_kexec(NULL);
-#ifdef CONFIG_SMP
+ kmsg_dump(KMSG_DUMP_PANIC);
+
/*
* Note smp_send_stop is the usual smp shutdown function, which
* unfortunately means it may not be hardened to work in a panic
* situation.
*/
smp_send_stop();
-#endif
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
- if (!panic_blink)
- panic_blink = no_blink;
+ bust_spinlocks(0);
if (panic_timeout > 0) {
/*
- * Delay timeout seconds before rebooting the machine.
- * We can't use the "normal" timers since we just panicked..
- */
- printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
- for (i = 0; i < panic_timeout*1000; ) {
+ * Delay timeout seconds before rebooting the machine.
+ * We can't use the "normal" timers since we just panicked.
+ */
+ printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
+
+ for (i = 0; i < panic_timeout; i++) {
touch_nmi_watchdog();
- i += panic_blink(i);
- mdelay(1);
- i++;
+ panic_blink_one_second();
}
- /* This will not be a clean reboot, with everything
- * shutting down. But if there is a chance of
- * rebooting the system it will be rebooted.
+ /*
+ * This will not be a clean reboot, with everything
+ * shutting down. But if there is a chance of
+ * rebooting the system it will be rebooted.
*/
emergency_restart();
}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
- disabled_wait(caller);
+ {
+ unsigned long caller;
+
+ caller = (unsigned long)__builtin_return_address(0);
+ disabled_wait(caller);
+ }
#endif
local_irq_enable();
- for (i = 0;;) {
+ while (1) {
touch_softlockup_watchdog();
- i += panic_blink(i);
- mdelay(1);
- i++;
+ panic_blink_one_second();
}
}
EXPORT_SYMBOL(panic);
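panic() takes a printk-style format string; a typical (illustrative) call
site, with root_dev_name as a hypothetical variable:

	panic("VFS: Unable to mount root fs on %s", root_dev_name);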
+
+struct tnt {
+ u8 bit;
+ char true;
+ char false;
+};
+
+static const struct tnt tnts[] = {
+ { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
+ { TAINT_FORCED_MODULE, 'F', ' ' },
+ { TAINT_UNSAFE_SMP, 'S', ' ' },
+ { TAINT_FORCED_RMMOD, 'R', ' ' },
+ { TAINT_MACHINE_CHECK, 'M', ' ' },
+ { TAINT_BAD_PAGE, 'B', ' ' },
+ { TAINT_USER, 'U', ' ' },
+ { TAINT_DIE, 'D', ' ' },
+ { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
+ { TAINT_WARN, 'W', ' ' },
+ { TAINT_CRAP, 'C', ' ' },
+ { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' },
+};
+
/**
* print_tainted - return a string to represent the kernel taint state.
*
* 'P' - Proprietary module has been loaded.
* 'F' - Module has been forcibly loaded.
* 'S' - SMP with CPUs not designed for SMP.
* 'R' - User forced a module unload.
- * 'M' - Machine had a machine check experience.
+ * 'M' - System experienced a machine check exception.
* 'B' - System has hit bad_page.
+ * 'U' - Userspace-defined naughtiness.
+ * 'D' - Kernel has oopsed before.
+ * 'A' - ACPI table overridden.
+ * 'W' - Taint on warning.
+ * 'C' - Modules from drivers/staging are loaded.
+ * 'I' - Working around severe firmware bug.
*
- * The string is overwritten by the next call to print_taint().
+ * The string is overwritten by the next call to print_tainted().
*/
-
const char *print_tainted(void)
{
- static char buf[20];
- if (tainted) {
- snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c",
- tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
- tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
- tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
- tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
- tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
- tainted & TAINT_BAD_PAGE ? 'B' : ' ');
- }
- else
+ static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];
+
+ if (tainted_mask) {
+ char *s;
+ int i;
+
+ s = buf + sprintf(buf, "Tainted: ");
+ for (i = 0; i < ARRAY_SIZE(tnts); i++) {
+ const struct tnt *t = &tnts[i];
+ *s++ = test_bit(t->bit, &tainted_mask) ?
+ t->true : t->false;
+ }
+ *s = 0;
+ } else
snprintf(buf, sizeof(buf), "Not tainted");
- return(buf);
+
+ return buf;
}
-void add_taint(unsigned flag)
+int test_taint(unsigned flag)
{
- tainted |= flag;
+ return test_bit(flag, &tainted_mask);
+}
+EXPORT_SYMBOL(test_taint);
+
+unsigned long get_taint(void)
+{
+ return tainted_mask;
}
-EXPORT_SYMBOL(add_taint);
-static int __init pause_on_oops_setup(char *str)
+void add_taint(unsigned flag)
{
- pause_on_oops = simple_strtoul(str, NULL, 0);
- return 1;
+	/*
+	 * Can't trust the integrity of the kernel anymore.
+	 * We don't directly call debug_locks_off() because the issue
+	 * is not necessarily serious enough to set oops_in_progress to 1.
+	 * We also want to keep lockdep enabled for the staging-driver and
+	 * post-warning cases.
+	 */
+ if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
+ printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+
+ set_bit(flag, &tainted_mask);
}
-__setup("pause_on_oops=", pause_on_oops_setup);
+EXPORT_SYMBOL(add_taint);
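Typical use of the taint API: a handler sets a bit once, and diagnostic
paths query it later. A small sketch:

	/* e.g. from a machine-check handler: */
	add_taint(TAINT_MACHINE_CHECK);

	/* e.g. when judging how trustworthy an oops report is: */
	if (test_taint(TAINT_MACHINE_CHECK))
		printk(KERN_INFO "taint: %s\n", print_tainted());

Note that print_tainted() emits one character per tnts[] slot, so with only
TAINT_MACHINE_CHECK set the string reads "Tainted: G   M" followed by
spaces: 'G' marks the not-proprietary slot, spaces the other clear flags.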
static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}
/*
- * Return true if the calling CPU is allowed to print oops-related info. This
- * is a bit racy..
+ * Return true if the calling CPU is allowed to print oops-related info.
+ * This is a bit racy..
*/
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
* Called when the architecture enters its oops handler, before it prints
- * anything. If this is the first CPU to oops, and it's oopsing the first time
- * then let it proceed.
+ * anything. If this is the first CPU to oops, and it's oopsing the first
+ * time then let it proceed.
*
- * This is all enabled by the pause_on_oops kernel boot option. We do all this
- * to ensure that oopses don't scroll off the screen. It has the side-effect
- * of preventing later-oopsing CPUs from mucking up the display, too.
+ * This is all enabled by the pause_on_oops kernel boot option. We do all
+ * this to ensure that oopses don't scroll off the screen. It has the
+ * side-effect of preventing later-oopsing CPUs from mucking up the display,
+ * too.
*
- * It turns out that the CPU which is allowed to print ends up pausing for the
- * right duration, whereas all the other CPUs pause for twice as long: once in
- * oops_enter(), once in oops_exit().
+ * It turns out that the CPU which is allowed to print ends up pausing for
+ * the right duration, whereas all the other CPUs pause for twice as long:
+ * once in oops_enter(), once in oops_exit().
*/
void oops_enter(void)
{
+ tracing_off();
+ /* can't trust the integrity of the kernel anymore: */
+ debug_locks_off();
do_oops_enter_exit();
}
/*
+ * 64-bit random ID for oopses:
+ */
+static u64 oops_id;
+
+static int init_oops_id(void)
+{
+ if (!oops_id)
+ get_random_bytes(&oops_id, sizeof(oops_id));
+ else
+ oops_id++;
+
+ return 0;
+}
+late_initcall(init_oops_id);
+
+static void print_oops_end_marker(void)
+{
+ init_oops_id();
+ printk(KERN_WARNING "---[ end trace %016llx ]---\n",
+ (unsigned long long)oops_id);
+}
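Since init_oops_id() randomizes the id once and then increments it on each
later call, every oops within one boot shares a recognizable prefix; the
resulting log line looks like this (id value illustrative):

	---[ end trace 4eaa2a86a8e2da22 ]---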
+
+/*
* Called when the architecture exits its oops handler, after printing
* everything.
*/
void oops_exit(void)
{
do_oops_enter_exit();
+ print_oops_end_marker();
+ kmsg_dump(KMSG_DUMP_OOPS);
}
+
+#ifdef WANT_WARN_ON_SLOWPATH
+struct slowpath_args {
+ const char *fmt;
+ va_list args;
+};
+
+static void warn_slowpath_common(const char *file, int line, void *caller,
+ unsigned taint, struct slowpath_args *args)
+{
+ const char *board;
+
+ printk(KERN_WARNING "------------[ cut here ]------------\n");
+ printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
+ board = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (board)
+ printk(KERN_WARNING "Hardware name: %s\n", board);
+
+ if (args)
+ vprintk(args->fmt, args->args);
+
+ print_modules();
+ dump_stack();
+ print_oops_end_marker();
+ add_taint(taint);
+}
+
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+{
+ struct slowpath_args args;
+
+ args.fmt = fmt;
+ va_start(args.args, fmt);
+ warn_slowpath_common(file, line, __builtin_return_address(0),
+ TAINT_WARN, &args);
+ va_end(args.args);
+}
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_fmt_taint(const char *file, int line,
+ unsigned taint, const char *fmt, ...)
+{
+ struct slowpath_args args;
+
+ args.fmt = fmt;
+ va_start(args.args, fmt);
+ warn_slowpath_common(file, line, __builtin_return_address(0),
+ taint, &args);
+ va_end(args.args);
+}
+EXPORT_SYMBOL(warn_slowpath_fmt_taint);
+
+void warn_slowpath_null(const char *file, int line)
+{
+ warn_slowpath_common(file, line, __builtin_return_address(0),
+ TAINT_WARN, NULL);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
+#endif
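For context, these slowpaths are reached through the generic WARN_ON()/WARN()
macros of include/asm-generic/bug.h; a simplified sketch of that mapping,
not the verbatim macros:

	#define __WARN()		warn_slowpath_null(__FILE__, __LINE__)
	#define __WARN_printf(arg...)	warn_slowpath_fmt(__FILE__, __LINE__, arg)

	#define WARN_ON(condition) ({					\
		int __ret_warn_on = !!(condition);			\
		if (unlikely(__ret_warn_on))				\
			__WARN();					\
		unlikely(__ret_warn_on);				\
	})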
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+
+/*
+ * Called when gcc's -fstack-protector feature is used, and
+ * gcc detects corruption of the on-stack canary value
+ */
+void __stack_chk_fail(void)
+{
+ panic("stack-protector: Kernel stack is corrupted in: %p\n",
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__stack_chk_fail);
+
+#endif
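For illustration only: gcc plants a canary between a function's locals and
its return address and verifies it in the epilogue, so a bug like the
hypothetical one below ends in the panic above rather than a return through
a smashed frame:

	static void overflow_me(const char *untrusted)
	{
		char buf[8];

		strcpy(buf, untrusted);	/* unbounded copy may clobber the canary */
	}				/* epilogue check -> __stack_chk_fail() */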
+
+core_param(panic, panic_timeout, int, 0644);
+core_param(pause_on_oops, pause_on_oops, int, 0644);
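With core_param() these are settable on the boot command line and, given the
0644 mode, readable and writable at runtime via sysfs (paths assuming the
standard core_param layout under /sys/module/kernel/parameters/):

	linux ... panic=30 pause_on_oops=10
	echo 60 > /sys/module/kernel/parameters/panic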