git://ftp.safe.ca
/
safe
/
jmp
/
linux-2.6
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
ring-buffer: reset timestamps when ring buffer is reset
[safe/jmp/linux-2.6]
/
kernel
/
softlockup.c
diff --git
a/kernel/softlockup.c
b/kernel/softlockup.c
index
7bd8d1a
..
d9188c6
100644
(file)
--- a/
kernel/softlockup.c
+++ b/
kernel/softlockup.c
@@ -164,7 +164,7 @@
unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
/*
* Zero means infinite timeout - no checking done:
*/
/*
* Zero means infinite timeout - no checking done:
*/
-unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
+unsigned long __read_mostly sysctl_hung_task_timeout_secs = 480;
unsigned long __read_mostly sysctl_hung_task_warnings = 10;
unsigned long __read_mostly sysctl_hung_task_warnings = 10;
@@ -188,7 +188,7 @@
static void check_hung_task(struct task_struct *t, unsigned long now)
if ((long)(now - t->last_switch_timestamp) <
sysctl_hung_task_timeout_secs)
return;
if ((long)(now - t->last_switch_timestamp) <
sysctl_hung_task_timeout_secs)
return;
-	if (sysctl_hung_task_warnings < 0)
+	if (!sysctl_hung_task_warnings)
return;
sysctl_hung_task_warnings--;
return;
sysctl_hung_task_warnings--;
@@ -226,14 +226,15 @@
static void check_hung_uninterruptible_tasks(int this_cpu)
* If the system crashed already then all bets are off,
* do not report extra hung tasks:
*/
* If the system crashed already then all bets are off,
* do not report extra hung tasks:
*/
-	if ((tainted & TAINT_DIE) || did_panic)
+	if (test_taint(TAINT_DIE) || did_panic)
return;
read_lock(&tasklist_lock);
do_each_thread(g, t) {
if (!--max_count)
goto unlock;
return;
read_lock(&tasklist_lock);
do_each_thread(g, t) {
if (!--max_count)
goto unlock;
- if (t->state & TASK_UNINTERRUPTIBLE)
+ /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
+ if (t->state == TASK_UNINTERRUPTIBLE)
check_hung_task(t, now);
} while_each_thread(g, t);
unlock:
check_hung_task(t, now);
} while_each_thread(g, t);
unlock:
@@ -302,17 +303,15 @@
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
-		check_cpu = any_online_cpu(cpu_online_map);
+		check_cpu = cpumask_any(cpu_online_mask);
wake_up_process(per_cpu(watchdog_task, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
if (hotcpu == check_cpu) {
wake_up_process(per_cpu(watchdog_task, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
if (hotcpu == check_cpu) {
- cpumask_t temp_cpu_online_map = cpu_online_map;
-
- cpu_clear(hotcpu, temp_cpu_online_map);
- check_cpu = any_online_cpu(temp_cpu_online_map);
+ /* Pick any other online cpu. */
+ check_cpu = cpumask_any_but(cpu_online_mask, hotcpu);
}
break;
}
break;
@@ -322,7 +321,7 @@
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
/* Unbind so it can run. Fall thru. */
kthread_bind(per_cpu(watchdog_task, hotcpu),
break;
/* Unbind so it can run. Fall thru. */
kthread_bind(per_cpu(watchdog_task, hotcpu),
-			     any_online_cpu(cpu_online_map));
+			     cpumask_any(cpu_online_mask));
case CPU_DEAD:
case CPU_DEAD_FROZEN:
p = per_cpu(watchdog_task, hotcpu);
case CPU_DEAD:
case CPU_DEAD_FROZEN:
p = per_cpu(watchdog_task, hotcpu);
@@ -338,14 +337,33 @@
static struct notifier_block __cpuinitdata cpu_nfb = {
.notifier_call = cpu_callback
};
.notifier_call = cpu_callback
};
-__init void spawn_softlockup_task(void)
+static int __initdata nosoftlockup;
+
+static int __init nosoftlockup_setup(char *str)
+{
+ nosoftlockup = 1;
+ return 1;
+}
+__setup("nosoftlockup", nosoftlockup_setup);
+
+static int __init spawn_softlockup_task(void)
{
void *cpu = (void *)(long)smp_processor_id();
{
void *cpu = (void *)(long)smp_processor_id();
- int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+ int err;
+
+ if (nosoftlockup)
+ return 0;
- BUG_ON(err == NOTIFY_BAD);
+ err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
+ if (err == NOTIFY_BAD) {
+ BUG();
+ return 1;
+ }
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
register_cpu_notifier(&cpu_nfb);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
register_cpu_notifier(&cpu_nfb);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ return 0;
}
}
+early_initcall(spawn_softlockup_task);