git://ftp.safe.ca
/
safe
/
jmp
/
linux-2.6
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
mm: avoid null-pointer deref in sync_mm_rss()
[safe/jmp/linux-2.6]
/
kernel
/
softlockup.c
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d225790..4b493f6 100644 (file)
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@
static DEFINE_SPINLOCK(print_lock);
static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
static int __read_mostly did_panic;
int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
+void touch_softlockup_watchdog_sync(void)
+{
+ __raw_get_cpu_var(softlock_touch_sync) = true;
+ __raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
void touch_all_softlockup_watchdogs(void)
{
	int cpu;
@@ -118,6 +125,14 @@ void softlockup_tick(void)
	}
	if (touch_ts == 0) {
+ if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+ /*
+ * If the time stamp was touched atomically
+ * make sure the scheduler tick is up to date.
+ */
+ per_cpu(softlock_touch_sync, this_cpu) = false;
+ sched_clock_tick();
+ }
		__touch_softlockup_watchdog();
		return;
	}
@@ -140,11 +155,11 @@ void softlockup_tick(void)
	 * Wake up the high-prio watchdog task twice per
	 * threshold timespan.
	 */
-	if (now > touch_ts + softlockup_thresh/2)
+	if (time_after(now - softlockup_thresh/2, touch_ts))
		wake_up_process(per_cpu(softlockup_watchdog, this_cpu));

	/* Warn about unreasonable delays: */
-	if (now <= (touch_ts + softlockup_thresh))
+	if (time_before_eq(now - softlockup_thresh, touch_ts))
		return;

	per_cpu(softlockup_print_ts, this_cpu) = touch_ts;