tracing: Fix circular deadlock in stack trace
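Reading <debugfs>/tracing/stack_trace or writing stack_max_size takes max_stack_lock while the stack tracer can still fire. If the tracer triggers inside arch_spin_lock(), or from an NMI that lands while the lock is held, check_stack() tries to take max_stack_lock again and the CPU deadlocks on itself. Bump the per-cpu trace_active counter around these critical sections so the tracer callback bails out instead of recursing into the lock.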
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 728c352..f4bc9b2 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -54,7 +54,7 @@ static inline void check_stack(void)
                return;
 
        local_irq_save(flags);
-       __raw_spin_lock(&max_stack_lock);
+       arch_spin_lock(&max_stack_lock);
 
        /* a race could have already updated it */
        if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
        }
 
  out:
-       __raw_spin_unlock(&max_stack_lock);
+       arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
 }
 
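The __raw_spin_* to arch_spin_* change follows the spinlock API rename from the same kernel cycle (raw_spinlock_t became arch_spinlock_t, and the __raw_spin_* helpers became arch_spin_*); max_stack_lock is one of these arch-level locks, always taken with IRQs off because the tracer can fire in any context. For reference, the relevant declarations in trace_stack.c look roughly like this (a sketch; exact initializers may differ by version):

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* per-cpu recursion guard checked by the stack tracer callback */
static DEFINE_PER_CPU(int, trace_active);
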
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
        unsigned long val, flags;
        char buf[64];
        int ret;
+       int cpu;
 
        if (count >= sizeof(buf))
                return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
                return ret;
 
        local_irq_save(flags);
-       __raw_spin_lock(&max_stack_lock);
+
+       /*
+        * In case we trace inside arch_spin_lock() or after it (NMI),
+        * we would cause a circular lock, so we also need to increase
+        * the percpu trace_active here.
+        */
+       cpu = smp_processor_id();
+       per_cpu(trace_active, cpu)++;
+
+       arch_spin_lock(&max_stack_lock);
        *ptr = val;
-       __raw_spin_unlock(&max_stack_lock);
+       arch_spin_unlock(&max_stack_lock);
+
+       per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);
 
        return count;
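
The increment works because trace_active is the same per-cpu counter the ftrace callback uses as its recursion guard: when the counter is already non-zero, the callback returns before ever touching max_stack_lock. A simplified sketch of the callback in trace_stack.c (from memory; the enable/disable checks vary by version):

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        int cpu;

        cpu = raw_smp_processor_id();

        /* no atomic needed; only this cpu modifies its counter */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack();

 out:
        per_cpu(trace_active, cpu)--;
}

So by raising trace_active before taking max_stack_lock, the writers and the seq_file iterator make any nested invocation of the callback take the early-out path instead of deadlocking on the lock.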
@@ -206,8 +218,14 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+       int cpu;
+
        local_irq_disable();
-       __raw_spin_lock(&max_stack_lock);
+
+       cpu = smp_processor_id();
+       per_cpu(trace_active, cpu)++;
+
+       arch_spin_lock(&max_stack_lock);
 
        if (*pos == 0)
                return SEQ_START_TOKEN;
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
-       __raw_spin_unlock(&max_stack_lock);
+       int cpu;
+
+       arch_spin_unlock(&max_stack_lock);
+
+       cpu = smp_processor_id();
+       per_cpu(trace_active, cpu)--;
+
        local_irq_enable();
 }
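
Both paths are reachable from user space: writing to <debugfs>/tracing/stack_max_size goes through stack_max_size_write(), and reading <debugfs>/tracing/stack_trace iterates the recorded snapshot via t_start()/t_stop(). With the stack tracer enabled (kernel.stack_tracer_enabled=1), either operation could previously recurse into the tracer while holding max_stack_lock; with the trace_active bump in place, the nested callback sees a non-zero counter and backs off.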