Audit: save audit_backlog_limit audit messages in case auditd comes back
diff --git a/kernel/timer.c b/kernel/timer.c
index 9fbb472..f3d35d4 100644
@@ -327,7 +327,7 @@ static void timer_stats_account_timer(struct timer_list *timer) {}
  * init_timer() must be done to a timer prior to calling *any* of the
  * other timer functions.
  */
-void fastcall init_timer(struct timer_list *timer)
+void init_timer(struct timer_list *timer)
 {
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
@@ -339,7 +339,7 @@ void fastcall init_timer(struct timer_list *timer)
 }
 EXPORT_SYMBOL(init_timer);
 
-void fastcall init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable(struct timer_list *timer)
 {
        init_timer(timer);
        timer_set_deferrable(timer);
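
For context, the dropped annotation was an i386-era calling-convention hint; a sketch of roughly how it used to be defined (the exact header and spelling varied by architecture and kernel version) is:

    /* i386, before -mregparm=3 became the default for the whole kernel build: */
    #define fastcall __attribute__((regparm(3)))

Once the kernel was built with register-argument passing everywhere, the attribute added nothing, so deleting it here does not change the generated code.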
@@ -451,10 +451,18 @@ void add_timer_on(struct timer_list *timer, int cpu)
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        internal_add_timer(base, timer);
+       /*
+        * Check whether the other CPU is idle and needs to be
+        * triggered to reevaluate the timer wheel when nohz is
+        * active. We are protected against the other CPU fiddling
+        * with the timer by holding the timer base lock. This also
+        * makes sure that a CPU on the way to idle can not evaluate
+        * the timer wheel.
+        */
+       wake_up_idle_cpu(cpu);
        spin_unlock_irqrestore(&base->lock, flags);
 }
 
-
 /**
  * mod_timer - modify a timer's timeout
  * @timer: the timer to be modified
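
A minimal usage sketch of the function being changed; my_timer, my_cpu_func and arm_on_cpu are hypothetical names used only for illustration, while init_timer(), add_timer_on(), jiffies and HZ are the kernel API of this era:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void my_cpu_func(unsigned long data)      /* hypothetical callback */
    {
            /* runs from the timer softirq on the CPU the timer was added on */
    }

    static struct timer_list my_timer;

    static void arm_on_cpu(int cpu)                  /* hypothetical helper */
    {
            init_timer(&my_timer);
            my_timer.function = my_cpu_func;
            my_timer.data     = 0;
            my_timer.expires  = jiffies + HZ;        /* roughly one second out */
            add_timer_on(&my_timer, cpu);            /* with this patch, also kicks an idle nohz CPU */
    }

Without the wake_up_idle_cpu() call, a fully idle nohz CPU could sleep past the new expiry, since it only reevaluates its timer wheel when something wakes it.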
@@ -818,12 +826,14 @@ unsigned long next_timer_interrupt(void)
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 void account_process_tick(struct task_struct *p, int user_tick)
 {
+       cputime_t one_jiffy = jiffies_to_cputime(1);
+
        if (user_tick) {
-               account_user_time(p, jiffies_to_cputime(1));
-               account_user_time_scaled(p, jiffies_to_cputime(1));
+               account_user_time(p, one_jiffy);
+               account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
        } else {
-               account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
-               account_system_time_scaled(p, jiffies_to_cputime(1));
+               account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
+               account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
        }
 }
 #endif
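
The hunk above does two things: it hoists the repeated jiffies_to_cputime(1) into a local, and it stops feeding the raw value to the *_scaled accounting helpers. A rough sketch of the distinction, assuming (as on most architectures) cputime_to_scaled() is an identity while architectures with frequency-scaled accounting rescale the tick:

    cputime_t one_jiffy        = jiffies_to_cputime(1);
    cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
    /*
     * utime/stime are charged one_jiffy in wall-tick units;
     * utime_scaled/stime_scaled get the rescaled value, so each
     * field stays in its own unit instead of mixing the two.
     */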
@@ -977,7 +987,7 @@ asmlinkage long sys_getppid(void)
        int pid;
 
        rcu_read_lock();
-       pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns);
+       pid = task_tgid_vnr(current->real_parent);
        rcu_read_unlock();
 
        return pid;
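
The replacement relies on task_tgid_vnr() being shorthand for the open-coded namespace lookup it removes; roughly, as a sketch rather than the exact definition from include/linux/sched.h:

    /* "vnr" = the id as seen from the current task's pid namespace */
    static inline pid_t task_tgid_vnr_sketch(struct task_struct *tsk)
    {
            return task_tgid_nr_ns(tsk, current->nsproxy->pid_ns);
    }

So sys_getppid() still reports the parent's tgid relative to the caller's namespace, just through the dedicated helper.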
@@ -1040,7 +1050,7 @@ static void process_timeout(unsigned long __data)
  *
  * In all cases the return value is guaranteed to be non-negative.
  */
-fastcall signed long __sched schedule_timeout(signed long timeout)
+signed long __sched schedule_timeout(signed long timeout)
 {
        struct timer_list timer;
        unsigned long expire;
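
For reference, the usual caller pattern around schedule_timeout(), which dropping the annotation does not change; nap_one_second() is a hypothetical helper for illustration:

    #include <linux/sched.h>

    /* Sleep for up to one second, waking early if a signal arrives. */
    static signed long nap_one_second(void)
    {
            set_current_state(TASK_INTERRUPTIBLE);
            return schedule_timeout(HZ);     /* jiffies left; 0 means it expired */
    }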
@@ -1218,13 +1228,6 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
        return 0;
 }
 
-/*
- * lockdep: we want to track each per-CPU base as a separate lock-class,
- * but timer-bases are kmalloc()-ed, so we need to attach separate
- * keys to them:
- */
-static struct lock_class_key base_lock_keys[NR_CPUS];
-
 static int __cpuinit init_timers_cpu(int cpu)
 {
        int j;
@@ -1267,7 +1270,6 @@ static int __cpuinit init_timers_cpu(int cpu)
        }
 
        spin_lock_init(&base->lock);
-       lockdep_set_class(&base->lock, base_lock_keys + cpu);
 
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1306,8 +1308,8 @@ static void __cpuinit migrate_timers(int cpu)
        new_base = get_cpu_var(tvec_bases);
 
        local_irq_disable();
-       double_spin_lock(&new_base->lock, &old_base->lock,
-                        smp_processor_id() < cpu);
+       spin_lock(&new_base->lock);
+       spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
        BUG_ON(old_base->running_timer);
 
@@ -1320,8 +1322,8 @@ static void __cpuinit migrate_timers(int cpu)
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }
 
-       double_spin_unlock(&new_base->lock, &old_base->lock,
-                          smp_processor_id() < cpu);
+       spin_unlock(&old_base->lock);
+       spin_unlock(&new_base->lock);
        local_irq_enable();
        put_cpu_var(tvec_bases);
 }
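
Taken together with the earlier removal of base_lock_keys[] and lockdep_set_class(), the last two hunks switch migrate_timers() to the standard lockdep idiom for holding two locks of the same lock class at once: the inner acquisition is annotated with a subclass so lockdep does not report a false recursive-locking deadlock. A minimal sketch of that pattern, with my_base and drain_into() as hypothetical stand-ins for the real tvec_base handling:

    #include <linux/spinlock.h>

    struct my_base {
            spinlock_t lock;
            /* ... */
    };

    static void drain_into(struct my_base *new_base, struct my_base *old_base)
    {
            spin_lock(&new_base->lock);
            /* same lock class: tell lockdep this nesting is intentional */
            spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

            /* ... move entries from old_base to new_base ... */

            spin_unlock(&old_base->lock);
            spin_unlock(&new_base->lock);
    }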