Preempt-RCU: implementation
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Fri, 25 Jan 2008 20:08:24 +0000 (21:08 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 25 Jan 2008 20:08:24 +0000 (21:08 +0100)
This patch implements a new version of RCU which allows its read-side
critical sections to be preempted.  It uses a set of per-CPU counter
pairs to keep track of read-side critical sections: the grace-period
machinery flips which counter of each pair new readers use, then waits
for the sum of the old counters to drop to zero.  The details of this
implementation can be found in this paper:

http://www.rdrop.com/users/paulmck/RCU/OLSrtRCU.2006.08.11a.pdf

and in this article:

http://lwn.net/Articles/253651/

This patch was developed as part of the -rt kernel effort and is meant
to provide better latencies by not disabling preemption in RCU read-side
critical sections.  As a consequence of keeping track of RCU readers,
the read side incurs a slight overhead (the paper above discusses
possible optimizations).  This implementation co-exists with the
"classic" RCU implementation and can be selected at compile time.
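
To illustrate the counter-pair idea, here is a deliberately simplified,
single-threaded user-space sketch.  It is not the per-CPU,
memory-barrier-protected code added by this patch, and all identifiers
in it are invented for the illustration: a reader bumps whichever
counter the current grace-period parity selects, and a grace period
flips the parity and then waits for the counter used before the flip
to drain to zero.

/*
 * Simplified sketch of the counter-pair scheme used by preemptible RCU.
 * The real implementation keeps one counter pair per CPU, uses
 * ACCESS_ONCE() and memory barriers, and drives the flip through a
 * multi-stage state machine; none of that is modelled here.
 */
#include <stdio.h>

static long flipctr[2];         /* the counter pair */
static long completed;          /* grace-period number; low bit selects the index */
static int reader_idx;          /* index this (single) reader incremented */

static void read_lock(void)
{
        reader_idx = completed & 0x1;   /* sample the current parity */
        flipctr[reader_idx]++;          /* announce the critical section */
}

static void read_unlock(void)
{
        flipctr[reader_idx]--;          /* decrement the remembered index */
}

static void start_grace_period(void)
{
        completed++;                    /* flip: new readers use the other counter */
}

static int old_readers_gone(void)
{
        /* The "last" counter is the one readers used before the flip. */
        return flipctr[!(completed & 0x1)] == 0;
}

int main(void)
{
        read_lock();                    /* reader enters under the old parity */
        start_grace_period();           /* flip; the old counter can only drain now */
        printf("old readers gone? %d\n", old_readers_gone());  /* prints 0 */
        read_unlock();
        printf("old readers gone? %d\n", old_readers_gone());  /* prints 1: safe to reclaim */
        return 0;
}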

Also includes RCU tracing summarized in debugfs.

[ akpm@linux-foundation.org: build fixes on non-preempt architectures ]

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Dipankar Sarma <dipankar@in.ibm.com>
Signed-off-by: Paul E. McKenney <paulmck@us.ibm.com>
Reviewed-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
13 files changed:
fs/Kconfig
include/linux/rcuclassic.h
include/linux/rcupdate.h
include/linux/rcupreempt.h [new file with mode: 0644]
include/linux/rcupreempt_trace.h [new file with mode: 0644]
include/linux/sched.h
init/Kconfig
kernel/Kconfig.preempt
kernel/Makefile
kernel/fork.c
kernel/rcuclassic.c
kernel/rcupreempt.c [new file with mode: 0644]
kernel/rcupreempt_trace.c [new file with mode: 0644]

index 781b47d..b4799ef 100644
@@ -2130,4 +2130,3 @@ source "fs/nls/Kconfig"
 source "fs/dlm/Kconfig"
 
 endmenu
-
index 2b8b045..4d66242 100644
@@ -157,5 +157,8 @@ extern void __rcu_init(void);
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
 
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUCLASSIC_H */
index 12aa13e..d32c14d 100644
@@ -53,7 +53,11 @@ struct rcu_head {
        void (*func)(struct rcu_head *head);
 };
 
+#ifdef CONFIG_CLASSIC_RCU
 #include <linux/rcuclassic.h>
+#else /* #ifdef CONFIG_CLASSIC_RCU */
+#include <linux/rcupreempt.h>
+#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
 
 #define RCU_HEAD_INIT  { .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -231,13 +235,12 @@ extern void call_rcu_bh(struct rcu_head *head,
 /* Exported common interfaces */
 extern void synchronize_rcu(void);
 extern void rcu_barrier(void);
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
-extern void rcu_check_callbacks(int cpu, int user);
-
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
+extern int rcu_needs_cpu(int cpu);
 
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
new file mode 100644
index 0000000..ece8eb3
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (RT implementation)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author:  Paul McKenney <paulmck@us.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *             Documentation/RCU
+ *
+ */
+
+#ifndef __LINUX_RCUPREEMPT_H
+#define __LINUX_RCUPREEMPT_H
+
+#ifdef __KERNEL__
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+#define rcu_qsctr_inc(cpu)
+#define rcu_bh_qsctr_inc(cpu)
+#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
+
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#define __rcu_read_lock_bh()   { rcu_read_lock(); local_bh_disable(); }
+#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); }
+
+extern void __synchronize_sched(void);
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+extern long rcu_batches_completed(void);
+
+/*
+ * Return the number of RCU batches processed thus far. Useful for debug
+ * and statistics. The _bh variant is identical to straight RCU.
+ */
+static inline long rcu_batches_completed_bh(void)
+{
+       return rcu_batches_completed();
+}
+
+#ifdef CONFIG_RCU_TRACE
+struct rcupreempt_trace;
+extern long *rcupreempt_flipctr(int cpu);
+extern long rcupreempt_data_completed(void);
+extern int rcupreempt_flip_flag(int cpu);
+extern int rcupreempt_mb_flag(int cpu);
+extern char *rcupreempt_try_flip_state_name(void);
+extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
+#endif
+
+struct softirq_action;
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
new file mode 100644
index 0000000..21cd6b2
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (RT implementation)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author:  Paul McKenney <paulmck@us.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
+ *              http://lwn.net/Articles/253651/
+ */
+
+#ifndef __LINUX_RCUPREEMPT_TRACE_H
+#define __LINUX_RCUPREEMPT_TRACE_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <asm/atomic.h>
+
+/*
+ * PREEMPT_RCU data structures.
+ */
+
+struct rcupreempt_trace {
+       long            next_length;
+       long            next_add;
+       long            wait_length;
+       long            wait_add;
+       long            done_length;
+       long            done_add;
+       long            done_remove;
+       atomic_t        done_invoked;
+       long            rcu_check_callbacks;
+       atomic_t        rcu_try_flip_1;
+       atomic_t        rcu_try_flip_e1;
+       long            rcu_try_flip_i1;
+       long            rcu_try_flip_ie1;
+       long            rcu_try_flip_g1;
+       long            rcu_try_flip_a1;
+       long            rcu_try_flip_ae1;
+       long            rcu_try_flip_a2;
+       long            rcu_try_flip_z1;
+       long            rcu_try_flip_ze1;
+       long            rcu_try_flip_z2;
+       long            rcu_try_flip_m1;
+       long            rcu_try_flip_me1;
+       long            rcu_try_flip_m2;
+};
+
+#ifdef CONFIG_RCU_TRACE
+#define RCU_TRACE(fn, arg)     fn(arg);
+#else
+#define RCU_TRACE(fn, arg)
+#endif
+
+extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
+extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_RCUPREEMPT_TRACE_H */
index f2044e7..72e1b8e 100644
@@ -974,6 +974,11 @@ struct task_struct {
        int nr_cpus_allowed;
        unsigned int time_slice;
 
+#ifdef CONFIG_PREEMPT_RCU
+       int rcu_read_lock_nesting;
+       int rcu_flipctr_idx;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        struct sched_info sched_info;
 #endif
index f5becd2..0eda68f 100644
@@ -763,3 +763,31 @@ source "block/Kconfig"
 
 config PREEMPT_NOTIFIERS
        bool
+
+choice
+       prompt "RCU implementation type:"
+       default CLASSIC_RCU
+
+config CLASSIC_RCU
+       bool "Classic RCU"
+       help
+         This option selects the classic RCU implementation that is
+         designed for best read-side performance on non-realtime
+         systems.
+
+         Say Y if you are unsure.
+
+config PREEMPT_RCU
+       bool "Preemptible RCU"
+       depends on PREEMPT
+       help
+         This option reduces the latency of the kernel by making certain
+         RCU sections preemptible. Normally RCU code is non-preemptible; if
+         this option is selected, then read-only RCU sections become
+         preemptible. This helps latency, but may expose bugs due to
+         now-naive assumptions about each RCU read-side critical section
+         remaining on a given CPU through its execution.
+
+         Say N if you are unsure.
+
+endchoice
index c64ce9c..61fa116 100644
@@ -63,3 +63,13 @@ config PREEMPT_BKL
          Say Y here if you are building a kernel for a desktop system.
          Say N if you are unsure.
 
+config RCU_TRACE
+       bool "Enable tracing for RCU - currently stats in debugfs"
+       select DEBUG_FS
+       default y
+       help
+         This option provides tracing for RCU, presenting statistics
+         in debugfs that are useful for debugging the RCU implementation.
+
+         Say Y here if you want to enable RCU tracing.
+         Say N if you are unsure.
index def5dd6..68755cd 100644
@@ -6,7 +6,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
            exit.o itimer.o time.o softirq.o resource.o \
            sysctl.o capability.o ptrace.o timer.o user.o user_namespace.o \
            signal.o sys.o kmod.o workqueue.o pid.o \
-           rcupdate.o rcuclassic.o extable.o params.o posix-timers.o \
+           rcupdate.o extable.o params.o posix-timers.o \
            kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
            utsname.o notifier.o
@@ -52,6 +52,11 @@ obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
 obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
+obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
+obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
+ifeq ($(CONFIG_PREEMPT_RCU),y)
+obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o
+endif
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
index 930c518..9f8ef32 100644
@@ -1045,6 +1045,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        copy_flags(clone_flags, p);
        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
+#ifdef CONFIG_PREEMPT_RCU
+       p->rcu_read_lock_nesting = 0;
+       p->rcu_flipctr_idx = 0;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);
 
index ce0cf16..f4ffbd0 100644
@@ -45,7 +45,6 @@
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
-/* #include <linux/rcupdate.h> @@@ */
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
new file mode 100644 (file)
index 0000000..a5aabb1
--- /dev/null
@@ -0,0 +1,816 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion, realtime implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2006
+ *
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ *             With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar
+ *             for pushing me away from locks and towards counters, and
+ *             to Suparna Bhattacharya for pushing me completely away
+ *             from atomic instructions on the read side.
+ *
+ * Papers:  http://www.rdrop.com/users/paulmck/RCU
+ *
+ * Design Document: http://lwn.net/Articles/253651/
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *             Documentation/RCU/ *.txt
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/cpu.h>
+#include <linux/random.h>
+#include <linux/delay.h>
+#include <linux/byteorder/swabb.h>
+#include <linux/cpumask.h>
+#include <linux/rcupreempt_trace.h>
+
+/*
+ * Macro that prevents the compiler from reordering accesses, but does
+ * absolutely -nothing- to prevent CPUs from reordering.  This is used
+ * only to mediate communication between mainline code and hardware
+ * interrupt and NMI handlers.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
+/*
+ * PREEMPT_RCU data structures.
+ */
+
+/*
+ * GP_STAGES specifies the number of times the state machine has
+ * to go through the all the rcu_try_flip_states (see below)
+ * in a single Grace Period.
+ *
+ * GP in GP_STAGES stands for Grace Period ;)
+ */
+#define GP_STAGES    2
+struct rcu_data {
+       spinlock_t      lock;           /* Protect rcu_data fields. */
+       long            completed;      /* Number of last completed batch. */
+       int             waitlistcount;
+       struct tasklet_struct rcu_tasklet;
+       struct rcu_head *nextlist;
+       struct rcu_head **nexttail;
+       struct rcu_head *waitlist[GP_STAGES];
+       struct rcu_head **waittail[GP_STAGES];
+       struct rcu_head *donelist;
+       struct rcu_head **donetail;
+       long rcu_flipctr[2];
+#ifdef CONFIG_RCU_TRACE
+       struct rcupreempt_trace trace;
+#endif /* #ifdef CONFIG_RCU_TRACE */
+};
+
+/*
+ * States for rcu_try_flip() and friends.
+ */
+
+enum rcu_try_flip_states {
+
+       /*
+        * Stay here if nothing is happening. Flip the counter if something
+        * starts happening. Denoted by "I".
+        */
+       rcu_try_flip_idle_state,
+
+       /*
+        * Wait here for all CPUs to notice that the counter has flipped. This
+        * prevents the old set of counters from ever being incremented once
+        * we leave this state, which in turn is necessary because we cannot
+        * test any individual counter for zero -- we can only check the sum.
+        * Denoted by "A".
+        */
+       rcu_try_flip_waitack_state,
+
+       /*
+        * Wait here for the sum of the old per-CPU counters to reach zero.
+        * Denoted by "Z".
+        */
+       rcu_try_flip_waitzero_state,
+
+       /*
+        * Wait here for each of the other CPUs to execute a memory barrier.
+        * This is necessary to ensure that these other CPUs really have
+        * completed executing their RCU read-side critical sections, despite
+        * their CPUs wildly reordering memory. Denoted by "M".
+        */
+       rcu_try_flip_waitmb_state,
+};
+
+struct rcu_ctrlblk {
+       spinlock_t      fliplock;       /* Protect state-machine transitions. */
+       long            completed;      /* Number of last completed batch. */
+       enum rcu_try_flip_states rcu_try_flip_state; /* The current state of
+                                                       the rcu state machine */
+};
+
+static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+static struct rcu_ctrlblk rcu_ctrlblk = {
+       .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
+       .completed = 0,
+       .rcu_try_flip_state = rcu_try_flip_idle_state,
+};
+
+
+#ifdef CONFIG_RCU_TRACE
+static char *rcu_try_flip_state_names[] =
+       { "idle", "waitack", "waitzero", "waitmb" };
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
+/*
+ * Enum and per-CPU flag to determine when each CPU has seen
+ * the most recent counter flip.
+ */
+
+enum rcu_flip_flag_values {
+       rcu_flip_seen,          /* Steady/initial state, last flip seen. */
+                               /* Only GP detector can update. */
+       rcu_flipped             /* Flip just completed, need confirmation. */
+                               /* Only corresponding CPU can update. */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag)
+                                                               = rcu_flip_seen;
+
+/*
+ * Enum and per-CPU flag to determine when each CPU has executed the
+ * needed memory barrier to fence in memory references from its last RCU
+ * read-side critical section in the just-completed grace period.
+ */
+
+enum rcu_mb_flag_values {
+       rcu_mb_done,            /* Steady/initial state, no mb()s required. */
+                               /* Only GP detector can update. */
+       rcu_mb_needed           /* Flip just completed, need an mb(). */
+                               /* Only corresponding CPU can update. */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag)
+                                                               = rcu_mb_done;
+
+/*
+ * RCU_DATA_ME: find the current CPU's rcu_data structure.
+ * RCU_DATA_CPU: find the specified CPU's rcu_data structure.
+ */
+#define RCU_DATA_ME()          (&__get_cpu_var(rcu_data))
+#define RCU_DATA_CPU(cpu)      (&per_cpu(rcu_data, cpu))
+
+/*
+ * Helper macro for tracing when the appropriate rcu_data is not
+ * cached in a local variable, but where the CPU number is so cached.
+ */
+#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));
+
+/*
+ * Helper macro for tracing when the appropriate rcu_data is not
+ * cached in a local variable.
+ */
+#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace));
+
+/*
+ * Helper macro for tracing when the appropriate rcu_data is pointed
+ * to by a local variable.
+ */
+#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace));
+
+/*
+ * Return the number of RCU batches processed thus far.  Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+       return rcu_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+void __rcu_read_lock(void)
+{
+       int idx;
+       struct task_struct *t = current;
+       int nesting;
+
+       nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
+       if (nesting != 0) {
+
+               /* An earlier rcu_read_lock() covers us, just count it. */
+
+               t->rcu_read_lock_nesting = nesting + 1;
+
+       } else {
+               unsigned long flags;
+
+               /*
+                * We disable interrupts for the following reasons:
+                * - If we get a scheduling-clock interrupt here, and we
+                *   end up acking the counter flip, it's like a promise
+                *   that we will never increment the old counter again.
+                *   Thus we will break that promise if that
+                *   scheduling clock interrupt happens between the time
+                *   we pick the .completed field and the time that we
+                *   increment our counter.
+                *
+                * - We don't want to be preempted out here.
+                *
+                * NMIs can still occur, of course, and might themselves
+                * contain rcu_read_lock().
+                */
+
+               local_irq_save(flags);
+
+               /*
+                * Outermost nesting of rcu_read_lock(), so increment
+                * the current counter for the current CPU.  Use volatile
+                * casts to prevent the compiler from reordering.
+                */
+
+               idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
+               ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++;
+
+               /*
+                * Now that the per-CPU counter has been incremented, we
+                * are protected from races with rcu_read_lock() invoked
+                * from NMI handlers on this CPU.  We can therefore safely
+                * increment the nesting counter, relieving further NMIs
+                * of the need to increment the per-CPU counter.
+                */
+
+               ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1;
+
+               /*
+                * Now that we have prevented any NMIs from storing
+                * to the ->rcu_flipctr_idx, we can safely use it to
+                * remember which counter to decrement in the matching
+                * rcu_read_unlock().
+                */
+
+               ACCESS_ONCE(t->rcu_flipctr_idx) = idx;
+               local_irq_restore(flags);
+       }
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+void __rcu_read_unlock(void)
+{
+       int idx;
+       struct task_struct *t = current;
+       int nesting;
+
+       nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
+       if (nesting > 1) {
+
+               /*
+                * We are still protected by the enclosing rcu_read_lock(),
+                * so simply decrement the counter.
+                */
+
+               t->rcu_read_lock_nesting = nesting - 1;
+
+       } else {
+               unsigned long flags;
+
+               /*
+                * Disable local interrupts to prevent the grace-period
+                * detection state machine from seeing us half-done.
+                * NMIs can still occur, of course, and might themselves
+                * contain rcu_read_lock() and rcu_read_unlock().
+                */
+
+               local_irq_save(flags);
+
+               /*
+                * Outermost nesting of rcu_read_unlock(), so we must
+                * decrement the current counter for the current CPU.
+                * This must be done carefully, because NMIs can
+                * occur at any point in this code, and any rcu_read_lock()
+                * and rcu_read_unlock() pairs in the NMI handlers
+                * must interact non-destructively with this code.
+                * Lots of volatile casts, and -very- careful ordering.
+                *
+                * Changes to this code, including this one, must be
+                * inspected, validated, and tested extremely carefully!!!
+                */
+
+               /*
+                * First, pick up the index.
+                */
+
+               idx = ACCESS_ONCE(t->rcu_flipctr_idx);
+
+               /*
+                * Now that we have fetched the counter index, it is
+                * safe to decrement the per-task RCU nesting counter.
+                * After this, any interrupts or NMIs will increment and
+                * decrement the per-CPU counters.
+                */
+               ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1;
+
+               /*
+                * It is now safe to decrement this CPU's counter.  NMIs
+                * that occur after the nesting-count update will route their
+                * rcu_read_lock() calls through this "else" clause, and
+                * will thus start incrementing the per-CPU counter on
+                * their own.  They will also clobber ->rcu_flipctr_idx,
+                * but that is OK, since we have already fetched it.
+                */
+
+               ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--;
+               local_irq_restore(flags);
+       }
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
+ * If a global counter flip has occurred since the last time that we
+ * advanced callbacks, advance them.  Hardware interrupts must be
+ * disabled when calling this function.
+ */
+static void __rcu_advance_callbacks(struct rcu_data *rdp)
+{
+       int cpu;
+       int i;
+       int wlc = 0;
+
+       if (rdp->completed != rcu_ctrlblk.completed) {
+               if (rdp->waitlist[GP_STAGES - 1] != NULL) {
+                       *rdp->donetail = rdp->waitlist[GP_STAGES - 1];
+                       rdp->donetail = rdp->waittail[GP_STAGES - 1];
+                       RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp);
+               }
+               for (i = GP_STAGES - 2; i >= 0; i--) {
+                       if (rdp->waitlist[i] != NULL) {
+                               rdp->waitlist[i + 1] = rdp->waitlist[i];
+                               rdp->waittail[i + 1] = rdp->waittail[i];
+                               wlc++;
+                       } else {
+                               rdp->waitlist[i + 1] = NULL;
+                               rdp->waittail[i + 1] =
+                                       &rdp->waitlist[i + 1];
+                       }
+               }
+               if (rdp->nextlist != NULL) {
+                       rdp->waitlist[0] = rdp->nextlist;
+                       rdp->waittail[0] = rdp->nexttail;
+                       wlc++;
+                       rdp->nextlist = NULL;
+                       rdp->nexttail = &rdp->nextlist;
+                       RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp);
+               } else {
+                       rdp->waitlist[0] = NULL;
+                       rdp->waittail[0] = &rdp->waitlist[0];
+               }
+               rdp->waitlistcount = wlc;
+               rdp->completed = rcu_ctrlblk.completed;
+       }
+
+       /*
+        * Check to see if this CPU needs to report that it has seen
+        * the most recent counter flip, thereby declaring that all
+        * subsequent rcu_read_lock() invocations will respect this flip.
+        */
+
+       cpu = raw_smp_processor_id();
+       if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
+               smp_mb();  /* Subsequent counter accesses must see new value */
+               per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
+               smp_mb();  /* Subsequent RCU read-side critical sections */
+                          /*  seen -after- acknowledgement. */
+       }
+}
+
+/*
+ * Get here when RCU is idle.  Decide whether we need to
+ * move out of idle state, and return non-zero if so.
+ * "Straightforward" approach for the moment, might later
+ * use callback-list lengths, grace-period duration, or
+ * some such to determine when to exit idle state.
+ * Might also need a pre-idle test that does not acquire
+ * the lock, but let's get the simple case working first...
+ */
+
+static int
+rcu_try_flip_idle(void)
+{
+       int cpu;
+
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_i1);
+       if (!rcu_pending(smp_processor_id())) {
+               RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1);
+               return 0;
+       }
+
+       /*
+        * Do the flip.
+        */
+
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_g1);
+       rcu_ctrlblk.completed++;  /* stands in for rcu_try_flip_g2 */
+
+       /*
+        * Need a memory barrier so that other CPUs see the new
+        * counter value before they see the subsequent change of all
+        * the rcu_flip_flag instances to rcu_flipped.
+        */
+
+       smp_mb();       /* see above block comment. */
+
+       /* Now ask each CPU for acknowledgement of the flip. */
+
+       for_each_possible_cpu(cpu)
+               per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
+
+       return 1;
+}
+
+/*
+ * Wait for CPUs to acknowledge the flip.
+ */
+
+static int
+rcu_try_flip_waitack(void)
+{
+       int cpu;
+
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
+       for_each_possible_cpu(cpu)
+               if (per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
+                       RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
+                       return 0;
+               }
+
+       /*
+        * Make sure our checks above don't bleed into subsequent
+        * waiting for the sum of the counters to reach zero.
+        */
+
+       smp_mb();       /* see above block comment. */
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_a2);
+       return 1;
+}
+
+/*
+ * Wait for collective ``last'' counter to reach zero,
+ * then tell all CPUs to do an end-of-grace-period memory barrier.
+ */
+
+static int
+rcu_try_flip_waitzero(void)
+{
+       int cpu;
+       int lastidx = !(rcu_ctrlblk.completed & 0x1);
+       int sum = 0;
+
+       /* Check to see if the sum of the "last" counters is zero. */
+
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
+       for_each_possible_cpu(cpu)
+               sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
+       if (sum != 0) {
+               RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
+               return 0;
+       }
+
+       /*
+        * This ensures that the other CPUs see the call for
+        * memory barriers -after- the sum has been detected to
+        * reach zero here.
+        */
+       smp_mb();  /*  ^^^^^^^^^^^^ */
+
+       /* Call for a memory barrier from each CPU. */
+       for_each_possible_cpu(cpu)
+               per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
+
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
+       return 1;
+}
+
+/*
+ * Wait for all CPUs to do their end-of-grace-period memory barrier.
+ * Return 0 once all CPUs have done so.
+ */
+
+static int
+rcu_try_flip_waitmb(void)
+{
+       int cpu;
+
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
+       for_each_possible_cpu(cpu)
+               if (per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
+                       RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
+                       return 0;
+               }
+
+       smp_mb(); /* Ensure that the above checks precede any following flip. */
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_m2);
+       return 1;
+}
+
+/*
+ * Attempt a single flip of the counters.  Remember, a single flip does
+ * -not- constitute a grace period.  Instead, the interval between
+ * at least GP_STAGES consecutive flips is a grace period.
+ *
+ * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation
+ * on a large SMP, they might want to use a hierarchical organization of
+ * the per-CPU-counter pairs.
+ */
+static void rcu_try_flip(void)
+{
+       unsigned long flags;
+
+       RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
+       if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
+               RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
+               return;
+       }
+
+       /*
+        * Take the next transition(s) through the RCU grace-period
+        * flip-counter state machine.
+        */
+
+       switch (rcu_ctrlblk.rcu_try_flip_state) {
+       case rcu_try_flip_idle_state:
+               if (rcu_try_flip_idle())
+                       rcu_ctrlblk.rcu_try_flip_state =
+                               rcu_try_flip_waitack_state;
+               break;
+       case rcu_try_flip_waitack_state:
+               if (rcu_try_flip_waitack())
+                       rcu_ctrlblk.rcu_try_flip_state =
+                               rcu_try_flip_waitzero_state;
+               break;
+       case rcu_try_flip_waitzero_state:
+               if (rcu_try_flip_waitzero())
+                       rcu_ctrlblk.rcu_try_flip_state =
+                               rcu_try_flip_waitmb_state;
+               break;
+       case rcu_try_flip_waitmb_state:
+               if (rcu_try_flip_waitmb())
+                       rcu_ctrlblk.rcu_try_flip_state =
+                               rcu_try_flip_idle_state;
+       }
+       spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
+}
+
+/*
+ * Check to see if this CPU needs to do a memory barrier in order to
+ * ensure that any prior RCU read-side critical sections have committed
+ * their counter manipulations and critical-section memory references
+ * before declaring the grace period to be completed.
+ */
+static void rcu_check_mb(int cpu)
+{
+       if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
+               smp_mb();  /* Ensure RCU read-side accesses are visible. */
+               per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
+       }
+}
+
+void rcu_check_callbacks(int cpu, int user)
+{
+       unsigned long flags;
+       struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+       rcu_check_mb(cpu);
+       if (rcu_ctrlblk.completed == rdp->completed)
+               rcu_try_flip();
+       spin_lock_irqsave(&rdp->lock, flags);
+       RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
+       __rcu_advance_callbacks(rdp);
+       if (rdp->donelist == NULL) {
+               spin_unlock_irqrestore(&rdp->lock, flags);
+       } else {
+               spin_unlock_irqrestore(&rdp->lock, flags);
+               raise_softirq(RCU_SOFTIRQ);
+       }
+}
+
+/*
+ * Needed by dynticks, to make sure all RCU processing has finished
+ * when we go idle:
+ */
+void rcu_advance_callbacks(int cpu, int user)
+{
+       unsigned long flags;
+       struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+       if (rcu_ctrlblk.completed == rdp->completed) {
+               rcu_try_flip();
+               if (rcu_ctrlblk.completed == rdp->completed)
+                       return;
+       }
+       spin_lock_irqsave(&rdp->lock, flags);
+       RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
+       __rcu_advance_callbacks(rdp);
+       spin_unlock_irqrestore(&rdp->lock, flags);
+}
+
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+       unsigned long flags;
+       struct rcu_head *next, *list;
+       struct rcu_data *rdp = RCU_DATA_ME();
+
+       spin_lock_irqsave(&rdp->lock, flags);
+       list = rdp->donelist;
+       if (list == NULL) {
+               spin_unlock_irqrestore(&rdp->lock, flags);
+               return;
+       }
+       rdp->donelist = NULL;
+       rdp->donetail = &rdp->donelist;
+       RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
+       spin_unlock_irqrestore(&rdp->lock, flags);
+       while (list) {
+               next = list->next;
+               list->func(list);
+               list = next;
+               RCU_TRACE_ME(rcupreempt_trace_invoke);
+       }
+}
+
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+       unsigned long flags;
+       struct rcu_data *rdp;
+
+       head->func = func;
+       head->next = NULL;
+       local_irq_save(flags);
+       rdp = RCU_DATA_ME();
+       spin_lock(&rdp->lock);
+       __rcu_advance_callbacks(rdp);
+       *rdp->nexttail = head;
+       rdp->nexttail = &head->next;
+       RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
+       spin_unlock(&rdp->lock);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/*
+ * Wait until all currently running preempt_disable() code segments
+ * (including hardware-irq-disable segments) complete.  Note that
+ * in -rt this does -not- necessarily result in all currently executing
+ * interrupt -handlers- having completed.
+ */
+void __synchronize_sched(void)
+{
+       cpumask_t oldmask;
+       int cpu;
+
+       if (sched_getaffinity(0, &oldmask) < 0)
+               oldmask = cpu_possible_map;
+       for_each_online_cpu(cpu) {
+               sched_setaffinity(0, cpumask_of_cpu(cpu));
+               schedule();
+       }
+       sched_setaffinity(0, oldmask);
+}
+EXPORT_SYMBOL_GPL(__synchronize_sched);
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so.  Assumes that notifiers would take care of handling any
+ * outstanding requests from the RCU core.
+ *
+ * This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+       struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+       return (rdp->donelist != NULL ||
+               !!rdp->waitlistcount ||
+               rdp->nextlist != NULL);
+}
+
+int rcu_pending(int cpu)
+{
+       struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+       /* The CPU has at least one callback queued somewhere. */
+
+       if (rdp->donelist != NULL ||
+           !!rdp->waitlistcount ||
+           rdp->nextlist != NULL)
+               return 1;
+
+       /* The RCU core needs an acknowledgement from this CPU. */
+
+       if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
+           (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
+               return 1;
+
+       /* This CPU has fallen behind the global grace-period number. */
+
+       if (rdp->completed != rcu_ctrlblk.completed)
+               return 1;
+
+       /* Nothing needed from this CPU. */
+
+       return 0;
+}
+
+void __init __rcu_init(void)
+{
+       int cpu;
+       int i;
+       struct rcu_data *rdp;
+
+       printk(KERN_NOTICE "Preemptible RCU implementation.\n");
+       for_each_possible_cpu(cpu) {
+               rdp = RCU_DATA_CPU(cpu);
+               spin_lock_init(&rdp->lock);
+               rdp->completed = 0;
+               rdp->waitlistcount = 0;
+               rdp->nextlist = NULL;
+               rdp->nexttail = &rdp->nextlist;
+               for (i = 0; i < GP_STAGES; i++) {
+                       rdp->waitlist[i] = NULL;
+                       rdp->waittail[i] = &rdp->waitlist[i];
+               }
+               rdp->donelist = NULL;
+               rdp->donetail = &rdp->donelist;
+               rdp->rcu_flipctr[0] = 0;
+               rdp->rcu_flipctr[1] = 0;
+       }
+       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
+}
+
+/*
+ * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
+ */
+void synchronize_kernel(void)
+{
+       synchronize_rcu();
+}
+
+#ifdef CONFIG_RCU_TRACE
+long *rcupreempt_flipctr(int cpu)
+{
+       return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
+}
+EXPORT_SYMBOL_GPL(rcupreempt_flipctr);
+
+int rcupreempt_flip_flag(int cpu)
+{
+       return per_cpu(rcu_flip_flag, cpu);
+}
+EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);
+
+int rcupreempt_mb_flag(int cpu)
+{
+       return per_cpu(rcu_mb_flag, cpu);
+}
+EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);
+
+char *rcupreempt_try_flip_state_name(void)
+{
+       return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
+}
+EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);
+
+struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
+{
+       struct rcu_data *rdp = RCU_DATA_CPU(cpu);
+
+       return &rdp->trace;
+}
+EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
new file mode 100644
index 0000000..49ac494
--- /dev/null
@@ -0,0 +1,330 @@
+/*
+ * Read-Copy Update tracing for realtime implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2006
+ *
+ * Papers:  http://www.rdrop.com/users/paulmck/RCU
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *             Documentation/RCU/ *.txt
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/rcupreempt_trace.h>
+#include <linux/debugfs.h>
+
+static struct mutex rcupreempt_trace_mutex;
+static char *rcupreempt_trace_buf;
+#define RCUPREEMPT_TRACE_BUF_SIZE 4096
+
+void rcupreempt_trace_move2done(struct rcupreempt_trace *trace)
+{
+       trace->done_length += trace->wait_length;
+       trace->done_add += trace->wait_length;
+       trace->wait_length = 0;
+}
+void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace)
+{
+       trace->wait_length += trace->next_length;
+       trace->wait_add += trace->next_length;
+       trace->next_length = 0;
+}
+void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace)
+{
+       atomic_inc(&trace->rcu_try_flip_1);
+}
+void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace)
+{
+       atomic_inc(&trace->rcu_try_flip_e1);
+}
+void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_i1++;
+}
+void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_ie1++;
+}
+void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_g1++;
+}
+void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_a1++;
+}
+void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_ae1++;
+}
+void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_a2++;
+}
+void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_z1++;
+}
+void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_ze1++;
+}
+void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_z2++;
+}
+void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_m1++;
+}
+void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_me1++;
+}
+void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace)
+{
+       trace->rcu_try_flip_m2++;
+}
+void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace)
+{
+       trace->rcu_check_callbacks++;
+}
+void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace)
+{
+       trace->done_remove += trace->done_length;
+       trace->done_length = 0;
+}
+void rcupreempt_trace_invoke(struct rcupreempt_trace *trace)
+{
+       atomic_inc(&trace->done_invoked);
+}
+void rcupreempt_trace_next_add(struct rcupreempt_trace *trace)
+{
+       trace->next_add++;
+       trace->next_length++;
+}
+
+static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
+{
+       struct rcupreempt_trace *cp;
+       int cpu;
+
+       memset(sp, 0, sizeof(*sp));
+       for_each_possible_cpu(cpu) {
+               cp = rcupreempt_trace_cpu(cpu);
+               sp->next_length += cp->next_length;
+               sp->next_add += cp->next_add;
+               sp->wait_length += cp->wait_length;
+               sp->wait_add += cp->wait_add;
+               sp->done_length += cp->done_length;
+               sp->done_add += cp->done_add;
+               sp->done_remove += cp->done_remove;
+               atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked));
+               sp->rcu_check_callbacks += cp->rcu_check_callbacks;
+               atomic_set(&sp->rcu_try_flip_1,
+                          atomic_read(&cp->rcu_try_flip_1));
+               atomic_set(&sp->rcu_try_flip_e1,
+                          atomic_read(&cp->rcu_try_flip_e1));
+               sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
+               sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
+               sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
+               sp->rcu_try_flip_a1 += cp->rcu_try_flip_a1;
+               sp->rcu_try_flip_ae1 += cp->rcu_try_flip_ae1;
+               sp->rcu_try_flip_a2 += cp->rcu_try_flip_a2;
+               sp->rcu_try_flip_z1 += cp->rcu_try_flip_z1;
+               sp->rcu_try_flip_ze1 += cp->rcu_try_flip_ze1;
+               sp->rcu_try_flip_z2 += cp->rcu_try_flip_z2;
+               sp->rcu_try_flip_m1 += cp->rcu_try_flip_m1;
+               sp->rcu_try_flip_me1 += cp->rcu_try_flip_me1;
+               sp->rcu_try_flip_m2 += cp->rcu_try_flip_m2;
+       }
+}
+
+static ssize_t rcustats_read(struct file *filp, char __user *buffer,
+                               size_t count, loff_t *ppos)
+{
+       struct rcupreempt_trace trace;
+       ssize_t bcount;
+       int cnt = 0;
+
+       rcupreempt_trace_sum(&trace);
+       mutex_lock(&rcupreempt_trace_mutex);
+       cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+                "ggp=%ld rcc=%ld\n",
+                rcu_batches_completed(),
+                trace.rcu_check_callbacks);
+       snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+                "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n"
+                "1=%d e1=%d i1=%ld ie1=%ld g1=%ld a1=%ld ae1=%ld a2=%ld\n"
+                "z1=%ld ze1=%ld z2=%ld m1=%ld me1=%ld m2=%ld\n",
+
+                trace.next_add, trace.next_length,
+                trace.wait_add, trace.wait_length,
+                trace.done_add, trace.done_length,
+                trace.done_remove, atomic_read(&trace.done_invoked),
+                atomic_read(&trace.rcu_try_flip_1),
+                atomic_read(&trace.rcu_try_flip_e1),
+                trace.rcu_try_flip_i1, trace.rcu_try_flip_ie1,
+                trace.rcu_try_flip_g1,
+                trace.rcu_try_flip_a1, trace.rcu_try_flip_ae1,
+                        trace.rcu_try_flip_a2,
+                trace.rcu_try_flip_z1, trace.rcu_try_flip_ze1,
+                        trace.rcu_try_flip_z2,
+                trace.rcu_try_flip_m1, trace.rcu_try_flip_me1,
+                       trace.rcu_try_flip_m2);
+       bcount = simple_read_from_buffer(buffer, count, ppos,
+                       rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
+       mutex_unlock(&rcupreempt_trace_mutex);
+       return bcount;
+}
+
+static ssize_t rcugp_read(struct file *filp, char __user *buffer,
+                               size_t count, loff_t *ppos)
+{
+       long oldgp = rcu_batches_completed();
+       ssize_t bcount;
+
+       mutex_lock(&rcupreempt_trace_mutex);
+       synchronize_rcu();
+       snprintf(rcupreempt_trace_buf, RCUPREEMPT_TRACE_BUF_SIZE,
+               "oldggp=%ld  newggp=%ld\n", oldgp, rcu_batches_completed());
+       bcount = simple_read_from_buffer(buffer, count, ppos,
+                       rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
+       mutex_unlock(&rcupreempt_trace_mutex);
+       return bcount;
+}
+
+static ssize_t rcuctrs_read(struct file *filp, char __user *buffer,
+                               size_t count, loff_t *ppos)
+{
+       int cnt = 0;
+       int cpu;
+       int f = rcu_batches_completed() & 0x1;
+       ssize_t bcount;
+
+       mutex_lock(&rcupreempt_trace_mutex);
+
+       cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE,
+                               "CPU last cur F M\n");
+       for_each_online_cpu(cpu) {
+               long *flipctr = rcupreempt_flipctr(cpu);
+               cnt += snprintf(&rcupreempt_trace_buf[cnt],
+                               RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+                                       "%3d %4ld %3ld %d %d\n",
+                              cpu,
+                              flipctr[!f],
+                              flipctr[f],
+                              rcupreempt_flip_flag(cpu),
+                              rcupreempt_mb_flag(cpu));
+       }
+       cnt += snprintf(&rcupreempt_trace_buf[cnt],
+                       RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+                       "ggp = %ld, state = %s\n",
+                       rcu_batches_completed(),
+                       rcupreempt_try_flip_state_name());
+       cnt += snprintf(&rcupreempt_trace_buf[cnt],
+                       RCUPREEMPT_TRACE_BUF_SIZE - cnt,
+                       "\n");
+       bcount = simple_read_from_buffer(buffer, count, ppos,
+                       rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
+       mutex_unlock(&rcupreempt_trace_mutex);
+       return bcount;
+}
+
+static struct file_operations rcustats_fops = {
+       .owner = THIS_MODULE,
+       .read = rcustats_read,
+};
+
+static struct file_operations rcugp_fops = {
+       .owner = THIS_MODULE,
+       .read = rcugp_read,
+};
+
+static struct file_operations rcuctrs_fops = {
+       .owner = THIS_MODULE,
+       .read = rcuctrs_read,
+};
+
+static struct dentry *rcudir, *statdir, *ctrsdir, *gpdir;
+static int rcupreempt_debugfs_init(void)
+{
+       rcudir = debugfs_create_dir("rcu", NULL);
+       if (!rcudir)
+               goto out;
+       statdir = debugfs_create_file("rcustats", 0444, rcudir,
+                                               NULL, &rcustats_fops);
+       if (!statdir)
+               goto free_out;
+
+       gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
+       if (!gpdir)
+               goto free_out;
+
+       ctrsdir = debugfs_create_file("rcuctrs", 0444, rcudir,
+                                               NULL, &rcuctrs_fops);
+       if (!ctrsdir)
+               goto free_out;
+       return 0;
+free_out:
+       if (statdir)
+               debugfs_remove(statdir);
+       if (gpdir)
+               debugfs_remove(gpdir);
+       debugfs_remove(rcudir);
+out:
+       return 1;
+}
+
+static int __init rcupreempt_trace_init(void)
+{
+       mutex_init(&rcupreempt_trace_mutex);
+       rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
+       if (!rcupreempt_trace_buf)
+               return 1;
+       return rcupreempt_debugfs_init();
+}
+
+static void __exit rcupreempt_trace_cleanup(void)
+{
+       debugfs_remove(statdir);
+       debugfs_remove(gpdir);
+       debugfs_remove(ctrsdir);
+       debugfs_remove(rcudir);
+       kfree(rcupreempt_trace_buf);
+}
+
+
+module_init(rcupreempt_trace_init);
+module_exit(rcupreempt_trace_cleanup);