/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
	printk(KERN_INFO
	       "Experimental preemptable hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
	rdp->passed_quiesc_completed = rdp->completed;
	barrier();
	rdp->passed_quiesc = 1;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = rcu_preempt_state.rda[cpu];
		rnp = rdp->mynode;
		spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		smp_mb();  /* Ensure later ctxt swtch seen after above. */
		spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs(cpu);
	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
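
/*
 * Worked example of the phase computation above (an illustrative note,
 * not part of the original file):  Suppose rnp->gpnum is 5, so its
 * low-order bit is 1 and the current grace period waits on
 * blocked_tasks[1].  If this CPU has not yet passed through a quiescent
 * state for that grace period (its bit is still set in rnp->qsmask),
 * then !(rnp->qsmask & rdp->grpmask) is 0 and phase = 0 ^ 1 = 1, so the
 * task is queued on the list that the current grace period must drain.
 * Had the CPU already checked in, phase would be 1 ^ 1 = 0 and the task
 * would hold up only the next grace period.
 */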

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			spin_unlock(&rnp->lock);  /* irqs remain disabled. */
		}
		empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk()
		 * drop rnp->lock and restore irq.
		 */
		if (!empty && rnp->qsmask == 0 &&
		    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
			if (rnp->parent == NULL) {
				/* Only one rcu_node in the tree. */
				cpu_quiet_msk_finish(&rcu_preempt_state, flags);
				return;
			}
			/* Report up the rest of the hierarchy. */
			mask = rnp->grpmask;
			spin_unlock_irqrestore(&rnp->lock, flags);
			rnp = rnp->parent;
			spin_lock_irqsave(&rnp->lock, flags);
			cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
			return;
		}
		spin_unlock(&rnp->lock);
	}
	local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
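
/*
 * Illustrative reader-side sketch (not part of the original file; the
 * pointer "gp", "struct foo", and do_something_with() are hypothetical).
 * Under TREE_PREEMPT_RCU the reader may be preempted inside the critical
 * section; if so, the outermost rcu_read_unlock() ends up in
 * rcu_read_unlock_special() above to remove the task from its
 * blocked_tasks[] list:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */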

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase = rnp->gpnum & 0x1;
	struct task_struct *t;

	if (!list_empty(&rnp->blocked_tasks[phase])) {
		spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1;  /* re-read under lock. */
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
}

/*
 * Check for preempted RCU readers for the specified rcu_node structure.
 * If the caller needs a reliable answer, it must hold the rcu_node's
 * ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
				      struct rcu_node *rnp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return;  /* Shouldn't happen: at least one CPU online. */
	}

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of gpnum value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	for (i = 0; i < 2; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			spin_lock(&rnp_root->lock);  /* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			spin_unlock(&rnp_root->lock);  /* irqs remain disabled */
		}
	}
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		rcu_preempt_qs(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
	}
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
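
/*
 * Illustrative update-side sketch (not part of the original file;
 * "struct foo", the global pointer "gp", the updater's lock, and
 * foo_reclaim() are all hypothetical).  The callback is invoked only
 * after a grace period elapses, so no reader can still be referencing
 * the old structure when it is freed:
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *old = container_of(rcu, struct foo, rcu);
 *
 *		kfree(old);
 *	}
 *
 *	old = gp;			(updater holds its own lock)
 *	rcu_assign_pointer(gp, new);
 *	call_rcu(&old->rcu, foo_reclaim);
 */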

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
				      struct rcu_node *rnp)
{
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */