git://ftp.safe.ca
/
safe
/
jmp
/
linux-2.6
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
[safe/jmp/linux-2.6]
/
kernel
/
tracepoint.c
diff --git
a/kernel/tracepoint.c
b/kernel/tracepoint.c
index
be86b9a
..
cc89be5
100644
(file)
--- a/
kernel/tracepoint.c
+++ b/
kernel/tracepoint.c
@@
-48,7
+48,7
@@
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
/*
* Note about RCU :
/*
* Note about RCU :
- * It is used to delay the free of multiple probes array until a quiescent
+ * It is used to delay the free of multiple probes array until a quiescent
* state is reached.
* Tracepoint entries modifications are protected by the tracepoints_mutex.
*/
* state is reached.
* Tracepoint entries modifications are protected by the tracepoints_mutex.
*/
@@
-243,6
+243,11
@@
static void set_tracepoint(struct tracepoint_entry **entry,
{
WARN_ON(strcmp((*entry)->name, elem->name) != 0);
{
WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+ if (elem->regfunc && !elem->state && active)
+ elem->regfunc();
+ else if (elem->unregfunc && elem->state && !active)
+ elem->unregfunc();
+
/*
* rcu_assign_pointer has a smp_wmb() which makes sure that the new
* probe callbacks array is consistent before setting a pointer to it.
/*
* rcu_assign_pointer has a smp_wmb() which makes sure that the new
* probe callbacks array is consistent before setting a pointer to it.
@@
-262,6
+267,9
@@
static void set_tracepoint(struct tracepoint_entry **entry,
*/
static void disable_tracepoint(struct tracepoint *elem)
{
*/
static void disable_tracepoint(struct tracepoint *elem)
{
+ if (elem->unregfunc && elem->state)
+ elem->unregfunc();
+
elem->state = 0;
rcu_assign_pointer(elem->funcs, NULL);
}
elem->state = 0;
rcu_assign_pointer(elem->funcs, NULL);
}
@@
-576,9
+584,9
@@
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */
#endif /* CONFIG_MODULES */
-#ifdef CONFIG_FTRACE_SYSCALLS
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-static DEFINE_MUTEX(regfunc_mutex);
+/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;
void syscall_regfunc(void)
static int sys_tracepoint_refcount;
void syscall_regfunc(void)
@@
-586,16
+594,16
@@
void syscall_regfunc(void)
unsigned long flags;
struct task_struct *g, *t;
unsigned long flags;
struct task_struct *g, *t;
- mutex_lock(&regfunc_mutex);
if (!sys_tracepoint_refcount) {
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, t) {
if (!sys_tracepoint_refcount) {
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, t) {
- set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
+ /* Skip kernel threads. */
+ if (t->mm)
+ set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
}
sys_tracepoint_refcount++;
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
}
sys_tracepoint_refcount++;
- mutex_unlock(&regfunc_mutex);
}
void syscall_unregfunc(void)
}
void syscall_unregfunc(void)
@@
-603,7
+611,6
@@
void syscall_unregfunc(void)
unsigned long flags;
struct task_struct *g, *t;
unsigned long flags;
struct task_struct *g, *t;
- mutex_lock(&regfunc_mutex);
sys_tracepoint_refcount--;
if (!sys_tracepoint_refcount) {
read_lock_irqsave(&tasklist_lock, flags);
sys_tracepoint_refcount--;
if (!sys_tracepoint_refcount) {
read_lock_irqsave(&tasklist_lock, flags);
@@
-612,6
+619,5
@@
void syscall_unregfunc(void)
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
}
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
}
- mutex_unlock(&regfunc_mutex);
}
#endif
}
#endif