allow_signal: kill the bogus ->mm check, add a note about CLONE_SIGHAND
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9f90fdc..3a94905 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -25,6 +25,9 @@
 #include <linux/smp.h>
 #include <linux/tick.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/irq.h>
+
 #include <asm/irq.h>
 /*
    - No shared variables, all the data are CPU local.
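Note on the hunk above: defining CREATE_TRACE_POINTS before including trace/events/irq.h is the usual TRACE_EVENT() convention. Exactly one .c file defines the macro so that this translation unit emits the tracepoint bodies and registration helpers, while every other includer only sees declarations. The shortened softirq_to_name[] strings in the next hunk presumably exist so that a TP_printk() format stays compact. A minimal sketch of an event header in that style follows; the field layout and format string are illustrative, not the literal contents of trace/events/irq.h:

/* Sketch of a TRACE_EVENT() header in the trace/events/ style. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq

#if !defined(_TRACE_IRQ_SKETCH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_SKETCH_H

#include <linux/interrupt.h>
#include <linux/tracepoint.h>

TRACE_EVENT(softirq_entry,

	TP_PROTO(struct softirq_action *h, struct softirq_action *vec),

	TP_ARGS(h, vec),

	TP_STRUCT__entry(
		__field(int, vec)
	),

	TP_fast_assign(
		__entry->vec = (int)(h - vec);
	),

	TP_printk("softirq=%d action=%s", __entry->vec,
		  softirq_to_name[__entry->vec])
);

#endif /* _TRACE_IRQ_SKETCH_H */

/* This include must stay outside the guard above. */
#include <trace/define_trace.h>

kernel/softirq.c itself then only needs the trace_softirq_entry()/trace_softirq_exit() calls that appear around h->action(h) below.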
@@ -54,9 +57,8 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
-       "HI_SOFTIRQ", "TIMER_SOFTIRQ", "NET_TX_SOFTIRQ", "NET_RX_SOFTIRQ",
-       "BLOCK_SOFTIRQ", "TASKLET_SOFTIRQ", "SCHED_SOFTIRQ", "HRTIMER_SOFTIRQ",
-       "RCU_SOFTIRQ"
+       "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
+       "TASKLET", "SCHED", "HRTIMER",  "RCU"
 };
 
 /*
@@ -65,7 +67,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static inline void wakeup_softirqd(void)
+void wakeup_softirqd(void)
 {
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);
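Dropping "static inline" here is what allows code outside softirq.c to wake the per-CPU ksoftirqd thread. A caller only needs the declaration and must satisfy the "interrupts are disabled" precondition noted in the function; the extern and the wrapper name below are purely illustrative, not what the tree actually adds:

/* Hypothetical out-of-file caller of the now non-static helper. */
#include <linux/interrupt.h>

extern void wakeup_softirqd(void);

static void my_defer_to_ksoftirqd(void)
{
	unsigned long flags;

	/* wakeup_softirqd() relies on interrupts being off on this CPU. */
	local_irq_save(flags);
	wakeup_softirqd();
	local_irq_restore(flags);
}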
@@ -211,9 +213,11 @@ restart:
        do {
                if (pending & 1) {
                        int prev_count = preempt_count();
+                       kstat_incr_softirqs_this_cpu(h - softirq_vec);
 
+                       trace_softirq_entry(h, softirq_vec);
                        h->action(h);
-
+                       trace_softirq_exit(h, softirq_vec);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %td %s %p"
                                       "with preempt_count %08x,"
@@ -379,6 +383,17 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
+void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+       BUG_ON(!irqs_disabled());
+
+       t->next = __get_cpu_var(tasklet_hi_vec).head;
+       __get_cpu_var(tasklet_hi_vec).head = t;
+       __raise_softirq_irqoff(HI_SOFTIRQ);
+}
+
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
 static void tasklet_action(struct softirq_action *a)
 {
        struct tasklet_struct *list;
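As with __tasklet_hi_schedule(), the double-underscore name signals that the TASKLET_STATE_SCHED accounting is the caller's job; the new helper just links the tasklet at the head of this CPU's HI list, so it runs before anything already queued when HI_SOFTIRQ fires. A hedged sketch of how a caller would wrap it (the wrapper name is made up for illustration):

/* Hypothetical wrapper: schedule t at the head of the HI_SOFTIRQ list. */
static inline void my_tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);	/* needs irqs disabled */
}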
@@ -468,9 +483,9 @@ void tasklet_kill(struct tasklet_struct *t)
                printk("Attempt to kill tasklet from interrupt\n");
 
        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-               do
+               do {
                        yield();
-               while (test_bit(TASKLET_STATE_SCHED, &t->state));
+               } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
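The reworked loop still calls yield(), so tasklet_kill() remains process-context only; the usual teardown order is to silence the interrupt source first and only then kill the tasklet. A sketch under that assumption (struct my_dev is hypothetical, and dev is assumed to have been the request_irq() cookie):

#include <linux/interrupt.h>

struct my_dev {				/* illustrative device state */
	int irq;
	struct tasklet_struct tasklet;
};

static void my_driver_shutdown(struct my_dev *dev)
{
	/* Stop the IRQ handler that (re)schedules the tasklet... */
	free_irq(dev->irq, dev);
	/* ...then wait for any pending or running instance to finish. */
	tasklet_kill(&dev->tasklet);
}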
@@ -514,7 +529,7 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
                cp->flags = 0;
                cp->priv = softirq;
 
-               __smp_call_function_single(cpu, cp);
+               __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
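The added 0 is the new wait argument of __smp_call_function_single(): zero queues the call_single_data and returns immediately, non-zero spins until the remote CPU has run the function, and the remote-softirq path must not wait. An asynchronous caller in the same style might look like this (my_csd, my_remote_work and kick_cpu are made-up names; the caller must ensure the csd is not still in flight before reusing it, as the __softirq_pending test above does):

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct call_single_data, my_csd);

static void my_remote_work(void *info)
{
	/* Runs on the target CPU from the IPI handler. */
}

static void kick_cpu(int cpu)
{
	struct call_single_data *csd = &per_cpu(my_csd, cpu);

	csd->func = my_remote_work;
	csd->info = NULL;
	csd->flags = 0;
	__smp_call_function_single(cpu, csd, 0);	/* 0 == don't wait */
}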
@@ -824,7 +839,7 @@ int __init __weak arch_early_irq_init(void)
        return 0;
 }
 
-int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
+int __weak arch_init_chip_data(struct irq_desc *desc, int node)
 {
        return 0;
 }