Blackfin/ipipe: prepare status bitops for SMP support
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index d8cde1f..a77307a 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/kthread.h>
-#include <asm/unistd.h>
+#include <linux/unistd.h>
+#include <linux/io.h>
 #include <asm/system.h>
 #include <asm/atomic.h>
-#include <asm/io.h>
 
 DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale);
 
 atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
 
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
 EXPORT_SYMBOL(__ipipe_irq_lvmask);
 
 static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -90,6 +90,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
        struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
        struct ipipe_domain *this_domain, *next_domain;
        struct list_head *head, *pos;
+       struct ipipe_irqdesc *idesc;
        int m_ack, s = -1;
 
        /*
@@ -100,17 +101,20 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
         */
        m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
        this_domain = __ipipe_current_domain;
+       idesc = &this_domain->irqs[irq];
 
-       if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
+       if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
                head = &this_domain->p_link;
        else {
                head = __ipipe_pipeline.next;
                next_domain = list_entry(head, struct ipipe_domain, p_link);
-               if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
-                       if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
-                               next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+               idesc = &next_domain->irqs[irq];
+               if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
+                       if (!m_ack && idesc->acknowledge != NULL)
+                               idesc->acknowledge(irq, irq_to_desc(irq));
                        if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
-                               s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
+                               s = __test_and_set_bit(IPIPE_STALL_FLAG,
+                                                      &p->status);
                        __ipipe_dispatch_wired(next_domain, irq);
                        goto out;
                }
@@ -121,14 +125,15 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
        pos = head;
        while (pos != &__ipipe_pipeline) {
                next_domain = list_entry(pos, struct ipipe_domain, p_link);
-               if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
+               idesc = &next_domain->irqs[irq];
+               if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
                        __ipipe_set_irq_pending(next_domain, irq);
-                       if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
-                               next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+                       if (!m_ack && idesc->acknowledge != NULL) {
+                               idesc->acknowledge(irq, irq_to_desc(irq));
                                m_ack = 1;
                        }
                }
-               if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+               if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
                        break;
                pos = next_domain->p_link.next;
        }
@@ -159,11 +164,6 @@ out:
                __clear_bit(IPIPE_STALL_FLAG, &p->status);
 }
 
-int __ipipe_check_root(void)
-{
-       return ipipe_root_domain_p;
-}
-
 void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
@@ -186,30 +186,6 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-void __ipipe_stall_root_raw(void)
-{
-       /*
-        * This code is called by the ins{bwl} routines (see
-        * arch/blackfin/lib/ins.S), which are heavily used by the
-        * network stack. It masks all interrupts but those handled by
-        * non-root domains, so that we keep decent network transfer
-        * rates for Linux without inducing pathological jitter for
-        * the real-time domain.
-        */
-       __asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));
-
-       __set_bit(IPIPE_STALL_FLAG,
-                 &ipipe_root_cpudom_var(status));
-}
-
-void __ipipe_unstall_root_raw(void)
-{
-       __clear_bit(IPIPE_STALL_FLAG,
-                   &ipipe_root_cpudom_var(status));
-
-       __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
-}
-
 int __ipipe_syscall_root(struct pt_regs *regs)
 {
        struct ipipe_percpu_domain_data *p;
@@ -333,17 +309,96 @@ asmlinkage void __ipipe_sync_root(void)
 
 void ___ipipe_sync_pipeline(unsigned long syncmask)
 {
-       if (__ipipe_root_domain_p) {
-               if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
-                       return;
-       }
+       if (__ipipe_root_domain_p &&
+           test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+               return;
 
        __ipipe_sync_stage(syncmask);
 }
 
-EXPORT_SYMBOL(show_stack);
+void __ipipe_disable_root_irqs_hw(void)
+{
+       /*
+        * This code is called by the ins{bwl} routines (see
+        * arch/blackfin/lib/ins.S), which are heavily used by the
+        * network stack. It masks all interrupts but those handled by
+        * non-root domains, so that we keep decent network transfer
+        * rates for Linux without inducing pathological jitter for
+        * the real-time domain.
+        */
+       bfin_sti(__ipipe_irq_lvmask);
+       __set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+}
+
+void __ipipe_enable_root_irqs_hw(void)
+{
+       __clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+       bfin_sti(bfin_irq_flags);
+}
 
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
+/*
+ * We could use standard atomic bitops in the following root status
+ * manipulation routines, but let's prepare for SMP support in the
+ * same move, preventing CPU migration as required.
+ */
+void __ipipe_stall_root(void)
+{
+       unsigned long *p, flags;
+
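+       /*
+        * __set_bit() is a non-atomic read-modify-write; masking hard
+        * IRQs keeps it safe against interrupts and, once SMP support
+        * lands, also keeps us on the CPU whose root status we update.
+        */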
+       local_irq_save_hw(flags);
+       p = &__ipipe_root_status;
+       __set_bit(IPIPE_STALL_FLAG, p);
+       local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_stall_root);
+
+unsigned long __ipipe_test_and_stall_root(void)
+{
+       unsigned long *p, flags;
+       int x;
+
+       local_irq_save_hw(flags);
+       p = &__ipipe_root_status;
+       x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
+       local_irq_restore_hw(flags);
+
+       return x;
+}
+EXPORT_SYMBOL(__ipipe_test_and_stall_root);
+
+unsigned long __ipipe_test_root(void)
+{
+       const unsigned long *p;
+       unsigned long flags;
+       int x;
+
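+       /*
+        * Read-only path: the _smp variant of the hard IRQ guard is
+        * used so that, on SMP, we cannot migrate between resolving
+        * the per-CPU status word and testing the bit.
+        */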
+       local_irq_save_hw_smp(flags);
+       p = &__ipipe_root_status;
+       x = test_bit(IPIPE_STALL_FLAG, p);
+       local_irq_restore_hw_smp(flags);
+
+       return x;
+}
+EXPORT_SYMBOL(__ipipe_test_root);
+
+void __ipipe_lock_root(void)
+{
+       unsigned long *p, flags;
+
+       local_irq_save_hw(flags);
+       p = &__ipipe_root_status;
+       __set_bit(IPIPE_SYNCDEFER_FLAG, p);
+       local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_lock_root);
+
+void __ipipe_unlock_root(void)
+{
+       unsigned long *p, flags;
+
+       local_irq_save_hw(flags);
+       p = &__ipipe_root_status;
+       __clear_bit(IPIPE_SYNCDEFER_FLAG, p);
+       local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_unlock_root);
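
For comparison with the "prepare for SMP support" comment introducing the root status helpers above, the sketch below shows the plain atomic-bitop variant that comment rules out. It is illustrative only and not part of the patch; the function name is made up. An atomic set_bit() is atomic by itself, but nothing prevents the caller from migrating to another CPU between evaluating &__ipipe_root_status and performing the store, so on SMP the bit could land in another CPU's root status word.

	/* Hypothetical alternative, NOT the patched code: atomic, but it
	 * does not pin the caller to a CPU, so the wrong CPU's root
	 * status could end up stalled on SMP. */
	void __ipipe_stall_root_atomic_sketch(void)
	{
		set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
	}

Masking hard IRQs around the non-atomic __set_bit()/__clear_bit() calls, as the patch does, provides both properties at once: the bitop cannot be interrupted, and the caller cannot be migrated while it runs.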