packet : remove init_net restriction
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index b5f4579..bdbce2f 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
 #include <linux/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
-#include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
 #include <asm/types.h>
 
 
 enum lw_bits {
-       LW_RUNNING = 0,
+       LW_URGENT = 0,
 };
 
 static unsigned long linkwatch_flags;
@@ -35,7 +34,7 @@ static unsigned long linkwatch_nextevent;
 static void linkwatch_event(struct work_struct *dummy);
 static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
-static struct net_device *lweventlist;
+static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
 
 static unsigned char default_operstate(const struct net_device *dev)
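
The hunk above replaces the hand-rolled singly linked chain (a bare struct net_device pointer advanced through dev->link_watch_next) with a standard LIST_HEAD, so each device now carries its own link_watch_list node. For readers unfamiliar with the kernel's intrusive-list idiom, here is a minimal, self-contained userspace sketch of the pattern the rest of the patch relies on: a device is queued at most once (list_empty() on its own node) and the runner drains the queue by splicing it onto a private head. struct fake_dev and the miniature list helpers are illustrative stand-ins for <linux/list.h>, not kernel code, and this splice simply moves every node to the tail of the destination list.

#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

/* Move every node from @from to the tail of @to and reinitialise @from. */
static void list_splice_init(struct list_head *from, struct list_head *to)
{
	if (!list_empty(from)) {
		struct list_head *first = from->next, *last = from->prev;

		first->prev = to->prev;
		to->prev->next = first;
		last->next = to;
		to->prev = last;
		INIT_LIST_HEAD(from);
	}
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_dev {
	const char *name;
	struct list_head link_watch_list;	/* node embedded in the device */
};

static LIST_HEAD(lweventlist);			/* global pending-event list */

/* Queue a device at most once, like linkwatch_add_event() after the patch. */
static void add_event(struct fake_dev *dev)
{
	if (list_empty(&dev->link_watch_list))	/* not queued yet */
		list_add_tail(&dev->link_watch_list, &lweventlist);
}

int main(void)
{
	struct fake_dev a = { "eth0" }, b = { "eth1" };
	LIST_HEAD(wrk);

	INIT_LIST_HEAD(&a.link_watch_list);
	INIT_LIST_HEAD(&b.link_watch_list);

	add_event(&a);
	add_event(&b);
	add_event(&a);				/* duplicate is ignored */

	/* Drain like __linkwatch_run_queue(): splice, then pop one by one. */
	list_splice_init(&lweventlist, &wrk);
	while (!list_empty(&wrk)) {
		struct fake_dev *dev =
			container_of(wrk.next, struct fake_dev, link_watch_list);

		list_del_init(&dev->link_watch_list);
		printf("handling %s\n", dev->name);
	}
	return 0;
}
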
@@ -77,10 +76,10 @@ static void rfc2863_policy(struct net_device *dev)
 }
 
 
-static int linkwatch_urgent_event(struct net_device *dev)
+static bool linkwatch_urgent_event(struct net_device *dev)
 {
        return netif_running(dev) && netif_carrier_ok(dev) &&
-              dev->qdisc != dev->qdisc_sleeping;
+               qdisc_tx_changing(dev);
 }
 
 
@@ -89,28 +88,81 @@ static void linkwatch_add_event(struct net_device *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&lweventlist_lock, flags);
-       dev->link_watch_next = lweventlist;
-       lweventlist = dev;
+       if (list_empty(&dev->link_watch_list)) {
+               list_add_tail(&dev->link_watch_list, &lweventlist);
+               dev_hold(dev);
+       }
        spin_unlock_irqrestore(&lweventlist_lock, flags);
 }
 
 
-static void linkwatch_schedule_work(unsigned long delay)
+static void linkwatch_schedule_work(int urgent)
 {
-       if (test_and_set_bit(LW_RUNNING, &linkwatch_flags))
+       unsigned long delay = linkwatch_nextevent - jiffies;
+
+       if (test_bit(LW_URGENT, &linkwatch_flags))
                return;
 
+       /* Minimise down-time: drop delay for up event. */
+       if (urgent) {
+               if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
+                       return;
+               delay = 0;
+       }
+
        /* If we wrap around we'll delay it by at most HZ. */
        if (delay > HZ)
                delay = 0;
 
-       schedule_delayed_work(&linkwatch_work, delay);
+       /*
+        * This is true if we've scheduled it immediately or if we don't
+        * need an immediate execution and it's already pending.
+        */
+       if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
+               return;
+
+       /* Don't bother if there is nothing urgent. */
+       if (!test_bit(LW_URGENT, &linkwatch_flags))
+               return;
+
+       /* It's already running which is good enough. */
+       if (!cancel_delayed_work(&linkwatch_work))
+               return;
+
+       /* Otherwise we reschedule it again for immediate execution. */
+       schedule_delayed_work(&linkwatch_work, 0);
 }
 
 
+static void linkwatch_do_dev(struct net_device *dev)
+{
+       /*
+        * Make sure the above read is complete since it can be
+        * rewritten as soon as we clear the bit below.
+        */
+       smp_mb__before_clear_bit();
+
+       /* We are about to handle this device,
+        * so new events can be accepted
+        */
+       clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
+
+       rfc2863_policy(dev);
+       if (dev->flags & IFF_UP) {
+               if (netif_carrier_ok(dev))
+                       dev_activate(dev);
+               else
+                       dev_deactivate(dev);
+
+               netdev_state_change(dev);
+       }
+       dev_put(dev);
+}
+
 static void __linkwatch_run_queue(int urgent_only)
 {
-       struct net_device *next;
+       struct net_device *dev;
+       LIST_HEAD(wrk);
 
        /*
         * Limit the number of linkwatch events to one
@@ -121,50 +173,47 @@ static void __linkwatch_run_queue(int urgent_only)
         */
        if (!urgent_only)
                linkwatch_nextevent = jiffies + HZ;
-       clear_bit(LW_RUNNING, &linkwatch_flags);
+       /* Limit wrap-around effect on delay. */
+       else if (time_after(linkwatch_nextevent, jiffies + HZ))
+               linkwatch_nextevent = jiffies;
+
+       clear_bit(LW_URGENT, &linkwatch_flags);
 
        spin_lock_irq(&lweventlist_lock);
-       next = lweventlist;
-       lweventlist = NULL;
-       spin_unlock_irq(&lweventlist_lock);
+       list_splice_init(&lweventlist, &wrk);
 
-       while (next) {
-               struct net_device *dev = next;
+       while (!list_empty(&wrk)) {
 
-               next = dev->link_watch_next;
+               dev = list_first_entry(&wrk, struct net_device, link_watch_list);
+               list_del_init(&dev->link_watch_list);
 
                if (urgent_only && !linkwatch_urgent_event(dev)) {
-                       linkwatch_add_event(dev);
+                       list_add_tail(&dev->link_watch_list, &lweventlist);
                        continue;
                }
+               spin_unlock_irq(&lweventlist_lock);
+               linkwatch_do_dev(dev);
+               spin_lock_irq(&lweventlist_lock);
+       }
 
-               /*
-                * Make sure the above read is complete since it can be
-                * rewritten as soon as we clear the bit below.
-                */
-               smp_mb__before_clear_bit();
-
-               /* We are about to handle this device,
-                * so new events can be accepted
-                */
-               clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
-
-               rfc2863_policy(dev);
-               if (dev->flags & IFF_UP) {
-                       if (netif_carrier_ok(dev)) {
-                               WARN_ON(dev->qdisc_sleeping == &noop_qdisc);
-                               dev_activate(dev);
-                       } else
-                               dev_deactivate(dev);
-
-                       netdev_state_change(dev);
-               }
+       if (!list_empty(&lweventlist))
+               linkwatch_schedule_work(0);
+       spin_unlock_irq(&lweventlist_lock);
+}
 
-               dev_put(dev);
-       }
+void linkwatch_forget_dev(struct net_device *dev)
+{
+       unsigned long flags;
+       int clean = 0;
 
-       if (lweventlist)
-               linkwatch_schedule_work(linkwatch_nextevent - jiffies);
+       spin_lock_irqsave(&lweventlist_lock, flags);
+       if (!list_empty(&dev->link_watch_list)) {
+               list_del_init(&dev->link_watch_list);
+               clean = 1;
+       }
+       spin_unlock_irqrestore(&lweventlist_lock, flags);
+       if (clean)
+               linkwatch_do_dev(dev);
 }
 
 
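
A note on the early-return test added in linkwatch_schedule_work() above: schedule_delayed_work() returns nonzero only when the work item was not already pending and has just been queued, so comparing that return value with !delay folds the two "nothing left to do" cases into one check, exactly as the in-code comment says. The small program below enumerates the combinations; it is only a model of the control flow (it assumes cancel_delayed_work() would succeed), not kernel code.

#include <stdio.h>
#include <stdbool.h>

static const char *decide(unsigned long delay, bool newly_queued, bool urgent_bit)
{
	/* The test from the patch: true means no further action is needed. */
	if (newly_queued == !delay)
		return "done: scheduled as requested (or already pending)";

	/* Don't bother if there is nothing urgent. */
	if (!urgent_bit)
		return "done: non-urgent, the existing timer is fine";

	/* Otherwise cancel the pending work and requeue it immediately. */
	return "cancel pending work, requeue with delay 0";
}

int main(void)
{
	struct { unsigned long delay; bool newly_queued; bool urgent_bit; } cases[] = {
		{ 0, true,  true  },	/* urgent, nothing was pending      */
		{ 0, false, true  },	/* urgent, a delayed run is pending */
		{ 5, true,  false },	/* non-urgent, newly queued         */
		{ 5, false, false },	/* non-urgent, already pending      */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("delay=%lu newly_queued=%d urgent=%d -> %s\n",
		       cases[i].delay, (int)cases[i].newly_queued,
		       (int)cases[i].urgent_bit,
		       decide(cases[i].delay, cases[i].newly_queued,
			      cases[i].urgent_bit));
	return 0;
}
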
@@ -185,21 +234,14 @@ static void linkwatch_event(struct work_struct *dummy)
 
 void linkwatch_fire_event(struct net_device *dev)
 {
-       if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
-               unsigned long delay;
-
-               dev_hold(dev);
+       bool urgent = linkwatch_urgent_event(dev);
 
+       if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
                linkwatch_add_event(dev);
+       } else if (!urgent)
+               return;
 
-               delay = linkwatch_nextevent - jiffies;
-
-               /* Minimise down-time: drop delay for up event. */
-               if (linkwatch_urgent_event(dev))
-                       delay = 0;
-
-               linkwatch_schedule_work(delay);
-       }
+       linkwatch_schedule_work(urgent);
 }
 
 EXPORT_SYMBOL(linkwatch_fire_event);
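
One more detail worth spelling out: the delay in linkwatch_schedule_work() is the unsigned difference linkwatch_nextevent - jiffies, so a next-event stamp that already lies in the past wraps around to a huge value, and the "if (delay > HZ) delay = 0" test turns that into an immediate run; the new time_after() check in __linkwatch_run_queue() similarly pulls a linkwatch_nextevent that has drifted more than HZ into the future back to jiffies on urgent-only passes. A minimal demonstration of that arithmetic (HZ and the sample jiffies values below are made up for illustration):

#include <stdio.h>

#define HZ 100UL

static unsigned long compute_delay(unsigned long nextevent, unsigned long jiffies)
{
	unsigned long delay = nextevent - jiffies;	/* may wrap around */

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;
	return delay;
}

int main(void)
{
	unsigned long jiffies = 1000;

	/* Next event 40 ticks in the future: keep the remaining delay. */
	printf("future : %lu\n", compute_delay(jiffies + 40, jiffies));

	/* Next event already passed: the subtraction wraps, clamp to 0. */
	printf("past   : %lu\n", compute_delay(jiffies - 3, jiffies));

	return 0;
}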