pkt_sched: Remove qdisc->ops->requeue() etc.
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bbf149d..5bcef13 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -27,6 +27,7 @@
 #include <linux/kmod.h>
 #include <linux/list.h>
 #include <linux/hrtimer.h>
+#include <linux/lockdep.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -96,10 +97,9 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
 
    Auxiliary routines:
 
-   ---requeue
+   ---peek
 
-   requeues once dequeued packet. It is used for non-standard or
-   just buggy devices, which can defer output even if netif_queue_stopped()=0.
+   like dequeue but without removing a packet from the queue
 
    ---reset
 
@@ -146,8 +146,14 @@ int register_qdisc(struct Qdisc_ops *qops)
 
        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
-       if (qops->requeue == NULL)
-               qops->requeue = noop_qdisc_ops.requeue;
+       if (qops->peek == NULL) {
+               if (qops->dequeue == NULL) {
+                       qops->peek = noop_qdisc_ops.peek;
+               } else {
+                       rc = -EINVAL;
+                       goto out;
+               }
+       }
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;
 
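
With the check above, an ops table that provides .dequeue must also provide .peek, or register_qdisc() fails with -EINVAL. Below is a hedged sketch of a conforming registration; all example_* symbols and the trivial FIFO-style enqueue/dequeue bodies are illustrative placeholders, with example_peek as sketched earlier.

#include <linux/module.h>
#include <net/pkt_sched.h>

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        /* append to the built-in sch->q list and account for the packet */
        return qdisc_enqueue_tail(skb, sch);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
        /* remove and return the head of the built-in sch->q list */
        return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
        .id             = "example",
        .priv_size      = 0,
        .enqueue        = example_enqueue,
        .dequeue        = example_dequeue,
        .peek           = example_peek,   /* now required alongside .dequeue */
        .owner          = THIS_MODULE,
};

static int __init example_module_init(void)
{
        return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
        unregister_qdisc(&example_qdisc_ops);
}
module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
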
@@ -198,19 +204,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
        return NULL;
 }
 
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+               spin_lock_bh(&qdisc_list_lock);
+               list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+               spin_unlock_bh(&qdisc_list_lock);
+       }
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+               spin_lock_bh(&qdisc_list_lock);
+               list_del(&q->list);
+               spin_unlock_bh(&qdisc_list_lock);
+       }
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
        unsigned int i;
+       struct Qdisc *q;
+
+       spin_lock_bh(&qdisc_list_lock);
 
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-               struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+               struct Qdisc *txq_root = txq->qdisc_sleeping;
 
                q = qdisc_match_from_root(txq_root, handle);
                if (q)
-                       return q;
+                       goto unlock;
        }
-       return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+       q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+       spin_unlock_bh(&qdisc_list_lock);
+
+       return q;
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
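
The qdisc_list_lock comment above is easiest to see from the caller side: a dequeue path that drops packets ends up in qdisc_tree_decrease_qlen(), which walks up to the parent qdiscs by handle and therefore races with qdisc_list_add()/qdisc_list_del() unless qdisc_lookup() takes the same lock. The following is a deliberately simplified sketch of that parent walk, not the verbatim kernel function.

static void tree_decrease_qlen_sketch(struct Qdisc *sch, unsigned int n)
{
        u32 parentid;

        if (n == 0)
                return;
        while ((parentid = sch->parent)) {
                /* runs from dequeue (softirq) without rtnl_lock(); this
                 * lookup is what qdisc_list_lock keeps safe against
                 * concurrent qdisc_list_add()/qdisc_list_del() */
                sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                if (sch == NULL)
                        return;
                sch->q.qlen -= n;
        }
}
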
@@ -426,7 +466,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 
        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
        smp_wmb();
-       __netif_schedule(wd->qdisc);
+       __netif_schedule(qdisc_root(wd->qdisc));
 
        return HRTIMER_NORESTART;
 }
@@ -443,6 +483,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
        ktime_t time;
 
+       if (test_bit(__QDISC_STATE_DEACTIVATED,
+                    &qdisc_root_sleeping(wd->qdisc)->state))
+               return;
+
        wd->qdisc->flags |= TCQ_F_THROTTLED;
        time = ktime_set(0, 0);
        time = ktime_add_ns(time, PSCHED_US2NS(expires));
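
The deactivation check matters because the watchdog is typically re-armed from the dequeue path itself; once the root qdisc is marked __QDISC_STATE_DEACTIVATED during teardown, re-arming would only keep the hrtimer (and the dying qdisc) alive. A hedged sketch of the usual qdisc_watchdog lifecycle follows; all example_wd_* names and the next_send shaper state are invented for illustration (see sch_tbf.c or sch_netem.c for real users).

struct example_sched_data {
        struct qdisc_watchdog   watchdog;
        psched_time_t           next_send;      /* hypothetical shaper state */
};

static int example_wd_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct example_sched_data *q = qdisc_priv(sch);

        qdisc_watchdog_init(&q->watchdog, sch);
        q->next_send = psched_get_time();
        return 0;
}

static struct sk_buff *example_wd_dequeue(struct Qdisc *sch)
{
        struct example_sched_data *q = qdisc_priv(sch);

        if (psched_get_time() < q->next_send) {
                /* not yet eligible: arm the watchdog and report no packet;
                 * after this patch the call is a no-op once the root qdisc
                 * has been deactivated */
                qdisc_watchdog_schedule(&q->watchdog, q->next_send);
                return NULL;
        }
        return qdisc_dequeue_head(sch);
}

static void example_wd_destroy(struct Qdisc *sch)
{
        struct example_sched_data *q = qdisc_priv(sch);

        /* stop the hrtimer before the qdisc memory goes away */
        qdisc_watchdog_cancel(&q->watchdog);
}
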
@@ -585,7 +629,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
        struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
        spinlock_t *root_lock;
 
-       root_lock = qdisc_root_lock(oqdisc);
+       root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);
 
        /* Prune old scheduler */
@@ -596,7 +640,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
        if (qdisc == NULL)
                qdisc = &noop_qdisc;
        dev_queue->qdisc_sleeping = qdisc;
-       dev_queue->qdisc = &noop_qdisc;
+       rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
 
        spin_unlock_bh(root_lock);
 
@@ -637,11 +681,8 @@ static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid
        if (new || old)
                qdisc_notify(skb, n, clid, old, new);
 
-       if (old) {
-               spin_lock_bh(&old->q.lock);
+       if (old)
                qdisc_destroy(old);
-               spin_unlock_bh(&old->q.lock);
-       }
 }
 
 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
@@ -707,6 +748,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
        return err;
 }
 
+/* lockdep annotation is needed for ingress; egress gets it only for name */
+static struct lock_class_key qdisc_tx_lock;
+static struct lock_class_key qdisc_rx_lock;
+
 /*
    Allocate and initialize new qdisc.
 
@@ -724,7 +769,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
        struct qdisc_size_table *stab;
 
        ops = qdisc_lookup_ops(kind);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
        if (ops == NULL && kind != NULL) {
                char name[IFNAMSIZ];
                if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
@@ -767,6 +812,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
+               lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
        } else {
                if (handle == 0) {
                        handle = qdisc_alloc_handle(dev);
@@ -774,6 +820,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                        if (handle == 0)
                                goto err_out3;
                }
+               lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
        }
 
        sch->handle = handle;
@@ -788,9 +835,16 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                        sch->stab = stab;
                }
                if (tca[TCA_RATE]) {
+                       spinlock_t *root_lock;
+
+                       if ((sch->parent != TC_H_ROOT) &&
+                           !(sch->flags & TCQ_F_INGRESS))
+                               root_lock = qdisc_root_sleeping_lock(sch);
+                       else
+                               root_lock = qdisc_lock(sch);
+
                        err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-                                               qdisc_root_lock(sch),
-                                               tca[TCA_RATE]);
+                                               root_lock, tca[TCA_RATE]);
                        if (err) {
                                /*
                                 * Any broken qdiscs that would require
@@ -802,8 +856,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                                goto err_out3;
                        }
                }
-               if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
-                       list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+               qdisc_list_add(sch);
 
                return sch;
        }
@@ -842,7 +896,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 
        if (tca[TCA_RATE])
                gen_replace_estimator(&sch->bstats, &sch->rate_est,
-                                     qdisc_root_lock(sch), tca[TCA_RATE]);
+                                     qdisc_root_sleeping_lock(sch),
+                                     tca[TCA_RATE]);
        return 0;
 }
 
@@ -1084,20 +1139,13 @@ create_n_graft:
        }
 
 graft:
-       if (1) {
-               spinlock_t *root_lock;
-
-               err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
-               if (err) {
-                       if (q) {
-                               root_lock = qdisc_root_lock(q);
-                               spin_lock_bh(root_lock);
-                               qdisc_destroy(q);
-                               spin_unlock_bh(root_lock);
-                       }
-                       return err;
-               }
+       err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+       if (err) {
+               if (q)
+                       qdisc_destroy(q);
+               return err;
        }
+
        return 0;
 }
 
@@ -1126,8 +1174,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
                goto nla_put_failure;
 
-       if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-                                        TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+       if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+                                        qdisc_root_sleeping_lock(q), &d) < 0)
                goto nla_put_failure;
 
        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1418,8 +1466,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
                goto nla_put_failure;
 
-       if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-                                        TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+       if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+                                        qdisc_root_sleeping_lock(q), &d) < 0)
                goto nla_put_failure;
 
        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)