net_sched: convert qdisc API for multiqueue TX (per-txq qdiscs, qdisc size tables, class hash helpers)
[safe/jmp/linux-2.6] / net / sched / sch_api.c
index 15b91a9..e7fb9e0 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/kmod.h>
 #include <linux/list.h>
 #include <linux/hrtimer.h>
+#include <linux/lockdep.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -99,7 +100,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
    ---requeue
 
    requeues once dequeued packet. It is used for non-standard or
-   just buggy devices, which can defer output even if dev->tbusy=0.
+   just buggy devices, which can defer output even if netif_queue_stopped()=0.
 
    ---reset
 
@@ -183,17 +184,70 @@ EXPORT_SYMBOL(unregister_qdisc);
    (root qdisc, all its children, children of children etc.)
  */
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 {
        struct Qdisc *q;
 
-       list_for_each_entry(q, &dev->qdisc_list, list) {
+       if (!(root->flags & TCQ_F_BUILTIN) &&
+           root->handle == handle)
+               return root;
+
+       list_for_each_entry(q, &root->list, list) {
                if (q->handle == handle)
                        return q;
        }
        return NULL;
 }
 
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+               spin_lock_bh(&qdisc_list_lock);
+               list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+               spin_unlock_bh(&qdisc_list_lock);
+       }
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+               spin_lock_bh(&qdisc_list_lock);
+               list_del(&q->list);
+               spin_unlock_bh(&qdisc_list_lock);
+       }
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+{
+       unsigned int i;
+       struct Qdisc *q;
+
+       spin_lock_bh(&qdisc_list_lock);
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+               struct Qdisc *txq_root = txq->qdisc_sleeping;
+
+               q = qdisc_match_from_root(txq_root, handle);
+               if (q)
+                       goto unlock;
+       }
+
+       q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+       spin_unlock_bh(&qdisc_list_lock);
+
+       return q;
+}
+
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
        unsigned long cl;
@@ -277,15 +331,137 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
 }
 EXPORT_SYMBOL(qdisc_put_rtab);
 
+static LIST_HEAD(qdisc_stab_list);
+static DEFINE_SPINLOCK(qdisc_stab_lock);
+
+static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
+       [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
+       [TCA_STAB_DATA] = { .type = NLA_BINARY },
+};
+
+static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
+{
+       struct nlattr *tb[TCA_STAB_MAX + 1];
+       struct qdisc_size_table *stab;
+       struct tc_sizespec *s;
+       unsigned int tsize = 0;
+       u16 *tab = NULL;
+       int err;
+
+       err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
+       if (err < 0)
+               return ERR_PTR(err);
+       if (!tb[TCA_STAB_BASE])
+               return ERR_PTR(-EINVAL);
+
+       s = nla_data(tb[TCA_STAB_BASE]);
+
+       if (s->tsize > 0) {
+               if (!tb[TCA_STAB_DATA])
+                       return ERR_PTR(-EINVAL);
+               tab = nla_data(tb[TCA_STAB_DATA]);
+               tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
+       }
+
+       if (!s || tsize != s->tsize || (!tab && tsize > 0))
+               return ERR_PTR(-EINVAL);
+
+       spin_lock(&qdisc_stab_lock);
+
+       list_for_each_entry(stab, &qdisc_stab_list, list) {
+               if (memcmp(&stab->szopts, s, sizeof(*s)))
+                       continue;
+               if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
+                       continue;
+               stab->refcnt++;
+               spin_unlock(&qdisc_stab_lock);
+               return stab;
+       }
+
+       spin_unlock(&qdisc_stab_lock);
+
+       stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
+       if (!stab)
+               return ERR_PTR(-ENOMEM);
+
+       stab->refcnt = 1;
+       stab->szopts = *s;
+       if (tsize > 0)
+               memcpy(stab->data, tab, tsize * sizeof(u16));
+
+       spin_lock(&qdisc_stab_lock);
+       list_add_tail(&stab->list, &qdisc_stab_list);
+       spin_unlock(&qdisc_stab_lock);
+
+       return stab;
+}
+
+void qdisc_put_stab(struct qdisc_size_table *tab)
+{
+       if (!tab)
+               return;
+
+       spin_lock(&qdisc_stab_lock);
+
+       if (--tab->refcnt == 0) {
+               list_del(&tab->list);
+               kfree(tab);
+       }
+
+       spin_unlock(&qdisc_stab_lock);
+}
+EXPORT_SYMBOL(qdisc_put_stab);
+
+static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
+{
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, TCA_STAB);
+       NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
+       nla_nest_end(skb, nest);
+
+       return skb->len;
+
+nla_put_failure:
+       return -1;
+}
+
+void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+{
+       int pkt_len, slot;
+
+       pkt_len = skb->len + stab->szopts.overhead;
+       if (unlikely(!stab->szopts.tsize))
+               goto out;
+
+       slot = pkt_len + stab->szopts.cell_align;
+       if (unlikely(slot < 0))
+               slot = 0;
+
+       slot >>= stab->szopts.cell_log;
+       if (likely(slot < stab->szopts.tsize))
+               pkt_len = stab->data[slot];
+       else
+               pkt_len = stab->data[stab->szopts.tsize - 1] *
+                               (slot / stab->szopts.tsize) +
+                               stab->data[slot % stab->szopts.tsize];
+
+       pkt_len <<= stab->szopts.size_log;
+out:
+       if (unlikely(pkt_len < 1))
+               pkt_len = 1;
+       qdisc_skb_cb(skb)->pkt_len = pkt_len;
+}
+EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+
 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);
-       struct net_device *dev = wd->qdisc->dev;
 
        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
        smp_wmb();
-       netif_schedule(dev);
+       __netif_schedule(qdisc_root(wd->qdisc));
 
        return HRTIMER_NORESTART;
 }
@@ -302,6 +478,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
        ktime_t time;
 
+       if (test_bit(__QDISC_STATE_DEACTIVATED,
+                    &qdisc_root_sleeping(wd->qdisc)->state))
+               return;
+
        wd->qdisc->flags |= TCQ_F_THROTTLED;
        time = ktime_set(0, 0);
        time = ktime_add_ns(time, PSCHED_US2NS(expires));
@@ -316,6 +496,110 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
+static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
+{
+       unsigned int size = n * sizeof(struct hlist_head), i;
+       struct hlist_head *h;
+
+       if (size <= PAGE_SIZE)
+               h = kmalloc(size, GFP_KERNEL);
+       else
+               h = (struct hlist_head *)
+                       __get_free_pages(GFP_KERNEL, get_order(size));
+
+       if (h != NULL) {
+               for (i = 0; i < n; i++)
+                       INIT_HLIST_HEAD(&h[i]);
+       }
+       return h;
+}
+
+static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
+{
+       unsigned int size = n * sizeof(struct hlist_head);
+
+       if (size <= PAGE_SIZE)
+               kfree(h);
+       else
+               free_pages((unsigned long)h, get_order(size));
+}
+
+void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
+{
+       struct Qdisc_class_common *cl;
+       struct hlist_node *n, *next;
+       struct hlist_head *nhash, *ohash;
+       unsigned int nsize, nmask, osize;
+       unsigned int i, h;
+
+       /* Rehash when load factor exceeds 0.75 */
+       if (clhash->hashelems * 4 <= clhash->hashsize * 3)
+               return;
+       nsize = clhash->hashsize * 2;
+       nmask = nsize - 1;
+       nhash = qdisc_class_hash_alloc(nsize);
+       if (nhash == NULL)
+               return;
+
+       ohash = clhash->hash;
+       osize = clhash->hashsize;
+
+       sch_tree_lock(sch);
+       for (i = 0; i < osize; i++) {
+               hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
+                       h = qdisc_class_hash(cl->classid, nmask);
+                       hlist_add_head(&cl->hnode, &nhash[h]);
+               }
+       }
+       clhash->hash     = nhash;
+       clhash->hashsize = nsize;
+       clhash->hashmask = nmask;
+       sch_tree_unlock(sch);
+
+       qdisc_class_hash_free(ohash, osize);
+}
+EXPORT_SYMBOL(qdisc_class_hash_grow);
+
+int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
+{
+       unsigned int size = 4;
+
+       clhash->hash = qdisc_class_hash_alloc(size);
+       if (clhash->hash == NULL)
+               return -ENOMEM;
+       clhash->hashsize  = size;
+       clhash->hashmask  = size - 1;
+       clhash->hashelems = 0;
+       return 0;
+}
+EXPORT_SYMBOL(qdisc_class_hash_init);
+
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
+{
+       qdisc_class_hash_free(clhash->hash, clhash->hashsize);
+}
+EXPORT_SYMBOL(qdisc_class_hash_destroy);
+
+void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
+                            struct Qdisc_class_common *cl)
+{
+       unsigned int h;
+
+       INIT_HLIST_NODE(&cl->hnode);
+       h = qdisc_class_hash(cl->classid, clhash->hashmask);
+       hlist_add_head(&cl->hnode, &clhash->hash[h]);
+       clhash->hashelems++;
+}
+EXPORT_SYMBOL(qdisc_class_hash_insert);
+
+void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
+                            struct Qdisc_class_common *cl)
+{
+       hlist_del(&cl->hnode);
+       clhash->hashelems--;
+}
+EXPORT_SYMBOL(qdisc_class_hash_remove);
+
 /* Allocate an unique handle from space managed by kernel */
 
 static u32 qdisc_alloc_handle(struct net_device *dev)
@@ -332,47 +616,28 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
        return i>0 ? autohandle : 0;
 }
 
-/* Attach toplevel qdisc to device dev */
+/* Attach toplevel qdisc to device queue. */
 
-static struct Qdisc *
-dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
+static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+                                    struct Qdisc *qdisc)
 {
-       struct Qdisc *oqdisc;
-
-       if (dev->flags & IFF_UP)
-               dev_deactivate(dev);
-
-       qdisc_lock_tree(dev);
-       if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
-               oqdisc = dev->qdisc_ingress;
-               /* Prune old scheduler */
-               if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
-                       /* delete */
-                       qdisc_reset(oqdisc);
-                       dev->qdisc_ingress = NULL;
-               } else {  /* new */
-                       dev->qdisc_ingress = qdisc;
-               }
-
-       } else {
-
-               oqdisc = dev->qdisc_sleeping;
+       struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+       spinlock_t *root_lock;
 
-               /* Prune old scheduler */
-               if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
-                       qdisc_reset(oqdisc);
+       root_lock = qdisc_root_lock(oqdisc);
+       spin_lock_bh(root_lock);
 
-               /* ... and graft new one */
-               if (qdisc == NULL)
-                       qdisc = &noop_qdisc;
-               dev->qdisc_sleeping = qdisc;
-               dev->qdisc = &noop_qdisc;
-       }
+       /* Prune old scheduler */
+       if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+               qdisc_reset(oqdisc);
 
-       qdisc_unlock_tree(dev);
+       /* ... and graft new one */
+       if (qdisc == NULL)
+               qdisc = &noop_qdisc;
+       dev_queue->qdisc_sleeping = qdisc;
+       dev_queue->qdisc = &noop_qdisc;
 
-       if (dev->flags & IFF_UP)
-               dev_activate(dev);
+       spin_unlock_bh(root_lock);
 
        return oqdisc;
 }
@@ -386,7 +651,10 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
        if (n == 0)
                return;
        while ((parentid = sch->parent)) {
-               sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+               if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
+                       return;
+
+               sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                if (sch == NULL) {
                        WARN_ON(parentid != TC_H_ROOT);
                        return;
@@ -402,26 +670,61 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 
-/* Graft qdisc "new" to class "classid" of qdisc "parent" or
-   to device "dev".
+static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
+                              struct Qdisc *old, struct Qdisc *new)
+{
+       if (new || old)
+               qdisc_notify(skb, n, clid, old, new);
 
-   Old qdisc is not destroyed but returned in *old.
+       if (old)
+               qdisc_destroy(old);
+}
+
+/* Graft qdisc "new" to class "classid" of qdisc "parent" or
+ * to device "dev".
+ *
+ * When appropriate send a netlink notification using 'skb'
+ * and "n".
+ *
+ * On success, destroy old qdisc.
  */
 
 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
-                      u32 classid,
-                      struct Qdisc *new, struct Qdisc **old)
+                      struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
+                      struct Qdisc *new, struct Qdisc *old)
 {
+       struct Qdisc *q = old;
        int err = 0;
-       struct Qdisc *q = *old;
-
 
        if (parent == NULL) {
-               if (q && q->flags&TCQ_F_INGRESS) {
-                       *old = dev_graft_qdisc(dev, q);
-               } else {
-                       *old = dev_graft_qdisc(dev, new);
+               unsigned int i, num_q, ingress;
+
+               ingress = 0;
+               num_q = dev->num_tx_queues;
+               if ((q && q->flags & TCQ_F_INGRESS) ||
+                   (new && new->flags & TCQ_F_INGRESS)) {
+                       num_q = 1;
+                       ingress = 1;
+               }
+
+               if (dev->flags & IFF_UP)
+                       dev_deactivate(dev);
+
+               for (i = 0; i < num_q; i++) {
+                       struct netdev_queue *dev_queue = &dev->rx_queue;
+
+                       if (!ingress)
+                               dev_queue = netdev_get_tx_queue(dev, i);
+
+                       old = dev_graft_qdisc(dev_queue, new);
+                       if (new && i > 0)
+                               atomic_inc(&new->refcnt);
+
+                       notify_and_destroy(skb, n, classid, old, new);
                }
+
+               if (dev->flags & IFF_UP)
+                       dev_activate(dev);
        } else {
                const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 
@@ -430,14 +733,20 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                if (cops) {
                        unsigned long cl = cops->get(parent, classid);
                        if (cl) {
-                               err = cops->graft(parent, cl, new, old);
+                               err = cops->graft(parent, cl, new, &old);
                                cops->put(parent, cl);
                        }
                }
+               if (!err)
+                       notify_and_destroy(skb, n, classid, old, new);
        }
        return err;
 }
 
+/* lockdep annotation is needed for ingress; egress gets it only for name */
+static struct lock_class_key qdisc_tx_lock;
+static struct lock_class_key qdisc_rx_lock;
+
 /*
    Allocate and initialize new qdisc.
 
@@ -445,13 +754,14 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
  */
 
 static struct Qdisc *
-qdisc_create(struct net_device *dev, u32 parent, u32 handle,
-          struct nlattr **tca, int *errp)
+qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
+            u32 parent, u32 handle, struct nlattr **tca, int *errp)
 {
        int err;
        struct nlattr *kind = tca[TCA_KIND];
        struct Qdisc *sch;
        struct Qdisc_ops *ops;
+       struct qdisc_size_table *stab;
 
        ops = qdisc_lookup_ops(kind);
 #ifdef CONFIG_KMOD
@@ -486,7 +796,7 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
        if (ops == NULL)
                goto err_out;
 
-       sch = qdisc_alloc(dev, ops);
+       sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch)) {
                err = PTR_ERR(sch);
                goto err_out2;
@@ -496,24 +806,32 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
 
        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
-               sch->stats_lock = &dev->ingress_lock;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
+               lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
        } else {
-               sch->stats_lock = &dev->queue_lock;
                if (handle == 0) {
                        handle = qdisc_alloc_handle(dev);
                        err = -ENOMEM;
                        if (handle == 0)
                                goto err_out3;
                }
+               lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
        }
 
        sch->handle = handle;
 
        if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
+               if (tca[TCA_STAB]) {
+                       stab = qdisc_get_stab(tca[TCA_STAB]);
+                       if (IS_ERR(stab)) {
+                               err = PTR_ERR(stab);
+                               goto err_out3;
+                       }
+                       sch->stab = stab;
+               }
                if (tca[TCA_RATE]) {
                        err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-                                               sch->stats_lock,
+                                               qdisc_root_lock(sch),
                                                tca[TCA_RATE]);
                        if (err) {
                                /*
@@ -526,13 +844,13 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
                                goto err_out3;
                        }
                }
-               qdisc_lock_tree(dev);
-               list_add_tail(&sch->list, &dev->qdisc_list);
-               qdisc_unlock_tree(dev);
+
+               qdisc_list_add(sch);
 
                return sch;
        }
 err_out3:
+       qdisc_put_stab(sch->stab);
        dev_put(dev);
        kfree((char *) sch - sch->padded);
 err_out2:
@@ -544,18 +862,29 @@ err_out:
 
 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 {
-       if (tca[TCA_OPTIONS]) {
-               int err;
+       struct qdisc_size_table *stab = NULL;
+       int err = 0;
 
+       if (tca[TCA_OPTIONS]) {
                if (sch->ops->change == NULL)
                        return -EINVAL;
                err = sch->ops->change(sch, tca[TCA_OPTIONS]);
                if (err)
                        return err;
        }
+
+       if (tca[TCA_STAB]) {
+               stab = qdisc_get_stab(tca[TCA_STAB]);
+               if (IS_ERR(stab))
+                       return PTR_ERR(stab);
+       }
+
+       qdisc_put_stab(sch->stab);
+       sch->stab = stab;
+
        if (tca[TCA_RATE])
                gen_replace_estimator(&sch->bstats, &sch->rate_est,
-                       sch->stats_lock, tca[TCA_RATE]);
+                                     qdisc_root_lock(sch), tca[TCA_RATE]);
        return 0;
 }
 
@@ -631,10 +960,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
                        } else { /* ingress */
-                               q = dev->qdisc_ingress;
+                               q = dev->rx_queue.qdisc_sleeping;
                        }
                } else {
-                       q = dev->qdisc_sleeping;
+                       struct netdev_queue *dev_queue;
+                       dev_queue = netdev_get_tx_queue(dev, 0);
+                       q = dev_queue->qdisc_sleeping;
                }
                if (!q)
                        return -ENOENT;
@@ -654,14 +985,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        return -EINVAL;
                if (q->handle == 0)
                        return -ENOENT;
-               if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
+               if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
                        return err;
-               if (q) {
-                       qdisc_notify(skb, n, clid, q, NULL);
-                       qdisc_lock_tree(dev);
-                       qdisc_destroy(q);
-                       qdisc_unlock_tree(dev);
-               }
        } else {
                qdisc_notify(skb, n, clid, NULL, q);
        }
@@ -705,10 +1030,12 @@ replay:
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
                        } else { /*ingress */
-                               q = dev->qdisc_ingress;
+                               q = dev->rx_queue.qdisc_sleeping;
                        }
                } else {
-                       q = dev->qdisc_sleeping;
+                       struct netdev_queue *dev_queue;
+                       dev_queue = netdev_get_tx_queue(dev, 0);
+                       q = dev_queue->qdisc_sleeping;
                }
 
                /* It may be default qdisc, ignore it */
@@ -785,10 +1112,12 @@ create_n_graft:
        if (!(n->nlmsg_flags&NLM_F_CREATE))
                return -ENOENT;
        if (clid == TC_H_INGRESS)
-               q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent,
+               q = qdisc_create(dev, &dev->rx_queue,
+                                tcm->tcm_parent, tcm->tcm_parent,
                                 tca, &err);
        else
-               q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle,
+               q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
+                                tcm->tcm_parent, tcm->tcm_handle,
                                 tca, &err);
        if (q == NULL) {
                if (err == -EAGAIN)
@@ -797,24 +1126,13 @@ create_n_graft:
        }
 
 graft:
-       if (1) {
-               struct Qdisc *old_q = NULL;
-               err = qdisc_graft(dev, p, clid, q, &old_q);
-               if (err) {
-                       if (q) {
-                               qdisc_lock_tree(dev);
-                               qdisc_destroy(q);
-                               qdisc_unlock_tree(dev);
-                       }
-                       return err;
-               }
-               qdisc_notify(skb, n, clid, old_q, q);
-               if (old_q) {
-                       qdisc_lock_tree(dev);
-                       qdisc_destroy(old_q);
-                       qdisc_unlock_tree(dev);
-               }
+       err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+       if (err) {
+               if (q)
+                       qdisc_destroy(q);
+               return err;
        }
+
        return 0;
 }
 
@@ -831,7 +1149,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
-       tcm->tcm_ifindex = q->dev->ifindex;
+       tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = clid;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = atomic_read(&q->refcnt);
@@ -840,8 +1158,11 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                goto nla_put_failure;
        q->qstats.qlen = q->q.qlen;
 
+       if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+               goto nla_put_failure;
+
        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-                       TCA_XSTATS, q->stats_lock, &d) < 0)
+                                        TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
                goto nla_put_failure;
 
        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -891,13 +1212,57 @@ err_out:
        return -EINVAL;
 }
 
+static bool tc_qdisc_dump_ignore(struct Qdisc *q)
+{
+       return (q->flags & TCQ_F_BUILTIN) ? true : false;
+}
+
+static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
+                             struct netlink_callback *cb,
+                             int *q_idx_p, int s_q_idx)
+{
+       int ret = 0, q_idx = *q_idx_p;
+       struct Qdisc *q;
+
+       if (!root)
+               return 0;
+
+       q = root;
+       if (q_idx < s_q_idx) {
+               q_idx++;
+       } else {
+               if (!tc_qdisc_dump_ignore(q) &&
+                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+                                 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+                       goto done;
+               q_idx++;
+       }
+       list_for_each_entry(q, &root->list, list) {
+               if (q_idx < s_q_idx) {
+                       q_idx++;
+                       continue;
+               }
+               if (!tc_qdisc_dump_ignore(q) && 
+                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+                                 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+                       goto done;
+               q_idx++;
+       }
+
+out:
+       *q_idx_p = q_idx;
+       return ret;
+done:
+       ret = -1;
+       goto out;
+}
+
 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
        int idx, q_idx;
        int s_idx, s_q_idx;
        struct net_device *dev;
-       struct Qdisc *q;
 
        if (net != &init_net)
                return 0;
@@ -907,21 +1272,22 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        read_lock(&dev_base_lock);
        idx = 0;
        for_each_netdev(&init_net, dev) {
+               struct netdev_queue *dev_queue;
+
                if (idx < s_idx)
                        goto cont;
                if (idx > s_idx)
                        s_q_idx = 0;
                q_idx = 0;
-               list_for_each_entry(q, &dev->qdisc_list, list) {
-                       if (q_idx < s_q_idx) {
-                               q_idx++;
-                               continue;
-                       }
-                       if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
-                                         cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
-                               goto done;
-                       q_idx++;
-               }
+
+               dev_queue = netdev_get_tx_queue(dev, 0);
+               if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
+                       goto done;
+
+               dev_queue = &dev->rx_queue;
+               if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
+                       goto done;
+
 cont:
                idx++;
        }
@@ -946,6 +1312,7 @@ done:
 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 {
        struct net *net = sock_net(skb->sk);
+       struct netdev_queue *dev_queue;
        struct tcmsg *tcm = NLMSG_DATA(n);
        struct nlattr *tca[TCA_MAX + 1];
        struct net_device *dev;
@@ -983,6 +1350,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
        /* Step 1. Determine qdisc handle X:0 */
 
+       dev_queue = netdev_get_tx_queue(dev, 0);
        if (pid != TC_H_ROOT) {
                u32 qid1 = TC_H_MAJ(pid);
 
@@ -993,7 +1361,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                } else if (qid1) {
                        qid = qid1;
                } else if (qid == 0)
-                       qid = dev->qdisc_sleeping->handle;
+                       qid = dev_queue->qdisc_sleeping->handle;
 
                /* Now qid is genuine qdisc handle consistent
                   both with parent and child.
@@ -1004,7 +1372,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        pid = TC_H_MAKE(qid, pid);
        } else {
                if (qid == 0)
-                       qid = dev->qdisc_sleeping->handle;
+                       qid = dev_queue->qdisc_sleeping->handle;
        }
 
        /* OK. Locate qdisc */
@@ -1077,7 +1445,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
        tcm->tcm_family = AF_UNSPEC;
-       tcm->tcm_ifindex = q->dev->ifindex;
+       tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
        tcm->tcm_info = 0;
@@ -1086,7 +1454,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
                goto nla_put_failure;
 
        if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-                       TCA_XSTATS, q->stats_lock, &d) < 0)
+                                        TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
                goto nla_put_failure;
 
        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
@@ -1137,15 +1505,62 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
 }
 
+static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
+                               struct tcmsg *tcm, struct netlink_callback *cb,
+                               int *t_p, int s_t)
+{
+       struct qdisc_dump_args arg;
+
+       if (tc_qdisc_dump_ignore(q) ||
+           *t_p < s_t || !q->ops->cl_ops ||
+           (tcm->tcm_parent &&
+            TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
+               (*t_p)++;
+               return 0;
+       }
+       if (*t_p > s_t)
+               memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
+       arg.w.fn = qdisc_class_dump;
+       arg.skb = skb;
+       arg.cb = cb;
+       arg.w.stop  = 0;
+       arg.w.skip = cb->args[1];
+       arg.w.count = 0;
+       q->ops->cl_ops->walk(q, &arg.w);
+       cb->args[1] = arg.w.count;
+       if (arg.w.stop)
+               return -1;
+       (*t_p)++;
+       return 0;
+}
+
+static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
+                              struct tcmsg *tcm, struct netlink_callback *cb,
+                              int *t_p, int s_t)
+{
+       struct Qdisc *q;
+
+       if (!root)
+               return 0;
+
+       if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
+               return -1;
+
+       list_for_each_entry(q, &root->list, list) {
+               if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
+
 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
        struct net *net = sock_net(skb->sk);
-       int t;
-       int s_t;
+       struct netdev_queue *dev_queue;
        struct net_device *dev;
-       struct Qdisc *q;
-       struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
-       struct qdisc_dump_args arg;
+       int t, s_t;
 
        if (net != &init_net)
                return 0;
@@ -1158,28 +1573,15 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
        s_t = cb->args[0];
        t = 0;
 
-       list_for_each_entry(q, &dev->qdisc_list, list) {
-               if (t < s_t || !q->ops->cl_ops ||
-                   (tcm->tcm_parent &&
-                    TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
-                       t++;
-                       continue;
-               }
-               if (t > s_t)
-                       memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
-               arg.w.fn = qdisc_class_dump;
-               arg.skb = skb;
-               arg.cb = cb;
-               arg.w.stop  = 0;
-               arg.w.skip = cb->args[1];
-               arg.w.count = 0;
-               q->ops->cl_ops->walk(q, &arg.w);
-               cb->args[1] = arg.w.count;
-               if (arg.w.stop)
-                       break;
-               t++;
-       }
+       dev_queue = netdev_get_tx_queue(dev, 0);
+       if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
+               goto done;
+
+       dev_queue = &dev->rx_queue;
+       if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
+               goto done;
 
+done:
        cb->args[0] = t;
 
        dev_put(dev);
@@ -1249,12 +1651,12 @@ void tcf_destroy(struct tcf_proto *tp)
        kfree(tp);
 }
 
-void tcf_destroy_chain(struct tcf_proto *fl)
+void tcf_destroy_chain(struct tcf_proto **fl)
 {
        struct tcf_proto *tp;
 
-       while ((tp = fl) != NULL) {
-               fl = tp->next;
+       while ((tp = *fl) != NULL) {
+               *fl = tp->next;
                tcf_destroy(tp);
        }
 }