diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b3ef830..24d17ce 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -27,6 +27,7 @@
 #include <linux/kmod.h>
 #include <linux/list.h>
 #include <linux/hrtimer.h>
+#include <linux/lockdep.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -96,10 +97,9 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
 
    Auxiliary routines:
 
-   ---requeue
+   ---peek
 
-   requeues once dequeued packet. It is used for non-standard or
-   just buggy devices, which can defer output even if netif_queue_stopped()=0.
+   like dequeue but without removing a packet from the queue
 
    ---reset
 
@@ -146,8 +146,14 @@ int register_qdisc(struct Qdisc_ops *qops)
 
        if (qops->enqueue == NULL)
                qops->enqueue = noop_qdisc_ops.enqueue;
-       if (qops->requeue == NULL)
-               qops->requeue = noop_qdisc_ops.requeue;
+       if (qops->peek == NULL) {
+               if (qops->dequeue == NULL) {
+                       qops->peek = noop_qdisc_ops.peek;
+               } else {
+                       rc = -EINVAL;
+                       goto out;
+               }
+       }
        if (qops->dequeue == NULL)
                qops->dequeue = noop_qdisc_ops.dequeue;
 
@@ -183,17 +189,53 @@ EXPORT_SYMBOL(unregister_qdisc);
    (root qdisc, all its children, children of children etc.)
  */
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 {
        struct Qdisc *q;
 
-       list_for_each_entry(q, &dev->qdisc_list, list) {
+       if (!(root->flags & TCQ_F_BUILTIN) &&
+           root->handle == handle)
+               return root;
+
+       list_for_each_entry(q, &root->list, list) {
                if (q->handle == handle)
                        return q;
        }
        return NULL;
 }
 
+static void qdisc_list_add(struct Qdisc *q)
+{
+       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
+               list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
+               list_del(&q->list);
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+{
+       unsigned int i;
+       struct Qdisc *q;
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+               struct Qdisc *txq_root = txq->qdisc_sleeping;
+
+               q = qdisc_match_from_root(txq_root, handle);
+               if (q)
+                       goto out;
+       }
+
+       q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+out:
+       return q;
+}
+
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
        unsigned long cl;
@@ -277,14 +319,149 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
 }
 EXPORT_SYMBOL(qdisc_put_rtab);
 
+static LIST_HEAD(qdisc_stab_list);
+static DEFINE_SPINLOCK(qdisc_stab_lock);
+
+static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
+       [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
+       [TCA_STAB_DATA] = { .type = NLA_BINARY },
+};
+
+static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
+{
+       struct nlattr *tb[TCA_STAB_MAX + 1];
+       struct qdisc_size_table *stab;
+       struct tc_sizespec *s;
+       unsigned int tsize = 0;
+       u16 *tab = NULL;
+       int err;
+
+       err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
+       if (err < 0)
+               return ERR_PTR(err);
+       if (!tb[TCA_STAB_BASE])
+               return ERR_PTR(-EINVAL);
+
+       s = nla_data(tb[TCA_STAB_BASE]);
+
+       if (s->tsize > 0) {
+               if (!tb[TCA_STAB_DATA])
+                       return ERR_PTR(-EINVAL);
+               tab = nla_data(tb[TCA_STAB_DATA]);
+               tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
+       }
+
+       if (!s || tsize != s->tsize || (!tab && tsize > 0))
+               return ERR_PTR(-EINVAL);
+
+       spin_lock(&qdisc_stab_lock);
+
+       list_for_each_entry(stab, &qdisc_stab_list, list) {
+               if (memcmp(&stab->szopts, s, sizeof(*s)))
+                       continue;
+               if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
+                       continue;
+               stab->refcnt++;
+               spin_unlock(&qdisc_stab_lock);
+               return stab;
+       }
+
+       spin_unlock(&qdisc_stab_lock);
+
+       stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
+       if (!stab)
+               return ERR_PTR(-ENOMEM);
+
+       stab->refcnt = 1;
+       stab->szopts = *s;
+       if (tsize > 0)
+               memcpy(stab->data, tab, tsize * sizeof(u16));
+
+       spin_lock(&qdisc_stab_lock);
+       list_add_tail(&stab->list, &qdisc_stab_list);
+       spin_unlock(&qdisc_stab_lock);
+
+       return stab;
+}
+
+void qdisc_put_stab(struct qdisc_size_table *tab)
+{
+       if (!tab)
+               return;
+
+       spin_lock(&qdisc_stab_lock);
+
+       if (--tab->refcnt == 0) {
+               list_del(&tab->list);
+               kfree(tab);
+       }
+
+       spin_unlock(&qdisc_stab_lock);
+}
+EXPORT_SYMBOL(qdisc_put_stab);
+
+static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
+{
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, TCA_STAB);
+       if (nest == NULL)
+               goto nla_put_failure;
+       NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
+       nla_nest_end(skb, nest);
+
+       return skb->len;
+
+nla_put_failure:
+       return -1;
+}
+
+void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+{
+       int pkt_len, slot;
+
+       pkt_len = skb->len + stab->szopts.overhead;
+       if (unlikely(!stab->szopts.tsize))
+               goto out;
+
+       slot = pkt_len + stab->szopts.cell_align;
+       if (unlikely(slot < 0))
+               slot = 0;
+
+       slot >>= stab->szopts.cell_log;
+       if (likely(slot < stab->szopts.tsize))
+               pkt_len = stab->data[slot];
+       else
+               pkt_len = stab->data[stab->szopts.tsize - 1] *
+                               (slot / stab->szopts.tsize) +
+                               stab->data[slot % stab->szopts.tsize];
+
+       pkt_len <<= stab->szopts.size_log;
+out:
+       if (unlikely(pkt_len < 1))
+               pkt_len = 1;
+       qdisc_skb_cb(skb)->pkt_len = pkt_len;
+}
+EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+
+void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
+{
+       if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
+               printk(KERN_WARNING
+                      "%s: %s qdisc %X: is non-work-conserving?\n",
+                      txt, qdisc->ops->id, qdisc->handle >> 16);
+               qdisc->flags |= TCQ_F_WARN_NONWC;
+       }
+}
+EXPORT_SYMBOL(qdisc_warn_nonwc);
+
 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);
 
        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
-       smp_wmb();
-       __netif_schedule(wd->qdisc);
+       __netif_schedule(qdisc_root(wd->qdisc));
 
        return HRTIMER_NORESTART;
 }
@@ -301,9 +478,13 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
        ktime_t time;
 
+       if (test_bit(__QDISC_STATE_DEACTIVATED,
+                    &qdisc_root_sleeping(wd->qdisc)->state))
+               return;
+
        wd->qdisc->flags |= TCQ_F_THROTTLED;
        time = ktime_set(0, 0);
-       time = ktime_add_ns(time, PSCHED_US2NS(expires));
+       time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
        hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
 }
 EXPORT_SYMBOL(qdisc_watchdog_schedule);
@@ -315,7 +496,7 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
-struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
+static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
 {
        unsigned int size = n * sizeof(struct hlist_head), i;
        struct hlist_head *h;
@@ -440,44 +621,21 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                                     struct Qdisc *qdisc)
 {
+       struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
        spinlock_t *root_lock;
-       struct Qdisc *oqdisc;
-       int ingress;
-
-       ingress = 0;
-       if (qdisc && qdisc->flags&TCQ_F_INGRESS)
-               ingress = 1;
 
-       if (ingress) {
-               oqdisc = dev_queue->qdisc;
-       } else {
-               oqdisc = dev_queue->qdisc_sleeping;
-       }
-
-       root_lock = qdisc_root_lock(oqdisc);
+       root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);
 
-       if (ingress) {
-               /* Prune old scheduler */
-               if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
-                       /* delete */
-                       qdisc_reset(oqdisc);
-                       dev_queue->qdisc = NULL;
-               } else {  /* new */
-                       dev_queue->qdisc = qdisc;
-               }
+       /* Prune old scheduler */
+       if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+               qdisc_reset(oqdisc);
 
-       } else {
-               /* Prune old scheduler */
-               if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
-                       qdisc_reset(oqdisc);
-
-               /* ... and graft new one */
-               if (qdisc == NULL)
-                       qdisc = &noop_qdisc;
-               dev_queue->qdisc_sleeping = qdisc;
-               dev_queue->qdisc = &noop_qdisc;
-       }
+       /* ... and graft new one */
+       if (qdisc == NULL)
+               qdisc = &noop_qdisc;
+       dev_queue->qdisc_sleeping = qdisc;
+       rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
 
        spin_unlock_bh(root_lock);
 
@@ -518,11 +676,8 @@ static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid
        if (new || old)
                qdisc_notify(skb, n, clid, old, new);
 
-       if (old) {
-               spin_lock_bh(&old->q.lock);
+       if (old)
                qdisc_destroy(old);
-               spin_unlock_bh(&old->q.lock);
-       }
 }
 
 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
@@ -546,7 +701,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 
                ingress = 0;
                num_q = dev->num_tx_queues;
-               if (q && q->flags & TCQ_F_INGRESS) {
+               if ((q && q->flags & TCQ_F_INGRESS) ||
+                   (new && new->flags & TCQ_F_INGRESS)) {
                        num_q = 1;
                        ingress = 1;
                }
@@ -560,13 +716,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                        if (!ingress)
                                dev_queue = netdev_get_tx_queue(dev, i);
 
-                       if (ingress) {
-                               old = dev_graft_qdisc(dev_queue, q);
-                       } else {
-                               old = dev_graft_qdisc(dev_queue, new);
-                               if (new && i > 0)
-                                       atomic_inc(&new->refcnt);
-                       }
+                       old = dev_graft_qdisc(dev_queue, new);
+                       if (new && i > 0)
+                               atomic_inc(&new->refcnt);
+
                        notify_and_destroy(skb, n, classid, old, new);
                }
 
@@ -590,6 +743,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
        return err;
 }
 
+/* lockdep annotation is needed for ingress; egress gets it only for name */
+static struct lock_class_key qdisc_tx_lock;
+static struct lock_class_key qdisc_rx_lock;
+
 /*
    Allocate and initialize new qdisc.
 
@@ -604,9 +761,10 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
        struct nlattr *kind = tca[TCA_KIND];
        struct Qdisc *sch;
        struct Qdisc_ops *ops;
+       struct qdisc_size_table *stab;
 
        ops = qdisc_lookup_ops(kind);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
        if (ops == NULL && kind != NULL) {
                char name[IFNAMSIZ];
                if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
@@ -649,6 +807,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
+               lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
        } else {
                if (handle == 0) {
                        handle = qdisc_alloc_handle(dev);
@@ -656,15 +815,31 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                        if (handle == 0)
                                goto err_out3;
                }
+               lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
        }
 
        sch->handle = handle;
 
        if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
+               if (tca[TCA_STAB]) {
+                       stab = qdisc_get_stab(tca[TCA_STAB]);
+                       if (IS_ERR(stab)) {
+                               err = PTR_ERR(stab);
+                               goto err_out3;
+                       }
+                       sch->stab = stab;
+               }
                if (tca[TCA_RATE]) {
+                       spinlock_t *root_lock;
+
+                       if ((sch->parent != TC_H_ROOT) &&
+                           !(sch->flags & TCQ_F_INGRESS))
+                               root_lock = qdisc_root_sleeping_lock(sch);
+                       else
+                               root_lock = qdisc_lock(sch);
+
                        err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-                                               qdisc_root_lock(sch),
-                                               tca[TCA_RATE]);
+                                               root_lock, tca[TCA_RATE]);
                        if (err) {
                                /*
                                 * Any broken qdiscs that would require
@@ -676,13 +851,13 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
                                goto err_out3;
                        }
                }
-               spin_lock_bh(&dev->qdisc_list_lock);
-               list_add_tail(&sch->list, &dev->qdisc_list);
-               spin_unlock_bh(&dev->qdisc_list_lock);
+
+               qdisc_list_add(sch);
 
                return sch;
        }
 err_out3:
+       qdisc_put_stab(sch->stab);
        dev_put(dev);
        kfree((char *) sch - sch->padded);
 err_out2:
@@ -694,18 +869,33 @@ err_out:
 
 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 {
-       if (tca[TCA_OPTIONS]) {
-               int err;
+       struct qdisc_size_table *stab = NULL;
+       int err = 0;
 
+       if (tca[TCA_OPTIONS]) {
                if (sch->ops->change == NULL)
                        return -EINVAL;
                err = sch->ops->change(sch, tca[TCA_OPTIONS]);
                if (err)
                        return err;
        }
+
+       if (tca[TCA_STAB]) {
+               stab = qdisc_get_stab(tca[TCA_STAB]);
+               if (IS_ERR(stab))
+                       return PTR_ERR(stab);
+       }
+
+       qdisc_put_stab(sch->stab);
+       sch->stab = stab;
+
        if (tca[TCA_RATE])
+               /* NB: ignores errors from replace_estimator
+                  because change can't be undone. */
                gen_replace_estimator(&sch->bstats, &sch->rate_est,
-                                     qdisc_root_lock(sch), tca[TCA_RATE]);
+                                           qdisc_root_sleeping_lock(sch),
+                                           tca[TCA_RATE]);
+
        return 0;
 }
 
@@ -781,7 +971,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
                        } else { /* ingress */
-                               q = dev->rx_queue.qdisc;
+                               q = dev->rx_queue.qdisc_sleeping;
                        }
                } else {
                        struct netdev_queue *dev_queue;
@@ -851,7 +1041,7 @@ replay:
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
                        } else { /*ingress */
-                               q = dev->rx_queue.qdisc;
+                               q = dev->rx_queue.qdisc_sleeping;
                        }
                } else {
                        struct netdev_queue *dev_queue;
@@ -947,20 +1137,13 @@ create_n_graft:
        }
 
 graft:
-       if (1) {
-               spinlock_t *root_lock;
-
-               err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
-               if (err) {
-                       if (q) {
-                               root_lock = qdisc_root_lock(q);
-                               spin_lock_bh(root_lock);
-                               qdisc_destroy(q);
-                               spin_unlock_bh(root_lock);
-                       }
-                       return err;
-               }
+       err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+       if (err) {
+               if (q)
+                       qdisc_destroy(q);
+               return err;
        }
+
        return 0;
 }
 
@@ -986,8 +1169,11 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
                goto nla_put_failure;
        q->qstats.qlen = q->q.qlen;
 
-       if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-                                        TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+       if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+               goto nla_put_failure;
+
+       if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+                                        qdisc_root_sleeping_lock(q), &d) < 0)
                goto nla_put_failure;
 
        if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1037,13 +1223,57 @@ err_out:
        return -EINVAL;
 }
 
+static bool tc_qdisc_dump_ignore(struct Qdisc *q)
+{
+       return (q->flags & TCQ_F_BUILTIN) ? true : false;
+}
+
+static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
+                             struct netlink_callback *cb,
+                             int *q_idx_p, int s_q_idx)
+{
+       int ret = 0, q_idx = *q_idx_p;
+       struct Qdisc *q;
+
+       if (!root)
+               return 0;
+
+       q = root;
+       if (q_idx < s_q_idx) {
+               q_idx++;
+       } else {
+               if (!tc_qdisc_dump_ignore(q) &&
+                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+                                 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+                       goto done;
+               q_idx++;
+       }
+       list_for_each_entry(q, &root->list, list) {
+               if (q_idx < s_q_idx) {
+                       q_idx++;
+                       continue;
+               }
+               if (!tc_qdisc_dump_ignore(q) &&
+                   tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+                                 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+                       goto done;
+               q_idx++;
+       }
+
+out:
+       *q_idx_p = q_idx;
+       return ret;
+done:
+       ret = -1;
+       goto out;
+}
+
 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
        int idx, q_idx;
        int s_idx, s_q_idx;
        struct net_device *dev;
-       struct Qdisc *q;
 
        if (net != &init_net)
                return 0;
@@ -1053,21 +1283,22 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        read_lock(&dev_base_lock);
        idx = 0;
        for_each_netdev(&init_net, dev) {
+               struct netdev_queue *dev_queue;
+
                if (idx < s_idx)
                        goto cont;
                if (idx > s_idx)
                        s_q_idx = 0;
                q_idx = 0;
-               list_for_each_entry(q, &dev->qdisc_list, list) {
-                       if (q_idx < s_q_idx) {
-                               q_idx++;
-                               continue;
-                       }
-                       if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
-                                         cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
-                               goto done;
-                       q_idx++;
-               }
+
+               dev_queue = netdev_get_tx_queue(dev, 0);
+               if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
+                       goto done;
+
+               dev_queue = &dev->rx_queue;
+               if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
+                       goto done;
+
 cont:
                idx++;
        }
@@ -1233,8 +1464,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
                goto nla_put_failure;
 
-       if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-                                        TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
+       if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
+                                        qdisc_root_sleeping_lock(q), &d) < 0)
                goto nla_put_failure;
 
        if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
@@ -1285,15 +1516,62 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
 }
 
+static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
+                               struct tcmsg *tcm, struct netlink_callback *cb,
+                               int *t_p, int s_t)
+{
+       struct qdisc_dump_args arg;
+
+       if (tc_qdisc_dump_ignore(q) ||
+           *t_p < s_t || !q->ops->cl_ops ||
+           (tcm->tcm_parent &&
+            TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
+               (*t_p)++;
+               return 0;
+       }
+       if (*t_p > s_t)
+               memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
+       arg.w.fn = qdisc_class_dump;
+       arg.skb = skb;
+       arg.cb = cb;
+       arg.w.stop  = 0;
+       arg.w.skip = cb->args[1];
+       arg.w.count = 0;
+       q->ops->cl_ops->walk(q, &arg.w);
+       cb->args[1] = arg.w.count;
+       if (arg.w.stop)
+               return -1;
+       (*t_p)++;
+       return 0;
+}
+
+static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
+                              struct tcmsg *tcm, struct netlink_callback *cb,
+                              int *t_p, int s_t)
+{
+       struct Qdisc *q;
+
+       if (!root)
+               return 0;
+
+       if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
+               return -1;
+
+       list_for_each_entry(q, &root->list, list) {
+               if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
+
 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
        struct net *net = sock_net(skb->sk);
-       int t;
-       int s_t;
+       struct netdev_queue *dev_queue;
        struct net_device *dev;
-       struct Qdisc *q;
-       struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
-       struct qdisc_dump_args arg;
+       int t, s_t;
 
        if (net != &init_net)
                return 0;
@@ -1306,28 +1584,15 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
        s_t = cb->args[0];
        t = 0;
 
-       list_for_each_entry(q, &dev->qdisc_list, list) {
-               if (t < s_t || !q->ops->cl_ops ||
-                   (tcm->tcm_parent &&
-                    TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
-                       t++;
-                       continue;
-               }
-               if (t > s_t)
-                       memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
-               arg.w.fn = qdisc_class_dump;
-               arg.skb = skb;
-               arg.cb = cb;
-               arg.w.stop  = 0;
-               arg.w.skip = cb->args[1];
-               arg.w.count = 0;
-               q->ops->cl_ops->walk(q, &arg.w);
-               cb->args[1] = arg.w.count;
-               if (arg.w.stop)
-                       break;
-               t++;
-       }
+       dev_queue = netdev_get_tx_queue(dev, 0);
+       if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
+               goto done;
 
+       dev_queue = &dev->rx_queue;
+       if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
+               goto done;
+
+done:
        cb->args[0] = t;
 
        dev_put(dev);
@@ -1415,7 +1680,7 @@ static int psched_show(struct seq_file *seq, void *v)
 
        hrtimer_get_res(CLOCK_MONOTONIC, &ts);
        seq_printf(seq, "%08x %08x %08x %08x\n",
-                  (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
+                  (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
                   1000000,
                   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
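
For context, below is a minimal stand-alone sketch of the size-table lookup that the new qdisc_calculate_pkt_len() in this patch performs. It is a user-space illustration only: the struct "sizespec" mirrors just the tc_sizespec fields the patch uses, and the table contents and parameter values in main() are invented for the example, not taken from the kernel.

#include <stdio.h>

struct sizespec {
	unsigned char	cell_log;	/* one slot covers 2^cell_log bytes */
	unsigned char	size_log;	/* table entries are scaled by 2^size_log */
	short		cell_align;	/* added to the length before slotting */
	int		overhead;	/* per-packet link-layer overhead */
	unsigned short	tsize;		/* number of entries in data[] */
};

static int calc_pkt_len(int skb_len, const struct sizespec *s,
			const unsigned short *data)
{
	int pkt_len = skb_len + s->overhead;
	int slot;

	if (s->tsize) {
		slot = pkt_len + s->cell_align;
		if (slot < 0)
			slot = 0;
		slot >>= s->cell_log;
		if (slot < s->tsize)
			pkt_len = data[slot];
		else	/* extrapolate past the end of the table */
			pkt_len = data[s->tsize - 1] * (slot / s->tsize) +
				  data[slot % s->tsize];
		pkt_len <<= s->size_log;
	}
	return pkt_len < 1 ? 1 : pkt_len;
}

int main(void)
{
	/* illustrative table: 8 slots of 64 bytes, entries scaled by 2^3 */
	unsigned short data[8] = { 16, 24, 32, 40, 48, 56, 64, 72 };
	struct sizespec s = {
		.cell_log = 6, .size_log = 3, .cell_align = 0,
		.overhead = 24, .tsize = 8,
	};

	printf("len 100  -> %d\n", calc_pkt_len(100, &s, data));   /* slot 1 */
	printf("len 1500 -> %d\n", calc_pkt_len(1500, &s, data));  /* beyond table */
	return 0;
}

A 100-byte packet lands in slot 1 and is billed as data[1] << size_log = 192 bytes; a 1500-byte packet falls past the 8-entry table, so the last entry is extrapolated, matching the overflow branch in the kernel function above.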