diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 9f2ace5..3846d65 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -128,7 +128,7 @@ struct cbq_class
        long                    avgidle;
        long                    deficit;        /* Saved deficit for WRR */
        psched_time_t           penalized;
-       struct gnet_stats_basic bstats;
+       struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        struct tc_cbq_xstats    xstats;
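
The bstats change above swaps in the packed counter layout so the in-kernel copy embedded in class/qdisc structs carries no tail padding and matches what gen_estimator expects. A minimal sketch of the two layouts, assuming the include/linux/gen_stats.h definitions of that era (quoted from memory, illustrative only):

        struct gnet_stats_basic {
                __u64   bytes;
                __u32   packets;
        };

        struct gnet_stats_basic_packed {
                __u64   bytes;
                __u32   packets;
        } __attribute__ ((packed));
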
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
            (cl = cbq_class_lookup(q, prio)) != NULL)
                return cl;
 
-       *qerr = NET_XMIT_BYPASS;
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        for (;;) {
                int result = 0;
                defmap = head->defaults;
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                case TC_ACT_RECLASSIFY:
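
Both cbq_classify() hunks above fold the classifier verdict into high bits of the NET_XMIT_* return value instead of overloading the code itself, so callers can tell a real drop from a stolen or bypassed packet. A sketch of the helpers this relies on, assuming the include/net/sch_generic.h definitions of that period (bit values quoted from memory):

        enum net_xmit_qdisc_t {
                __NET_XMIT_STOLEN = 0x00010000,
                __NET_XMIT_BYPASS = 0x00020000,
        };

        #ifdef CONFIG_NET_CLS_ACT
        #define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
        #else
        #define net_xmit_drop_count(e)  (1)
        #endif
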
@@ -370,7 +370,6 @@ static int
 cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
-       int len = skb->len;
        int uninitialized_var(ret);
        struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
@@ -378,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        q->rx_class = cl;
 #endif
        if (cl == NULL) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
@@ -387,51 +386,22 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
        cl->q->__parent = sch;
 #endif
-       if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
+       ret = qdisc_enqueue(skb, cl->q);
+       if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                sch->bstats.packets++;
-               sch->bstats.bytes+=len;
+               sch->bstats.bytes += qdisc_pkt_len(skb);
                cbq_mark_toplevel(q, cl);
                if (!cl->next_alive)
                        cbq_activate_class(cl);
                return ret;
        }
 
-       sch->qstats.drops++;
-       cbq_mark_toplevel(q, cl);
-       cl->qstats.drops++;
-       return ret;
-}
-
-static int
-cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-       struct cbq_sched_data *q = qdisc_priv(sch);
-       struct cbq_class *cl;
-       int ret;
-
-       if ((cl = q->tx_class) == NULL) {
-               kfree_skb(skb);
+       if (net_xmit_drop_count(ret)) {
                sch->qstats.drops++;
-               return NET_XMIT_CN;
-       }
-       q->tx_class = NULL;
-
-       cbq_mark_toplevel(q, cl);
-
-#ifdef CONFIG_NET_CLS_ACT
-       q->rx_class = cl;
-       cl->q->__parent = sch;
-#endif
-       if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
-               sch->q.qlen++;
-               sch->qstats.requeues++;
-               if (!cl->next_alive)
-                       cbq_activate_class(cl);
-               return 0;
+               cbq_mark_toplevel(q, cl);
+               cl->qstats.drops++;
        }
-       sch->qstats.drops++;
-       cl->qstats.drops++;
        return ret;
 }
 
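cbq_enqueue() stops caching skb->len and goes through qdisc_enqueue()/qdisc_pkt_len() instead: the packet length is stashed in the skb's qdisc control block when it enters the root qdisc (and may be adjusted by size tables), so inner qdiscs must not re-read skb->len. Roughly, per include/net/sch_generic.h of that era:

        struct qdisc_skb_cb {
                unsigned int    pkt_len;
                char            data[];
        };

        static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
        {
                return (struct qdisc_skb_cb *)skb->cb;
        }

        static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
        {
                return qdisc_skb_cb(skb)->pkt_len;
        }

Drop accounting also changes: only return codes for which net_xmit_drop_count() is non-zero (i.e. not __NET_XMIT_STOLEN) bump the drop counters. The removal of cbq_requeue() anticipates the requeue-to-peek conversion at the bottom of this diff.
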
@@ -517,6 +487,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        psched_tdiff_t delay = cl->undertime - q->now;
 
+       if (test_bit(__QDISC_STATE_DEACTIVATED,
+                    &qdisc_root_sleeping(cl->qdisc)->state))
+               return;
+
        if (!cl->delayed) {
                psched_time_t sched = q->now;
                ktime_t expires;
@@ -535,11 +509,12 @@ static void cbq_ovl_delay(struct cbq_class *cl)
                        q->pmask |= (1<<TC_CBQ_MAXPRIO);
 
                        expires = ktime_set(0, 0);
-                       expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
+                       expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
                        if (hrtimer_try_to_cancel(&q->delay_timer) &&
-                           ktime_to_ns(ktime_sub(q->delay_timer.expires,
-                                                 expires)) > 0)
-                               q->delay_timer.expires = expires;
+                           ktime_to_ns(ktime_sub(
+                                       hrtimer_get_expires(&q->delay_timer),
+                                       expires)) > 0)
+                               hrtimer_set_expires(&q->delay_timer, expires);
                        hrtimer_restart(&q->delay_timer);
                        cl->delayed = 1;
                        cl->xstats.overactions++;
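
cbq_ovl_delay() no longer pokes hrtimer.expires directly because range hrtimers split the expiry into a hard and a soft deadline behind accessors; PSCHED_US2NS() likewise becomes PSCHED_TICKS2NS() since psched time is counted in power-of-two-nanosecond ticks rather than microseconds. A sketch of the accessors, with field names quoted from memory (treat as illustrative, not authoritative):

        static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
        {
                timer->_expires = time;
                timer->_softexpires = time;
        }

        static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
        {
                return timer->_expires;
        }
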
@@ -645,19 +620,18 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
                ktime_t time;
 
                time = ktime_set(0, 0);
-               time = ktime_add_ns(time, PSCHED_US2NS(now + delay));
+               time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
                hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
        }
 
        sch->flags &= ~TCQ_F_THROTTLED;
-       netif_schedule(qdisc_dev(sch));
+       __netif_schedule(qdisc_root(sch));
        return HRTIMER_NORESTART;
 }
 
 #ifdef CONFIG_NET_CLS_ACT
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
-       int len = skb->len;
        struct Qdisc *sch = child->__parent;
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = q->rx_class;
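
cbq_undelay() now reschedules the root qdisc rather than the whole device: with the multiqueue rework there is one root qdisc per tx queue, so __netif_schedule() takes a Qdisc. The deactivation test added to cbq_ovl_delay() above serves the same rework, avoiding re-arming the delay timer while the root qdisc is being torn down. qdisc_root() is simply (sketch from include/net/sch_generic.h of that era):

        static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
        {
                return qdisc->dev_queue->qdisc;
        }
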
@@ -665,21 +639,24 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
        q->rx_class = NULL;
 
        if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+               int ret;
 
                cbq_mark_toplevel(q, cl);
 
                q->rx_class = cl;
                cl->q->__parent = sch;
 
-               if (cl->q->enqueue(skb, cl->q) == 0) {
+               ret = qdisc_enqueue(skb, cl->q);
+               if (ret == NET_XMIT_SUCCESS) {
                        sch->q.qlen++;
                        sch->bstats.packets++;
-                       sch->bstats.bytes+=len;
+                       sch->bstats.bytes += qdisc_pkt_len(skb);
                        if (!cl->next_alive)
                                cbq_activate_class(cl);
                        return 0;
                }
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(ret))
+                       sch->qstats.drops++;
                return 0;
        }
 
@@ -880,7 +857,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
                        if (skb == NULL)
                                goto skip_class;
 
-                       cl->deficit -= skb->len;
+                       cl->deficit -= qdisc_pkt_len(skb);
                        q->tx_class = cl;
                        q->tx_borrowed = borrow;
                        if (borrow != cl) {
@@ -888,11 +865,11 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
                                borrow->xstats.borrows++;
                                cl->xstats.borrows++;
 #else
-                               borrow->xstats.borrows += skb->len;
-                               cl->xstats.borrows += skb->len;
+                               borrow->xstats.borrows += qdisc_pkt_len(skb);
+                               cl->xstats.borrows += qdisc_pkt_len(skb);
 #endif
                        }
-                       q->tx_len = skb->len;
+                       q->tx_len = qdisc_pkt_len(skb);
 
                        if (cl->deficit <= 0) {
                                q->active[prio] = cl;
@@ -1176,7 +1153,7 @@ static void cbq_unlink_class(struct cbq_class *this)
                                this->tparent->children = NULL;
                }
        } else {
-               BUG_TRAP(this->sibling == this);
+               WARN_ON(this->sibling != this);
        }
 }
 
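The BUG_TRAP() conversions in this file invert their conditions because BUG_TRAP(x) complained when the assertion x was false, while WARN_ON(x) fires when x is true. Paraphrasing the retired macro (not an exact quote):

        #define BUG_TRAP(x)     WARN_ON(!(x))

        /* hence: BUG_TRAP(this->sibling == this) -> WARN_ON(this->sibling != this)
         *        BUG_TRAP(!cl->filters)          -> WARN_ON(cl->filters)
         */
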
@@ -1632,7 +1609,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                cl->xstats.undertime = cl->undertime - q->now;
 
        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-           gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+           gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;
 
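gnet_stats_copy_rate_est() grows a bstats argument so the core can verify that a rate estimator is actually registered for this counter pair before dumping it. The prototype, as best recalled from include/net/gen_stats.h at this point:

        int gnet_stats_copy_rate_est(struct gnet_dump *d,
                                     const struct gnet_stats_basic_packed *b,
                                     struct gnet_stats_rate_est *r);
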
@@ -1644,28 +1621,25 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 {
        struct cbq_class *cl = (struct cbq_class*)arg;
 
-       if (cl) {
-               if (new == NULL) {
-                       new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
-                                               &pfifo_qdisc_ops,
-                                               cl->common.classid);
-                       if (new == NULL)
-                               return -ENOBUFS;
-               } else {
+       if (new == NULL) {
+               new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+                                       &pfifo_qdisc_ops, cl->common.classid);
+               if (new == NULL)
+                       return -ENOBUFS;
+       } else {
 #ifdef CONFIG_NET_CLS_ACT
-                       if (cl->police == TC_POLICE_RECLASSIFY)
-                               new->reshape_fail = cbq_reshape_fail;
+               if (cl->police == TC_POLICE_RECLASSIFY)
+                       new->reshape_fail = cbq_reshape_fail;
 #endif
-               }
-               sch_tree_lock(sch);
-               *old = xchg(&cl->q, new);
-               qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-               qdisc_reset(*old);
-               sch_tree_unlock(sch);
-
-               return 0;
        }
-       return -ENOENT;
+       sch_tree_lock(sch);
+       *old = cl->q;
+       cl->q = new;
+       qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+       qdisc_reset(*old);
+       sch_tree_unlock(sch);
+
+       return 0;
 }
 
 static struct Qdisc *
@@ -1673,7 +1647,7 @@ cbq_leaf(struct Qdisc *sch, unsigned long arg)
 {
        struct cbq_class *cl = (struct cbq_class*)arg;
 
-       return cl ? cl->q : NULL;
+       return cl->q;
 }
 
 static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1700,7 +1674,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
 
-       BUG_TRAP(!cl->filters);
+       WARN_ON(cl->filters);
 
        tcf_destroy_chain(&cl->filter_list);
        qdisc_destroy(cl->q);
@@ -1744,12 +1718,13 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 
        if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
+               spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
                struct cbq_sched_data *q = qdisc_priv(sch);
 
-               spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+               spin_lock_bh(root_lock);
                if (q->rx_class == cl)
                        q->rx_class = NULL;
-               spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+               spin_unlock_bh(root_lock);
 #endif
 
                cbq_destroy_class(sch, cl);
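
cbq_put() switches from the per-device queue_lock, which no longer exists after the multiqueue rework, to the root qdisc's sleeping lock (the one that stays valid while the device is deactivated). A simplified sketch of the helper, assuming the include/net/sch_generic.h version of that era:

        static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
        {
                struct Qdisc *root = qdisc_root_sleeping(qdisc);

                return qdisc_lock(root);
        }
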
@@ -1786,11 +1761,23 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                }
 
                if (tb[TCA_CBQ_RATE]) {
-                       rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
+                       rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
+                                             tb[TCA_CBQ_RTAB]);
                        if (rtab == NULL)
                                return -EINVAL;
                }
 
+               if (tca[TCA_RATE]) {
+                       err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
+                                                   qdisc_root_sleeping_lock(sch),
+                                                   tca[TCA_RATE]);
+                       if (err) {
+                               if (rtab)
+                                       qdisc_put_rtab(rtab);
+                               return err;
+                       }
+               }
+
                /* Change class parameters */
                sch_tree_lock(sch);
 
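gen_replace_estimator() moves ahead of sch_tree_lock(): it can fail (allocation or bad TCA_RATE attributes), and doing it first lets the error be returned before any class state is modified, whereas the old call after sch_tree_unlock() silently ignored failures. Its prototype, quoted from memory for this kernel generation:

        int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
                                  struct gnet_stats_rate_est *rate_est,
                                  spinlock_t *stats_lock,
                                  struct nlattr *opt);

The same pattern appears further down in the new-class path, where gen_new_estimator() runs right after the kzalloc() so a failure can free the class and bail out cleanly.
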
@@ -1798,8 +1785,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                        cbq_deactivate_class(cl);
 
                if (rtab) {
-                       rtab = xchg(&cl->R_tab, rtab);
-                       qdisc_put_rtab(rtab);
+                       qdisc_put_rtab(cl->R_tab);
+                       cl->R_tab = rtab;
                }
 
                if (tb[TCA_CBQ_LSSOPT])
@@ -1826,10 +1813,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
                sch_tree_unlock(sch);
 
-               if (tca[TCA_RATE])
-                       gen_replace_estimator(&cl->bstats, &cl->rate_est,
-                                             &qdisc_dev(sch)->queue_lock,
-                                             tca[TCA_RATE]);
                return 0;
        }
 
@@ -1876,6 +1859,17 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        cl = kzalloc(sizeof(*cl), GFP_KERNEL);
        if (cl == NULL)
                goto failure;
+
+       if (tca[TCA_RATE]) {
+               err = gen_new_estimator(&cl->bstats, &cl->rate_est,
+                                       qdisc_root_sleeping_lock(sch),
+                                       tca[TCA_RATE]);
+               if (err) {
+                       kfree(cl);
+                       goto failure;
+               }
+       }
+
        cl->R_tab = rtab;
        rtab = NULL;
        cl->refcnt = 1;
@@ -1917,10 +1911,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
        qdisc_class_hash_grow(sch, &q->clhash);
 
-       if (tca[TCA_RATE])
-               gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                 &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
-
        *arg = (unsigned long)cl;
        return 0;
 
@@ -1966,8 +1956,11 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
        cbq_rmprio(q, cl);
        sch_tree_unlock(sch);
 
-       if (--cl->refcnt == 0)
-               cbq_destroy_class(sch, cl);
+       BUG_ON(--cl->refcnt == 0);
+       /*
+        * This shouldn't happen: we "hold" one cops->get() when called
+        * from tc_ctl_tclass; the destroy method is done from cops->put().
+        */
 
        return 0;
 }
@@ -2054,7 +2047,7 @@ static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
        .priv_size      =       sizeof(struct cbq_sched_data),
        .enqueue        =       cbq_enqueue,
        .dequeue        =       cbq_dequeue,
-       .requeue        =       cbq_requeue,
+       .peek           =       qdisc_peek_dequeued,
        .drop           =       cbq_drop,
        .init           =       cbq_init,
        .reset          =       cbq_reset,
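
The requeue() hook is dropped from Qdisc_ops in favour of peek(); CBQ uses the generic qdisc_peek_dequeued(), which dequeues one packet and parks it until the real dequeue claims it, so a parent qdisc can look ahead without a requeue path. As best recalled from include/net/sch_generic.h (slightly trimmed):

        static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
        {
                /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
                if (!sch->gso_skb) {
                        sch->gso_skb = sch->dequeue(sch);
                        if (sch->gso_skb)
                                /* it's still part of the queue */
                                sch->q.qlen++;
                }

                return sch->gso_skb;
        }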