pkt_sched: gen_estimator: Dont report fake rate estimators
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 39fa285..40408d5 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -188,7 +188,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
        }
 
        q->packetsin++;
-       q->bytesin += skb->len;
+       q->bytesin += qdisc_pkt_len(skb);
 
        if (gred_wred_mode(t))
                gred_load_wred_set(t, q);
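
The conversion from skb->len to qdisc_pkt_len() matters because the qdisc
layer may account a packet at a size other than its raw skb->len, for
example when a size table (tc stab) is attached. The accessor just reads a
length the core caches in the skb control block before ->enqueue() is
called; roughly, from include/net/sch_generic.h of this era:

struct qdisc_skb_cb {
	unsigned int	pkt_len;	/* length the qdisc layer accounts */
	char		data[];
};

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

/* Accounted length: skb->len, possibly adjusted by a size table. */
static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

So q->bytesin now counts the same bytes the rest of the qdisc stack
accounts, not the raw wire length.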
@@ -226,8 +226,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                        break;
        }
 
-       if (q->backlog + skb->len <= q->limit) {
-               q->backlog += skb->len;
+       if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
+               q->backlog += qdisc_pkt_len(skb);
                return qdisc_enqueue_tail(skb, sch);
        }
 
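The limit test and the backlog charge switch to the same accessor, which is
the real point of the conversion: whatever is added to q->backlog at
enqueue must be credited back at dequeue and drop time with the identical
metric, or the per-virtual-queue backlog drifts whenever the two lengths
differ. A minimal sketch of the invariant, using hypothetical helper names:

/* Hypothetical helpers: charge and credit the VQ backlog with the
 * same metric; mixing skb->len with qdisc_pkt_len(skb) would leak
 * backlog whenever a size table makes them differ.
 */
static void gred_vq_charge(struct gred_sched_data *q, struct sk_buff *skb)
{
	q->backlog += qdisc_pkt_len(skb);
}

static void gred_vq_credit(struct gred_sched_data *q, struct sk_buff *skb)
{
	q->backlog -= qdisc_pkt_len(skb);
}
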
@@ -240,26 +240,6 @@ congestion_drop:
        return NET_XMIT_CN;
 }
 
-static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-       struct gred_sched *t = qdisc_priv(sch);
-       struct gred_sched_data *q;
-       u16 dp = tc_index_to_dp(skb);
-
-       if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
-                              "for requeue, screwing up backlog.\n",
-                              tc_index_to_dp(skb));
-       } else {
-               if (red_is_idling(&q->parms))
-                       red_end_of_idle_period(&q->parms);
-               q->backlog += skb->len;
-       }
-
-       return qdisc_requeue(skb, sch);
-}
-
 static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 {
        struct sk_buff *skb;
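
gred_requeue() disappears as part of the tree-wide removal of the
->requeue() qdisc operation. Requeueing forced every qdisc to undo and redo
its own state (note the backlog and idle-period fix-ups above, still done
on raw skb->len); peeking avoids that entirely by looking at the head
packet without removing it. For qdiscs built on the default skb list, the
core supplies the helper wired up at the bottom of this patch; roughly:

/* From include/net/sch_generic.h (roughly): return the head skb
 * without dequeuing it, so nothing has to be rolled back later.
 */
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}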
@@ -277,7 +257,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
                                       "VQ 0x%x after dequeue, screwing up "
                                       "backlog.\n", tc_index_to_dp(skb));
                } else {
-                       q->backlog -= skb->len;
+                       q->backlog -= qdisc_pkt_len(skb);
 
                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->parms);
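
Crediting the backlog with qdisc_pkt_len() also keeps the RED idle-period
handling honest: red_start_of_idle_period() only runs once q->backlog
genuinely reaches zero, which requires the dequeue credit to match the
enqueue charge exactly. For reference, the idle-period helpers are simple
timestamp setters; roughly, from include/net/red.h:

static inline void red_start_of_idle_period(struct red_parms *p)
{
	p->qidlestart = psched_get_time();	/* queue just went empty */
}

static inline void red_end_of_idle_period(struct red_parms *p)
{
	p->qidlestart = PSCHED_PASTPERFECT;	/* traffic resumed */
}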
@@ -299,7 +279,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
 
        skb = qdisc_dequeue_tail(sch);
        if (skb) {
-               unsigned int len = skb->len;
+               unsigned int len = qdisc_pkt_len(skb);
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);
 
@@ -602,7 +582,7 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
        .priv_size      =       sizeof(struct gred_sched),
        .enqueue        =       gred_enqueue,
        .dequeue        =       gred_dequeue,
-       .requeue        =       gred_requeue,
+       .peek           =       qdisc_peek_head,
        .drop           =       gred_drop,
        .init           =       gred_init,
        .reset          =       gred_reset,
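
With the ops table converted, GRED no longer implements requeue at all, and
anything stacked above it inspects the head packet instead of dequeuing and
pushing back. A hypothetical caller-side sketch (the function name and
budget parameter are assumptions for illustration, not kernel API):

/* Hypothetical caller: check the next packet without disturbing the
 * queue; on return the skb, if any, is still at the head.
 */
static bool next_packet_fits(struct Qdisc *sch, unsigned int budget)
{
	struct sk_buff *skb = sch->ops->peek(sch);

	return skb && qdisc_pkt_len(skb) <= budget;
}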