net: correct off-by-one write allocations reports
[safe/jmp/linux-2.6] net/sched/sch_sfq.c
index 8589da6..8706920 100644
@@ -119,7 +119,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
        u32 h, h2;
 
        switch (skb->protocol) {
-       case __constant_htons(ETH_P_IP):
+       case htons(ETH_P_IP):
        {
                const struct iphdr *iph = ip_hdr(skb);
                h = iph->daddr;
@@ -134,7 +134,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                        h2 ^= *(((u32*)iph) + iph->ihl);
                break;
        }
-       case __constant_htons(ETH_P_IPV6):
+       case htons(ETH_P_IPV6):
        {
                struct ipv6hdr *iph = ipv6_hdr(skb);
                h = iph->daddr.s6_addr32[3];
@@ -149,7 +149,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                break;
        }
        default:
-               h = (unsigned long)skb->dst ^ skb->protocol;
+               h = (unsigned long)skb_dst(skb) ^ skb->protocol;
                h2 = (unsigned long)skb->sk;
        }
 
@@ -171,14 +171,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
        if (!q->filter_list)
                return sfq_hash(q, skb) + 1;
 
-       *qerr = NET_XMIT_BYPASS;
+       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, q->filter_list, &res);
        if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return 0;
                }
@@ -281,11 +281,11 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        struct sfq_sched_data *q = qdisc_priv(sch);
        unsigned int hash;
        sfq_index x;
-       int ret;
+       int uninitialized_var(ret);
 
        hash = sfq_classify(skb, sch, &ret);
        if (hash == 0) {
-               if (ret == NET_XMIT_BYPASS)
+               if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
@@ -329,71 +329,20 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        return NET_XMIT_CN;
 }
 
-static int
-sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
+static struct sk_buff *
+sfq_peek(struct Qdisc *sch)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
-       unsigned int hash;
-       sfq_index x;
-       int ret;
-
-       hash = sfq_classify(skb, sch, &ret);
-       if (hash == 0) {
-               if (ret == NET_XMIT_BYPASS)
-                       sch->qstats.drops++;
-               kfree_skb(skb);
-               return ret;
-       }
-       hash--;
-
-       x = q->ht[hash];
-       if (x == SFQ_DEPTH) {
-               q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
-               q->hash[x] = hash;
-       }
-
-       sch->qstats.backlog += qdisc_pkt_len(skb);
-       __skb_queue_head(&q->qs[x], skb);
-       /* If selected queue has length q->limit+1, this means that
-        * all another queues are empty and we do simple tail drop.
-        * This packet is still requeued at head of queue, tail packet
-        * is dropped.
-        */
-       if (q->qs[x].qlen > q->limit) {
-               skb = q->qs[x].prev;
-               __skb_unlink(skb, &q->qs[x]);
-               sch->qstats.drops++;
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
-               kfree_skb(skb);
-               return NET_XMIT_CN;
-       }
+       sfq_index a;
 
-       sfq_inc(q, x);
-       if (q->qs[x].qlen == 1) {               /* The flow is new */
-               if (q->tail == SFQ_DEPTH) {     /* It is the first flow */
-                       q->tail = x;
-                       q->next[x] = x;
-                       q->allot[x] = q->quantum;
-               } else {
-                       q->next[x] = q->next[q->tail];
-                       q->next[q->tail] = x;
-                       q->tail = x;
-               }
-       }
-
-       if (++sch->q.qlen <= q->limit) {
-               sch->qstats.requeues++;
-               return 0;
-       }
+       /* No active slots */
+       if (q->tail == SFQ_DEPTH)
+               return NULL;
 
-       sch->qstats.drops++;
-       sfq_drop(sch);
-       return NET_XMIT_CN;
+       a = q->next[q->tail];
+       return skb_peek(&q->qs[a]);
 }
 
-
-
-
 static struct sk_buff *
 sfq_dequeue(struct Qdisc *sch)
 {
@@ -486,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        int i;
 
        q->perturb_timer.function = sfq_perturbation;
-       q->perturb_timer.data = (unsigned long)sch;;
+       q->perturb_timer.data = (unsigned long)sch;
        init_timer_deferrable(&q->perturb_timer);
 
        for (i = 0; i < SFQ_HASH_DIVISOR; i++)
@@ -624,7 +573,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
        .priv_size      =       sizeof(struct sfq_sched_data),
        .enqueue        =       sfq_enqueue,
        .dequeue        =       sfq_dequeue,
-       .requeue        =       sfq_requeue,
+       .peek           =       sfq_peek,
        .drop           =       sfq_drop,
        .init           =       sfq_init,
        .reset          =       sfq_reset,