/* vim: ts=8 sw=8
 * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *                      HTB support at LARTC mailing list
 *              Ondrej Kraus, <krauso@barr.cz>
 *                      found missing INIT_QDISC(htb)
 *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *                      helped a lot to locate nasty class stall bug
 *              Andi Kleen, Jamal Hadi, Bert Hubert
 *                      code review and helpful comments on shaping
 *              Tomasz Wrona, <tw@eter.tym.pl>
 *                      created test case so that I was able to fix nasty bug
 *              Wilfried Weissmann
 *                      spotted bug in dequeue code and helped with fix
 *              Jiri Fojtasek
 *                      fixed requeue routine
 *              and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves are ALWAYS at level 0 and root
    classes are at level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/

#define HTB_HSIZE 16    /* classid hash size */
#define HTB_EWMAC 2     /* rate average over HTB_EWMAC*HTB_HSIZE sec */
#undef HTB_DEBUG        /* compile debugging support (activated by tc tool) */
#define HTB_RATECM 1    /* whether to use rate computer */
#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
#define HTB_VER 0x30011 /* major must match the version number supplied by the tc tool */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* debugging support; S is subsystem, these are defined:
  0 - netlink messages
  1 - enqueue
  2 - drop & requeue
  3 - dequeue main
  4 - dequeue one prio DRR part
  5 - dequeue class accounting
  6 - class overlimit status computation
  7 - hint tree
  8 - event queue
 10 - rate estimator
 11 - classifier
 12 - fast dequeue cache

 L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
 the q->debug u32 contains 16 2-bit fields, one per subsystem, starting
 from the LSB
 */
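/* Illustrative helper (exposition only, not part of the original file):
 * the 32-bit debug word packs sixteen 2-bit level fields, subsystem 0 at
 * the LSB, so extracting subsystem S shifts by 2*S and masks with 3 --
 * exactly what HTB_DBG_COND below does.  For example, level 2 for
 * subsystem 3 plus level 1 for subsystem 1 gives (2<<6)|(1<<2) == 0x84. */
static inline unsigned htb_example_debug_level(u32 debug, int subsys)
{
        return (debug >> (2 * subsys)) & 3;     /* 0=off .. 3=full */
}
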
#ifdef HTB_DEBUG
#define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
#define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
        printk(KERN_DEBUG FMT,##ARG)
#define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
#define HTB_PASSQ q,
#define HTB_ARGQ struct htb_sched *q,
#define static
#undef __inline__
#define __inline__
#undef inline
#define inline
#define HTB_CMAGIC 0xFEFAFEF1
#define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
                if ((N)->rb_color == -1) break; \
                rb_erase(N,R); \
                (N)->rb_color = -1; } while (0)
#else
#define HTB_DBG_COND(S,L) (0)
#define HTB_DBG(S,L,FMT,ARG...)
#define HTB_PASSQ
#define HTB_ARGQ
#define HTB_CHCL(cl)
#define htb_safe_rb_erase(N,R) rb_erase(N,R)
#endif


/* used internally to keep the status of a single class */
enum htb_cmode {
    HTB_CANT_SEND,              /* class can't send and can't borrow */
    HTB_MAY_BORROW,             /* class can't send but may borrow */
    HTB_CAN_SEND                /* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class
{
#ifdef HTB_DEBUG
        unsigned magic;
#endif
    /* general class parameters */
    u32 classid;
    struct gnet_stats_basic bstats;
    struct gnet_stats_queue qstats;
    struct gnet_stats_rate_est rate_est;
    struct tc_htb_xstats xstats;/* our special stats */
    int refcnt;                 /* usage count of this class */

#ifdef HTB_RATECM
    /* rate measurement counters */
    unsigned long rate_bytes,sum_bytes;
    unsigned long rate_packets,sum_packets;
#endif

    /* topology */
    int level;                  /* our level (see above) */
    struct htb_class *parent;   /* parent class */
    struct list_head hlist;     /* classid hash list item */
    struct list_head sibling;   /* sibling list item */
    struct list_head children;  /* children list */

    union {
            struct htb_class_leaf {
                    struct Qdisc *q;
                    int prio;
                    int aprio;
                    int quantum;
                    int deficit[TC_HTB_MAXDEPTH];
                    struct list_head drop_list;
            } leaf;
            struct htb_class_inner {
                    struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
                    struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
                    /* When a class changes from state 1->2 and disconnects
                       from its parent's feed, we lose the ptr value and would
                       start from the first child again. Here we store the
                       classid of the last valid ptr (used when ptr is NULL). */
                    u32 last_ptr_id[TC_HTB_NUMPRIO];
            } inner;
    } un;
    struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
    struct rb_node pq_node;              /* node for event queue */
    unsigned long pq_key;       /* the same type as jiffies global */

    int prio_activity;          /* for which prios are we active */
    enum htb_cmode cmode;       /* current mode of the class */

    /* class attached filters */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int warned;         /* only one warning about non work conserving .. */

    /* token bucket parameters */
    struct qdisc_rate_table *rate;      /* rate table of the class itself */
    struct qdisc_rate_table *ceil;      /* ceiling rate (limits borrows too) */
    long buffer,cbuffer;                /* token bucket depth/rate */
    long mbuffer;                       /* max wait time */
    long tokens,ctokens;                /* current number of tokens */
    psched_time_t t_c;                  /* checkpoint time */
};

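/* Illustrative check (exposition only, not part of the original code):
 * under the level invariant described at the top of the file, every
 * non-root class sits exactly one level below its parent. */
static inline int htb_example_level_ok(const struct htb_class *cl)
{
        return cl->parent == NULL || cl->parent->level == cl->level + 1;
}
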
/* TODO: maybe compute rate when size is too large .. or drop ? */
static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
        int size)
{
    int slot = size >> rate->rate.cell_log;
    if (slot > 255) {
        cl->xstats.giants++;
        slot = 255;
    }
    return rate->data[slot];
}

struct htb_sched
{
    struct list_head root;                      /* root classes list */
    struct list_head hash[HTB_HSIZE];           /* hashed by classid */
    struct list_head drops[TC_HTB_NUMPRIO];     /* active leaves (for drops) */

    /* self list - roots of self generating tree */
    struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    int row_mask[TC_HTB_MAXDEPTH];
    struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

    /* self wait list - roots of wait PQs per row */
    struct rb_root wait_pq[TC_HTB_MAXDEPTH];

    /* time of nearest event per level (row) */
    unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

    /* cached value of jiffies in dequeue */
    unsigned long jiffies;

    /* whether we hit a non-work-conserving class during this dequeue;
       used to disable the mindelay complaint in dequeue */
    int nwc_hit;

    int defcls;         /* class where unclassified flows go */
    u32 debug;          /* subsystem debug levels */

    /* filters for qdisc itself */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int rate2quantum;           /* quant = rate / rate2quantum */
    psched_time_t now;          /* cached dequeue time */
    struct timer_list timer;    /* send delay timer */
#ifdef HTB_RATECM
    struct timer_list rttim;    /* rate computer timer */
    int recmp_bucket;           /* which hash bucket to recompute next */
#endif

    /* non shaped skbs; let them go directly thru */
    struct sk_buff_head direct_queue;
    int direct_qlen;  /* max qlen of above */

    long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static __inline__ int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
 #error "Declare new hash for your HTB_HSIZE"
#endif
    h ^= h>>8;  /* stolen from cbq_hash */
    h ^= h>>4;
    return h & 0xf;
}

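/* Worked example (exposition only): for handle 0x10012 (classid 1:12),
 * h ^= h>>8 gives 0x10112, h ^= h>>4 gives 0x11103, and masking with 0xf
 * selects bucket 3.  Folding the upper bits in keeps handles that differ
 * only in the major number from all landing in the same bucket. */
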
/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct list_head *p;
        if (TC_H_MAJ(handle) != sch->handle)
                return NULL;

        list_for_each (p,q->hash+htb_hash(handle)) {
                struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                if (cl->classid == handle)
                        return cl;
        }
        return NULL;
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in skb->priority.
 * Then we examine filters in the qdisc and in inner nodes (if a higher
 * filter points to the inner node). If we end up with classid MAJOR:0 we
 * enqueue the skb into the special internal fifo (direct). These packets
 * then go directly thru. If we still have no valid leaf we try to use the
 * MAJOR:default leaf. If that is still unsuccessful we finish and return
 * the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
        return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow selecting a class by setting skb->priority to a valid classid;
           note that nfmark can be used too by attaching an fw filter with no
           rules in it */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;  /* X:0 (direct flow) selected */
        if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
                return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                case TC_ACT_SHOT:
                        return NULL;
                }
#elif defined(CONFIG_NET_CLS_POLICE)
                if (result == TC_POLICE_SHOT)
                        return HTB_DIRECT;
#endif
                if ((cl = (void*)res.class) == NULL) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;  /* X:0 (direct flow) */
                        if ((cl = htb_find(res.classid,sch)) == NULL)
                                break; /* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl; /* we hit a leaf; return it */

                /* we got an inner class; apply the inner filter chain */
                tcf = cl->filter_list;
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
        if (!cl || cl->level)
                return HTB_DIRECT; /* bad default .. this is a safe bet */
        return cl;
}

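/* Example (exposition only): with qdisc handle 1:0, an application that
 * sets skb->priority to TC_H_MAKE(1<<16, 0x12) (classid 1:12) bypasses all
 * the filters above, provided 1:12 exists and is a leaf; setting it to the
 * qdisc handle itself (1:0) selects the direct queue. */
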
#ifdef HTB_DEBUG
static void htb_next_rb_node(struct rb_node **n);
#define HTB_DUMTREE(root,memb) if(root) { \
        struct rb_node *n = (root)->rb_node; \
        while (n->rb_left) n = n->rb_left; \
        while (n) { \
                struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
                printk(" %x",cl->classid); htb_next_rb_node (&n); \
        } }

static void htb_debug_dump (struct htb_sched *q)
{
        int i,p;
        printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
        /* rows */
        for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
                printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
                for (p=0;p<TC_HTB_NUMPRIO;p++) {
                        if (!q->row[i][p].rb_node) continue;
                        printk(" p%d:",p);
                        HTB_DUMTREE(q->row[i]+p,node[p]);
                }
                printk("\n");
        }
        /* classes */
        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *l;
                list_for_each (l,q->hash+i) {
                        struct htb_class *cl = list_entry(l,struct htb_class,hlist);
                        long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
                        printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
                                        "pa=%x f:",
                                cl->classid,cl->cmode,cl->tokens,cl->ctokens,
                                cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
                                cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
                        if (cl->level)
                        for (p=0;p<TC_HTB_NUMPRIO;p++) {
                                if (!cl->un.inner.feed[p].rb_node) continue;
                                printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
                                HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
                        }
                        printk("\n");
                }
        }
}
#endif
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * The routine adds the class to the list (actually a tree) sorted by
 * classid. Make sure the class is not already on such a list for the
 * given prio.
 */
static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
                struct htb_class *cl,int prio)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;
        HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
#ifdef HTB_DEBUG
        if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
        HTB_CHCL(cl);
        if (*p) {
                struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
                HTB_CHCL(x);
        }
#endif
        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);
                HTB_CHCL(c);
                if (cl->classid > c->classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode in cl->pq_key microseconds. Make sure the
 * class is not already in the queue.
 */
static void htb_add_to_wait_tree (struct htb_sched *q,
                struct htb_class *cl,long delay,int debug_hint)
{
        struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
        HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
#ifdef HTB_DEBUG
        if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
        HTB_CHCL(cl);
        if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
                printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
#endif
        cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
        if (cl->pq_key == q->jiffies)
                cl->pq_key++;

        /* update the nearest event cache */
        if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (time_after_eq(cl->pq_key, c->pq_key))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static void htb_next_rb_node(struct rb_node **n)
{
        *n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to the row at the priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                struct htb_class *cl,int mask)
{
        HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
                        cl->classid,mask,q->row_mask[cl->level]);
        HTB_CHCL(cl);
        q->row_mask[cl->level] |= mask;
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
        }
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from the row at the priorities marked in mask.
 * It does nothing if mask == 0.
 */
static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
                struct htb_class *cl,int mask)
{
        int m = 0;
        HTB_CHCL(cl);
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                if (q->ptr[cl->level][prio] == cl->node+prio)
                        htb_next_rb_node(q->ptr[cl->level]+prio);
                htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
                if (!q->row[cl->level][prio].rb_node)
                        m |= 1 << prio;
        }
        HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
                        cl->classid,mask,q->row_mask[cl->level],m);
        q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates an active class's feed chain
 *
 * The class is connected to its ancestors and/or the appropriate rows
 * for the priorities in which it participates. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m,mask = cl->prio_activity;
        HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
        HTB_CHCL(cl);

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                HTB_CHCL(p);
                m = mask; while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use, so
                                   clear the bit in mask; parent is already OK */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
                }
                HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                p->classid,p->prio_activity,mask,p->cmode);
                p->prio_activity |= mask;
                cl = p; p = cl->parent;
                HTB_CHCL(cl);
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q,cl,mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. The class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m,mask = cl->prio_activity;
        HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
        HTB_CHCL(cl);

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask; mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.ptr[prio] == cl->node+prio) {
                                /* we are removing a child the parent feed
                                   points to; forget the pointer but remember
                                   the classid */
                                p->un.inner.last_ptr_id[prio] = cl->classid;
                                p->un.inner.ptr[prio] = NULL;
                        }

                        htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);

                        if (!p->un.inner.feed[prio].rb_node)
                                mask |= 1 << prio;
                }
                HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                p->classid,p->prio_activity,mask,p->cmode);
                p->prio_activity &= ~mask;
                cl = p; p = cl->parent;
                HTB_CHCL(cl);
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q,cl,mask);
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If the mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
 * from now to the time when cl will change its state.
 * It is also worth noting that the class mode doesn't change simply
 * at cl->{c,}tokens == 0; rather there can be hysteresis over the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static __inline__ enum htb_cmode
htb_class_mode(struct htb_class *cl,long *diff)
{
    long toks;

    if ((toks = (cl->ctokens + *diff)) < (
#if HTB_HYSTERESIS
            cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
#endif
            0)) {
            *diff = -toks;
            return HTB_CANT_SEND;
    }
    if ((toks = (cl->tokens + *diff)) >= (
#if HTB_HYSTERESIS
            cl->cmode == HTB_CAN_SEND ? -cl->buffer :
#endif
            0))
            return HTB_CAN_SEND;

    *diff = -toks;
    return HTB_MAY_BORROW;
}

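/* Worked example (exposition only): without hysteresis a class flips
 * between CAN_SEND and MAY_BORROW every time tokens crosses zero.  With
 * HTB_HYSTERESIS, a class that is currently HTB_CAN_SEND keeps that mode
 * until cl->tokens + diff falls below -cl->buffer, and a class not in
 * HTB_CANT_SEND only enters it once cl->ctokens + diff falls below
 * -cl->cbuffer, so small oscillations around zero cause no transitions. */
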
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed list linkage, change
 * the mode and add the class to the wait event queue if appropriate.
 * The new mode should be different from the old one and cl->pq_key has
 * to be valid if changing to a mode other than HTB_CAN_SEND
 * (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl,diff);

        HTB_CHCL(cl);
        HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);

        if (new_mode == cl->cmode)
                return;

        if (cl->prio_activity) { /* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q,cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q,cl);
        } else
                cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * The routine learns the (new) priority of the leaf and activates the
 * feed chain for that prio. It can safely be called on an already active
 * leaf. It also adds the leaf into the droplist.
 */
static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
{
        BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
        HTB_CHCL(cl);
        if (!cl->prio_activity) {
                cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
                htb_activate_prios(q,cl);
                list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
        }
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that the leaf is active. In other words, it can't be called
 * with a non-active leaf. It also removes the class from the drop list.
 */
static __inline__ void
htb_deactivate(struct htb_sched *q,struct htb_class *cl)
{
        BUG_TRAP(cl->prio_activity);
        HTB_CHCL(cl);
        htb_deactivate_prios(q,cl);
        cl->prio_activity = 0;
        list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    int ret;
    struct htb_sched *q = qdisc_priv(sch);
    struct htb_class *cl = htb_classify(skb,sch,&ret);

    if (cl == HTB_DIRECT) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen) {
            __skb_queue_tail(&q->direct_queue, skb);
            q->direct_pkts++;
        } else {
            kfree_skb(skb);
            sch->qstats.drops++;
            return NET_XMIT_DROP;
        }
#ifdef CONFIG_NET_CLS_ACT
    } else if (!cl) {
        if (ret == NET_XMIT_BYPASS)
                sch->qstats.drops++;
        kfree_skb (skb);
        return ret;
#endif
    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->qstats.drops++;
        cl->qstats.drops++;
        return NET_XMIT_DROP;
    } else {
        cl->bstats.packets++; cl->bstats.bytes += skb->len;
        htb_activate (q,cl);
    }

    sch->q.qlen++;
    sch->bstats.packets++; sch->bstats.bytes += skb->len;
    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}

/* TODO: requeuing a packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = qdisc_priv(sch);
    int ret =  NET_XMIT_SUCCESS;
    struct htb_class *cl = htb_classify(skb,sch, &ret);
    struct sk_buff *tskb;

    if (cl == HTB_DIRECT || !cl) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen && cl) {
            __skb_queue_head(&q->direct_queue, skb);
        } else {
            __skb_queue_head(&q->direct_queue, skb);
            tskb = __skb_dequeue_tail(&q->direct_queue);
            kfree_skb (tskb);
            sch->qstats.drops++;
            return NET_XMIT_CN;
        }
    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->qstats.drops++;
        cl->qstats.drops++;
        return NET_XMIT_DROP;
    } else
            htb_activate (q,cl);

    sch->q.qlen++;
    sch->qstats.requeues++;
    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}

static void htb_timer(unsigned long arg)
{
    struct Qdisc *sch = (struct Qdisc*)arg;
    sch->flags &= ~TCQ_F_THROTTLED;
    wmb();
    netif_schedule(sch->dev);
}

#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
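/* Exposition (added, not in the original): RT_GEN is an exponential moving
 * average with coefficient 1/HTB_EWMAC.  The timer below fires once per
 * second and rescans one hash bucket, so each class is updated once every
 * HTB_HSIZE seconds; in steady state R converges to HTB_EWMAC * D, which
 * is why htb_dump_class_stats() divides by HTB_EWMAC*HTB_HSIZE to report
 * bytes (or packets) per second. */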
static void htb_rate_timer(unsigned long arg)
{
        struct Qdisc *sch = (struct Qdisc*)arg;
        struct htb_sched *q = qdisc_priv(sch);
        struct list_head *p;

        /* lock queue so that we can muck with it */
        HTB_QLOCK(sch);
        HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);

        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);

        /* scan and recompute one bucket at a time */
        if (++q->recmp_bucket >= HTB_HSIZE)
                q->recmp_bucket = 0;
        list_for_each (p,q->hash+q->recmp_bucket) {
                struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
                                cl->classid,cl->sum_bytes,cl->sum_packets);
                RT_GEN (cl->sum_bytes,cl->rate_bytes);
                RT_GEN (cl->sum_packets,cl->rate_packets);
        }
        HTB_QUNLOCK(sch);
}
#endif

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * The routine assumes that a packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to the ceil leaky bucket for
 * the leaf and all ancestors and to the rate bucket for ancestors at levels
 * "level" and higher. It also handles a possible change of mode resulting
 * from the update. Note that the mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue.
 * In such a case we remove the class from the event queue first.
 */
static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
                int level,int bytes)
{
        long toks,diff;
        enum htb_cmode old_mode;
        HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
        if (toks > cl->B) toks = cl->B; \
        toks -= L2T(cl, cl->R, bytes); \
        if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
        cl->T = toks
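
        /* Exposition (added): for the own-rate bucket, HTB_ACCNT(tokens,
         * buffer,rate) expands to
         *     toks = diff + cl->tokens;                  refill by elapsed time
         *     if (toks > cl->buffer) toks = cl->buffer;  clamp to bucket depth
         *     toks -= L2T(cl, cl->rate, bytes);          pay for this packet
         *     if (toks <= -cl->mbuffer) toks = 1 - cl->mbuffer;  bound the debt
         *     cl->tokens = toks;
         */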

        while (cl) {
                HTB_CHCL(cl);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
                                       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
                                       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
#endif
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                if (cl->level >= level) {
                        if (cl->level == level) cl->xstats.lends++;
                        HTB_ACCNT (tokens,buffer,rate);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff; /* we moved t_c; update tokens */
                }
                HTB_ACCNT (ctokens,cbuffer,ceil);
                cl->t_c = q->now;
                HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);

                old_mode = cl->cmode; diff = 0;
                htb_change_class_mode(q,cl,&diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree (q,cl,diff,1);
                }

#ifdef HTB_RATECM
                /* update rate counters */
                cl->sum_bytes += bytes; cl->sum_packets++;
#endif

                /* update byte stats except for leaves which are already updated */
                if (cl->level) {
                        cl->bstats.bytes += bytes;
                        cl->bstats.packets++;
                }
                cl = cl->parent;
        }
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns the
 * number of jiffies until the next pending event (0 if there is no event
 * in the pq).
 * Note: applied are events which have cl->pq_key <= q->jiffies.
 */
static long htb_do_events(struct htb_sched *q,int level)
{
        int i;
        HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
                        level,q->wait_pq[level].rb_node,q->row_mask[level]);
        for (i = 0; i < 500; i++) {
                struct htb_class *cl;
                long diff;
                struct rb_node *p = q->wait_pq[level].rb_node;
                if (!p) return 0;
                while (p->rb_left) p = p->rb_left;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (time_after(cl->pq_key, q->jiffies)) {
                        HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
                        return cl->pq_key - q->jiffies;
                }
                htb_safe_rb_erase(p,q->wait_pq+level);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
                                       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
                                       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
#endif
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                htb_change_class_mode(q,cl,&diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree (q,cl,diff,2);
        }
        if (net_ratelimit())
                printk(KERN_WARNING "htb: too many events !\n");
        return HZ/10;
}

/* Returns class->node+prio from the id-tree where the class's id is >= id.
   NULL if no such node exists. */
static struct rb_node *
htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
{
        struct rb_node *r = NULL;
        while (n) {
                struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
                if (id == cl->classid) return n;

                if (id > cl->classid) {
                        n = n->rb_right;
                } else {
                        r = n;
                        n = n->rb_left;
                }
        }
        return r;
}

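/* Example (exposition only): in an id-tree holding classids {2, 5, 9},
 * a lookup with id=6 walks right past 5, then remembers 9 as the last node
 * seen while descending left, so it returns 9; id=10 returns NULL.  This
 * is a plain successor search used to resume DRR after a ptr was
 * invalidated. */
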
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf the current feed pointer points to.
 */
static struct htb_class *
htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
{
        int i;
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH],*sp = stk;

        BUG_TRAP(tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;
        sp->pid = pid;

        for (i = 0; i < 65535; i++) {
                HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);

                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but the id is valid - try to
                           recover the original or next ptr */
                        *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
                }
                *sp->pid = 0; /* ptr is valid now; drop this hint as it can
                                 become stale quickly */
                if (!*sp->pptr) { /* we are at the right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
                                htb_next_rb_node (sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
                        HTB_CHCL(cl);
                        if (!cl->level)
                                return cl;
                        (++sp)->root = cl->un.inner.feed[prio].rb_node;
                        sp->pptr = cl->un.inner.ptr+prio;
                        sp->pid = cl->un.inner.last_ptr_id+prio;
                }
        }
        BUG_TRAP(0);
        return NULL;
}

/* dequeues a packet at given priority and level; call only if
   you are sure that there is an active class at prio/level */
static struct sk_buff *
htb_dequeue_tree(struct htb_sched *q,int prio,int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl,*start;
        /* look up the initial class in the row */
        start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
                        q->ptr[level]+prio,q->last_ptr_id[level]+prio);

        do {
next:
                BUG_TRAP(cl);
                if (!cl) return NULL;
                HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
                                prio,level,cl->classid,cl->un.leaf.deficit[level]);

                /* class can be empty - it is unlikely but can be true if the
                   leaf qdisc drops packets in its enqueue routine or if
                   someone used the graft operation on the leaf since the last
                   dequeue; simply deactivate and skip such a class */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q,cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
                                        prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);

                        if (cl == start) /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
                        break;
                if (!cl->warned) {
                        printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
                        cl->warned = 1;
                }
                q->nwc_hit++;
                htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
                                q->last_ptr_id[level]+prio);

        } while (cl != start);

        if (likely(skb != NULL)) {
                if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                        HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
                                level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
                        cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
                        htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                }
                /* this used to be after charge_class but this constellation
                   gives us slightly better performance */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate (q,cl);
                htb_charge_class (q,cl,level,skb->len);
        }
        return skb;
}

static void htb_delay_by(struct Qdisc *sch,long delay)
{
        struct htb_sched *q = qdisc_priv(sch);
        if (delay <= 0) delay = 1;
        if (unlikely(delay > 5*HZ)) {
                if (net_ratelimit())
                        printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
                delay = 5*HZ;
        }
        /* why not use jiffies here? because expires can be in the past */
        mod_timer(&q->timer, q->jiffies + delay);
        sch->flags |= TCQ_F_THROTTLED;
        sch->qstats.overlimits++;
        HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = NULL;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        long min_delay;
#ifdef HTB_DEBUG
        int evs_used = 0;
#endif

        q->jiffies = jiffies;
        HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
                        sch->q.qlen);

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
                sch->flags &= ~TCQ_F_THROTTLED;
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen) goto fin;
        PSCHED_GET_TIME(q->now);

        min_delay = LONG_MAX;
        q->nwc_hit = 0;
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                long delay;
                if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
                        delay = htb_do_events(q,level);
                        q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
#ifdef HTB_DEBUG
                        evs_used++;
#endif
                } else
                        delay = q->near_ev_cache[level] - q->jiffies;

                if (delay && min_delay > delay)
                        min_delay = delay;
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz (m);
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q,prio,level);
                        if (likely(skb != NULL)) {
                                sch->q.qlen--;
                                sch->flags &= ~TCQ_F_THROTTLED;
                                goto fin;
                        }
                }
        }
#ifdef HTB_DEBUG
        if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
                if (min_delay == LONG_MAX) {
                        printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
                                        evs_used,q->jiffies,jiffies);
                        htb_debug_dump(q);
                } else
                        printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
                                        "too small rate\n",min_delay);
        }
#endif
        htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
fin:
        HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
        return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc* sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int prio;

        for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                struct list_head *p;
                list_for_each (p,q->drops+prio) {
                        struct htb_class *cl = list_entry(p, struct htb_class,
                                                          un.leaf.drop_list);
                        unsigned int len;
                        if (cl->un.leaf.q->ops->drop &&
                                (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                sch->q.qlen--;
                                if (!cl->un.leaf.q->q.qlen)
                                        htb_deactivate (q,cl);
                                return len;
                        }
                }
        }
        return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int i;
        HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);

        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *p;
                list_for_each (p,q->hash+i) {
                        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                        if (cl->level)
                                memset(&cl->un.inner,0,sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
#ifdef HTB_DEBUG
                        cl->pq_node.rb_color = -1;
                        memset(cl->node,255,sizeof(cl->node));
#endif

                }
        }
        sch->flags &= ~TCQ_F_THROTTLED;
        del_timer(&q->timer);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        memset(q->row,0,sizeof(q->row));
        memset(q->row_mask,0,sizeof(q->row_mask));
        memset(q->wait_pq,0,sizeof(q->wait_pq));
        memset(q->ptr,0,sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);
}

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct rtattr *tb[TCA_HTB_INIT];
        struct tc_htb_glob *gopt;
        int i;
#ifdef HTB_DEBUG
        printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
                          HTB_VER >> 16,HTB_VER & 0xffff);
#endif
        if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
                        tb[TCA_HTB_INIT-1] == NULL ||
                        RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
                printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
        if (gopt->version != HTB_VER >> 16) {
                printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                                HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
                return -EINVAL;
        }
        q->debug = gopt->debug;
        HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);

        INIT_LIST_HEAD(&q->root);
        for (i = 0; i < HTB_HSIZE; i++)
                INIT_LIST_HEAD(q->hash+i);
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);

        init_timer(&q->timer);
        skb_queue_head_init(&q->direct_queue);

        q->direct_qlen = sch->dev->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;
        q->timer.function = htb_timer;
        q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
        init_timer(&q->rttim);
        q->rttim.function = htb_rate_timer;
        q->rttim.data = (unsigned long)sch;
        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);
#endif
        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct htb_sched *q = qdisc_priv(sch);
        unsigned char    *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_glob gopt;
        HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
        HTB_QLOCK(sch);
        gopt.direct_pkts = q->direct_pkts;

#ifdef HTB_DEBUG
        if (HTB_DBG_COND(0,2))
                htb_debug_dump(q);
#endif
        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
        gopt.defcls = q->defcls;
        gopt.debug = q->debug;
        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
        rta->rta_len = skb->tail - b;
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        HTB_QUNLOCK(sch);
        skb_trim(skb, skb->tail - skb->data);
        return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
        struct htb_sched *q = qdisc_priv(sch);
#endif
        struct htb_class *cl = (struct htb_class*)arg;
        unsigned char    *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_opt opt;

        HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);

        HTB_QLOCK(sch);
        tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (!cl->level && cl->un.leaf.q)
                tcm->tcm_info = cl->un.leaf.q->handle;

        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        memset (&opt,0,sizeof(opt));

        opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
        opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
        opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
        opt.level = cl->level;
        RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
        rta->rta_len = skb->tail - b;
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        HTB_QUNLOCK(sch);
        skb_trim(skb, b - skb->data);
        return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class*)arg;

#ifdef HTB_RATECM
	cl->rate_est.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
	cl->rate_est.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
#endif
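	/* Worked example (added): HTB_EWMAC*HTB_HSIZE = 2*16 = 32, i.e. the
	 * counters approximate an average over a ~32 second window, so
	 * 3200000 bytes accumulated in rate_bytes report as ~100000 B/s.
	 */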

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
	struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class*)arg;

	if (cl && !cl->level) {
		if (new == NULL && (new = qdisc_create_dflt(sch->dev,
					&pfifo_qdisc_ops)) == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			if (cl->prio_activity)
				htb_deactivate(qdisc_priv(sch), cl);

			/* TODO: is this correct? Why doesn't CBQ do it? */
			sch->q.qlen -= (*old)->q.qlen;
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}
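
/* Assumed usage example (added): htb_graft runs when userspace replaces
 * the elementary qdisc of a leaf class, e.g.
 *
 *	tc qdisc add dev eth0 parent 1:12 handle 20: sfq perturb 10
 *
 * Passing new == NULL falls back to a fresh pfifo, matching the default
 * leaf qdisc created in htb_change_class.
 */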

static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class*)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
#ifdef HTB_DEBUG
	struct htb_sched *q = qdisc_priv(sch);
#endif
	struct htb_class *cl = htb_find(classid,sch);
	HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}
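
/* Note (added): htb_get/htb_put form the class refcount pair used by the
 * tc core. Destruction is deferred until the last reference is dropped;
 * see htb_put and htb_delete below, both of which only call
 * htb_destroy_class once refcnt reaches zero.
 */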

static void htb_destroy_filters(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);
	HTB_DBG(0,1,"htb_destroy_class clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		sch->q.qlen -= cl->un.leaf.q->q.qlen;
		qdisc_destroy(cl->un.leaf.q);
	}
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	htb_destroy_filters(&cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
					struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete); htb_delete
	   uses list_del_init on hlist, so a second list_del here is safe */
	list_del(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc* sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	HTB_DBG(0,1,"htb_destroy q=%p\n",q);

	del_timer_sync(&q->timer);
#ifdef HTB_RATECM
	del_timer_sync(&q->rttim);
#endif
	/* This line used to come after the htb_destroy_class call below,
	   and surprisingly that worked in 2.4. But it must precede it:
	   a filter needs its target class alive to be able to call
	   unbind_filter on it (without an oops). */
	htb_destroy_filters(&q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
					struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class*)arg;
	HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

	// TODO: why don't we allow deleting a subtree? references? does the
	// tc subsystem guarantee that htb_destroy is entered with no class
	// refs held, so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	sch_tree_lock(sch);

	/* delete from hash and active lists; the rest happens in
	   htb_destroy_class */
	list_del_init(&cl->hlist);
	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
#ifdef HTB_DEBUG
	struct htb_sched *q = qdisc_priv(sch);
#endif
	struct htb_class *cl = (struct htb_class*)arg;
	HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
		u32 parentid, struct rtattr **tca, unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class*)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
			tb[TCA_HTB_PARMS-1] == NULL ||
			RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
	HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) { /* new class */
		struct Qdisc *new_q;
		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		memset(cl, 0, sizeof(*cl));
		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_LIST_HEAD(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
#ifdef HTB_DEBUG
		cl->magic = HTB_CMAGIC;
#endif

		/* create the leaf qdisc early: it uses kmalloc(GFP_KERNEL),
		   which may sleep and so cannot be called under
		   sch_tree_lock -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn the parent into an inner node */
			sch->q.qlen -= parent->un.leaf.q->q.qlen;
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from event list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq /*+0*/);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					: TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* the leaf (us) needs an elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60000000; /* 1min */
		PSCHED_GET_TIME(cl->t_c);
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
		list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
#ifdef HTB_DEBUG
		{
			int i;
			for (i = 0; i < TC_HTB_NUMPRIO; i++)
				cl->node[i].rb_color = -1;
			cl->pq_node.rb_color = -1;
		}
#endif
	} else
		sch_tree_lock(sch);

	/* there used to be a nasty bug here: we must check that the node
	   is really a leaf before touching cl->un.leaf! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
	}
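	/* Worked example (added): quantum = rate / rate2quantum, with rate
	 * in bytes/s. With the default r2q of 10, a leaf at 800kbit
	 * (100000 B/s) gets quantum 10000 bytes, while a 64kbit leaf
	 * (8000 B/s) would compute 800 and be clamped to the 1000 byte
	 * minimum with the warning above.
	 */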

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}
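
/* Assumed usage example (added): a typical class creation that lands in
 * htb_change_class:
 *
 *	tc class add dev eth0 parent 1: classid 1:12 htb \
 *		rate 600kbit ceil 800kbit burst 10k
 *
 * tc converts these into struct tc_htb_opt plus rate/ceil tables
 * (TCA_HTB_PARMS, TCA_HTB_RTAB, TCA_HTB_CTAB); the figures are
 * illustrative only, not taken from this file.
 */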

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
	HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
	return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
	u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);
	HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
	/*if (cl && !cl->level) return 0;
	  The line above used to be here to prevent attaching filters to
	  leaves. But at least the tc_index filter uses this just to get the
	  class for other reasons, so we have to allow it.
	  ----
	  19.6.2002 As Werner explained, it is OK - bind_filter is just
	  another way to "lock" the class - unlike "get", this lock can be
	  broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}
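
/* Assumed usage example (added): a u32 filter that classifies traffic
 * into leaf 1:12 and thereby bumps filter_cnt via htb_bind_filter:
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dport 80 0xffff flowid 1:12
 *
 * The command line is illustrative, not taken from this file.
 */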

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct list_head *p;
		list_for_each(p, q->hash+i) {
			struct htb_class *cl = list_entry(p, struct htb_class, hlist);
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
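
/* Note (added): htb_walk backs class enumeration such as
 * "tc class show dev eth0". The skip/count fields let the tc core resume
 * a dump across multiple netlink messages, and a negative return from
 * arg->fn aborts the walk.
 */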

static struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.requeue	=	htb_requeue,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.change		=	NULL /* htb_change */,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}
module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");