* Credits (in time order) for older HTB versions:
* Stef Coene <stef.coene@docum.org>
* HTB support at LARTC mailing list
- * Ondrej Kraus, <krauso@barr.cz>
+ * Ondrej Kraus, <krauso@barr.cz>
* found missing INIT_QDISC(htb)
* Vladimir Smelhaus, Aamer Akhter, Bert Hubert
* helped a lot to locate nasty class stall bug
* Jiri Fojtasek
* fixed requeue routine
* and many others. thanks.
- *
- * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
-#include <net/sock.h>
-#include <net/pkt_sched.h>
#include <linux/rbtree.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
/* HTB algorithm.
Author: devik@cdi.cz
========================================================================
HTB is like TBF with multiple classes. It is also similar to CBQ because
- it allows to assign priority to each class in hierarchy.
+ it allows assigning a priority to each class in the hierarchy.
In fact it is another implementation of Floyd's formal sharing.
Levels:
- Each class is assigned level. Leaf has ALWAYS level 0 and root
+ Each class is assigned a level. A leaf is ALWAYS at level 0; root
 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
 one less than their parent (e.g. with TC_HTB_MAXDEPTH == 8 a root
 class sits at level 7 while every leaf sits at level 0).
*/
-#define HTB_HSIZE 16 /* classid hash size */
-#define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
-#define HTB_RATECM 1 /* whether to use rate computer */
-#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
+static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011 /* major must match the version number supplied by TC */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
+/* Module parameter and sysfs export */
+module_param(htb_hysteresis, int, 0640);
+MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
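+/* With mode 0640 the parameter is also runtime-tunable; illustrative usage
+ * (assuming the module built from this file is named sch_htb):
+ *	modprobe sch_htb htb_hysteresis=1
+ *	echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
+ */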
+
/* used internally to keep the status of a single class */
enum htb_cmode {
HTB_CANT_SEND, /* class can't send and can't borrow */
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
+ struct Qdisc_class_common common;
/* general class parameters */
- u32 classid;
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
struct tc_htb_xstats xstats; /* our special stats */
int refcnt; /* usage count of this class */
-#ifdef HTB_RATECM
- /* rate measurement counters */
- unsigned long rate_bytes, sum_bytes;
- unsigned long rate_packets, sum_packets;
-#endif
-
/* topology */
int level; /* our level (see above) */
+ unsigned int children;
struct htb_class *parent; /* parent class */
- struct hlist_node hlist; /* classid hash list item */
- struct list_head sibling; /* sibling list item */
- struct list_head children; /* children list */
union {
struct htb_class_leaf {
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_node pq_node; /* node for event queue */
- unsigned long pq_key; /* the same type as jiffies global */
+ psched_time_t pq_key;
int prio_activity; /* for which prios are we active */
enum htb_cmode cmode; /* current mode of the class */
psched_tdiff_t mbuffer; /* max wait time */
long tokens, ctokens; /* current number of tokens */
psched_time_t t_c; /* checkpoint time */
+
+ int prio; /* backup of un.leaf.prio and un.leaf.quantum; kept */
+ int quantum; /* so the parent-to-leaf return is possible. Eventually */
+ /* these should fully replace the un.leaf originals. */
};
-/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
int size)
{
- int slot = size >> rate->rate.cell_log;
- if (slot > 255) {
- cl->xstats.giants++;
- slot = 255;
- }
- return rate->data[slot];
+ return qdisc_l2t(rate, size);
}
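+/* qdisc_l2t() is the shared length-to-time helper from net/pkt_sched.h:
+ * roughly, it indexes rate->data[] by (size + cell_align + overhead)
+ * >> cell_log and scales entries for oversized packets, replacing the
+ * open-coded lookup (and its "giants" accounting) that used to live here.
+ */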
struct htb_sched {
- struct list_head root; /* root classes list */
- struct hlist_head hash[HTB_HSIZE]; /* hashed by classid */
+ struct Qdisc_class_hash clhash;
struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
/* self list - roots of self generating tree */
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* time of nearest event per level (row) */
- unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
-
- /* cached value of jiffies in dequeue */
- unsigned long jiffies;
+ psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
/* whether we hit non-work conserving class during this dequeue; we use */
int nwc_hit; /* this to disable mindelay complaint in dequeue */
/* filters for qdisc itself */
struct tcf_proto *filter_list;
- int filter_cnt;
int rate2quantum; /* quant = rate / rate2quantum */
psched_time_t now; /* cached dequeue time */
- struct timer_list timer; /* send delay timer */
-#ifdef HTB_RATECM
- struct timer_list rttim; /* rate computer timer */
- int recmp_bucket; /* which hash bucket to recompute next */
-#endif
+ struct qdisc_watchdog watchdog;
/* non shaped skbs; let them go directly thru */
struct sk_buff_head direct_queue;
long direct_pkts;
};
-/* compute hash of size HTB_HSIZE for given handle */
-static inline int htb_hash(u32 h)
-{
-#if HTB_HSIZE != 16
-#error "Declare new hash for your HTB_HSIZE"
-#endif
- h ^= h >> 8; /* stolen from cbq_hash */
- h ^= h >> 4;
- return h & 0xf;
-}
-
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
- struct hlist_node *p;
- struct htb_class *cl;
+ struct Qdisc_class_common *clc;
- if (TC_H_MAJ(handle) != sch->handle)
+ clc = qdisc_class_find(&q->clhash, handle);
+ if (clc == NULL)
return NULL;
-
- hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
- if (cl->classid == handle)
- return cl;
- }
- return NULL;
+ return container_of(clc, struct htb_class, common);
}
/**
 * We allow direct class selection by classid in priority. Then we examine
* filters in qdisc and in inner nodes (if higher filter points to the inner
* node). If we end up with classid MAJOR:0 we enqueue the skb into special
- * internal fifo (direct). These packets then go directly thru. If we still
+ * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful,
 * we finish and return the direct queue.
*/
#define HTB_DIRECT (struct htb_class*)-1
-static inline u32 htb_classid(struct htb_class *cl)
-{
- return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
-}
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
return cl;
- *qerr = NET_XMIT_BYPASS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
tcf = q->filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
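+ /* deliberate fall through to TC_ACT_SHOT's return NULL */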
case TC_ACT_SHOT:
return NULL;
}
-#elif defined(CONFIG_NET_CLS_POLICE)
- if (result == TC_POLICE_SHOT)
- return HTB_DIRECT;
#endif
if ((cl = (void *)res.class) == NULL) {
if (res.classid == sch->handle)
parent = *p;
c = rb_entry(parent, struct htb_class, node[prio]);
- if (cl->classid > c->classid)
+ if (cl->common.classid > c->common.classid)
p = &parent->rb_right;
else
p = &parent->rb_left;
{
struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
- cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
- if (cl->pq_key == q->jiffies)
+ cl->pq_key = q->now + delay;
+ if (cl->pq_key == q->now)
cl->pq_key++;
/* update the nearest event cache */
- if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
+ if (q->near_ev_cache[cl->level] > cl->pq_key)
q->near_ev_cache[cl->level] = cl->pq_key;
while (*p) {
struct htb_class *c;
parent = *p;
c = rb_entry(parent, struct htb_class, pq_node);
- if (time_after_eq(cl->pq_key, c->pq_key))
+ if (cl->pq_key >= c->pq_key)
p = &parent->rb_right;
else
p = &parent->rb_left;
 * htb_activate_prios - creates active class's feed chain
*
* The class is connected to ancestors and/or appropriate rows
- * for priorities it is participating on. cl->cmode must be new
+ * for priorities it is participating on. cl->cmode must be new
* (activated) mode. It does nothing if cl->prio_activity == 0.
*/
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
/**
* htb_deactivate_prios - remove class from feed chain
*
- * cl->cmode must represent old mode (before deactivation). It does
+ * cl->cmode must represent old mode (before deactivation). It does
* nothing if cl->prio_activity == 0. Class is removed from all feed
* chains and rows.
*/
 /* we are removing a child which is pointed to from
 its parent's feed - forget the pointer but remember
 the classid */
- p->un.inner.last_ptr_id[prio] = cl->classid;
+ p->un.inner.last_ptr_id[prio] = cl->common.classid;
p->un.inner.ptr[prio] = NULL;
}
htb_remove_class_from_row(q, cl, mask);
}
-#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
- return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
+ if (htb_hysteresis)
+ return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
+ else
+ return 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
- return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
+ if (htb_hysteresis)
+ return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
+ else
+ return 0;
}
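+/* Worked example (illustrative): with htb_hysteresis set, a class that may
+ * currently send only drops to HTB_CANT_SEND once ctokens would fall below
+ * -cbuffer (htb_lowater), and a class in HTB_CAN_SEND keeps that mode while
+ * tokens stay above -buffer (htb_hiwater); a class that lost HTB_CAN_SEND
+ * must climb back above 0 to regain it, so borderline classes don't flip
+ * mode on every packet.
+ */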
-#else
-#define htb_lowater(cl) (0)
-#define htb_hiwater(cl) (0)
-#endif
+
/**
* htb_class_mode - computes and returns current class mode
*
* It computes cl's mode at time cl->t_c+diff and returns it. If mode
* is not HTB_CAN_SEND then cl->pq_key is updated to time difference
- * from now to time when cl will change its state.
+ * from now to the time when cl will change its state.
 * Also it is worth noting that the class mode doesn't change simply
- * at cl->{c,}tokens == 0 but there can rather be hysteresis of
+ * at cl->{c,}tokens == 0 but rather with hysteresis over the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
* mode transitions per time unit. The speed gain is about 1/6.
*/
}
/**
- * htb_activate - inserts leaf cl into appropriate active feeds
+ * htb_activate - inserts leaf cl into appropriate active feeds
*
* Routine learns (new) priority of leaf and activates feed chain
* for the prio. It can be called on already active leaf safely.
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
- BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
+ WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
if (!cl->prio_activity) {
cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
}
/**
- * htb_deactivate - remove leaf cl from active feeds
+ * htb_deactivate - remove leaf cl from active feeds
*
 * Make sure that the leaf is active. In other words, it can't be called
* with non-active leaf. It also removes class from the drop list.
*/
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
- BUG_TRAP(cl->prio_activity);
+ WARN_ON(!cl->prio_activity);
htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
}
#ifdef CONFIG_NET_CLS_ACT
} else if (!cl) {
- if (ret == NET_XMIT_BYPASS)
+ if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
#endif
- } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
- NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
- cl->qstats.drops++;
- return NET_XMIT_DROP;
+ } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ }
+ return ret;
} else {
- cl->bstats.packets++;
- cl->bstats.bytes += skb->len;
+ cl->bstats.packets +=
+ skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ cl->bstats.bytes += qdisc_pkt_len(skb);
htb_activate(q, cl);
}
sch->q.qlen++;
- sch->bstats.packets++;
- sch->bstats.bytes += skb->len;
+ sch->bstats.packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ sch->bstats.bytes += qdisc_pkt_len(skb);
return NET_XMIT_SUCCESS;
}
/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
+ int ret;
struct htb_sched *q = qdisc_priv(sch);
- int ret = NET_XMIT_SUCCESS;
struct htb_class *cl = htb_classify(skb, sch, &ret);
struct sk_buff *tskb;
- if (cl == HTB_DIRECT || !cl) {
+ if (cl == HTB_DIRECT) {
/* enqueue to helper queue */
- if (q->direct_queue.qlen < q->direct_qlen && cl) {
+ if (q->direct_queue.qlen < q->direct_qlen) {
__skb_queue_head(&q->direct_queue, skb);
} else {
 __skb_queue_head(&q->direct_queue, skb);
 tskb = __skb_dequeue_tail(&q->direct_queue);
 kfree_skb(tskb);
 sch->qstats.drops++;
return NET_XMIT_CN;
}
- } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+#ifdef CONFIG_NET_CLS_ACT
+ } else if (!cl) {
+ if (ret & __NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
+#endif
+ } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
- cl->qstats.drops++;
- return NET_XMIT_DROP;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ }
+ return ret;
} else
htb_activate(q, cl);
return NET_XMIT_SUCCESS;
}
-static void htb_timer(unsigned long arg)
-{
- struct Qdisc *sch = (struct Qdisc *)arg;
- sch->flags &= ~TCQ_F_THROTTLED;
- wmb();
- netif_schedule(sch->dev);
-}
-
-#ifdef HTB_RATECM
-#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
-static void htb_rate_timer(unsigned long arg)
-{
- struct Qdisc *sch = (struct Qdisc *)arg;
- struct htb_sched *q = qdisc_priv(sch);
- struct hlist_node *p;
- struct htb_class *cl;
-
-
- /* lock queue so that we can muck with it */
- spin_lock_bh(&sch->dev->queue_lock);
-
- q->rttim.expires = jiffies + HZ;
- add_timer(&q->rttim);
-
- /* scan and recompute one bucket at time */
- if (++q->recmp_bucket >= HTB_HSIZE)
- q->recmp_bucket = 0;
-
- hlist_for_each_entry(cl,p, q->hash + q->recmp_bucket, hlist) {
- RT_GEN(cl->sum_bytes, cl->rate_bytes);
- RT_GEN(cl->sum_packets, cl->rate_packets);
- }
- spin_unlock_bh(&sch->dev->queue_lock);
-}
-#endif
-
/**
* htb_charge_class - charges amount "bytes" to leaf and ancestors
*
* In such case we remove class from event queue first.
*/
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
- int level, int bytes)
+ int level, struct sk_buff *skb)
{
+ int bytes = qdisc_pkt_len(skb);
long toks, diff;
enum htb_cmode old_mode;
cl->T = toks
while (cl) {
- diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
+ diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
if (cl->level >= level) {
if (cl->level == level)
cl->xstats.lends++;
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
-#ifdef HTB_RATECM
- /* update rate counters */
- cl->sum_bytes += bytes;
- cl->sum_packets++;
-#endif
/* update byte stats except for leaves which are already updated */
if (cl->level) {
cl->bstats.bytes += bytes;
- cl->bstats.packets++;
+ cl->bstats.packets += skb_is_gso(skb) ?
+ skb_shinfo(skb)->gso_segs : 1;
}
cl = cl->parent;
}
/**
* htb_do_events - make mode changes to classes at the level
*
- * Scans event queue for pending events and applies them. Returns jiffies to
+ * Scans the event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq).
- * Note: Aplied are events whose have cl->pq_key <= jiffies.
+ * Note: Applied are events with cl->pq_key <= q->now.
*/
-static long htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
- int i;
-
- for (i = 0; i < 500; i++) {
+ /* don't run for longer than 2 jiffies; 2 is used instead of
+ 1 to simplify things when jiffy is going to be incremented
+ too soon */
+ unsigned long stop_at = jiffies + 2;
+ while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
long diff;
- struct rb_node *p = q->wait_pq[level].rb_node;
+ struct rb_node *p = rb_first(&q->wait_pq[level]);
+
if (!p)
return 0;
- while (p->rb_left)
- p = p->rb_left;
cl = rb_entry(p, struct htb_class, pq_node);
- if (time_after(cl->pq_key, q->jiffies)) {
- return cl->pq_key - q->jiffies;
- }
+ if (cl->pq_key > q->now)
+ return cl->pq_key;
+
htb_safe_rb_erase(p, q->wait_pq + level);
- diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
+ diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
htb_change_class_mode(q, cl, &diff);
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
- if (net_ratelimit())
- printk(KERN_WARNING "htb: too many events !\n");
- return HZ / 10;
+ /* too much load - let's continue on the next jiffy */
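+ /* (PSCHED_TICKS_PER_SEC / HZ is one jiffy expressed in psched
+ ticks, so this re-arms the event one jiffy from now) */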
+ return q->now + PSCHED_TICKS_PER_SEC / HZ;
}
/* Returns class->node+prio from id-tree where class's id is >= id. NULL
while (n) {
struct htb_class *cl =
rb_entry(n, struct htb_class, node[prio]);
- if (id == cl->classid)
+ if (id == cl->common.classid)
return n;
- if (id > cl->classid) {
+ if (id > cl->common.classid) {
n = n->rb_right;
} else {
r = n;
u32 *pid;
} stk[TC_HTB_MAXDEPTH], *sp = stk;
- BUG_TRAP(tree->rb_node);
+ WARN_ON(!tree->rb_node);
sp->root = tree->rb_node;
sp->pptr = pptr;
sp->pid = pid;
for (i = 0; i < 65535; i++) {
if (!*sp->pptr && *sp->pid) {
- /* ptr was invalidated but id is valid - try to recover
+ /* ptr was invalidated but id is valid - try to recover
the original or next ptr */
*sp->pptr =
htb_id_find_next_upper(prio, sp->root, *sp->pid);
*sp->pptr = (*sp->pptr)->rb_left;
if (sp > stk) {
sp--;
- BUG_TRAP(*sp->pptr);
+ WARN_ON(!*sp->pptr);
if (!*sp->pptr)
return NULL;
htb_next_rb_node(sp->pptr);
sp->pid = cl->un.inner.last_ptr_id + prio;
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
return NULL;
}
do {
next:
- BUG_TRAP(cl);
+ WARN_ON(!cl);
if (!cl)
return NULL;
 /* class can be empty - it is unlikely but can be true if the leaf
 qdisc drops packets in its enqueue routine or if someone used
- graft operation on the leaf since last dequeue;
+ the graft operation on the leaf since the last dequeue;
simply deactivate and skip such class */
if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
struct htb_class *next;
if (!cl->warned) {
printk(KERN_WARNING
"htb: class %X isn't work conserving ?!\n",
- cl->classid);
+ cl->common.classid);
cl->warned = 1;
}
q->nwc_hit++;
} while (cl != start);
if (likely(skb != NULL)) {
- if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
+ cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
+ if (cl->un.leaf.deficit[level] < 0) {
cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
ptr[0]) + prio);
gives us slightly better performance */
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
- htb_charge_class(q, cl, level, skb->len);
+ htb_charge_class(q, cl, level, skb);
}
return skb;
}
-static void htb_delay_by(struct Qdisc *sch, long delay)
-{
- struct htb_sched *q = qdisc_priv(sch);
- if (delay <= 0)
- delay = 1;
- if (unlikely(delay > 5 * HZ)) {
- if (net_ratelimit())
- printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
- delay = 5 * HZ;
- }
- /* why don't use jiffies here ? because expires can be in past */
- mod_timer(&q->timer, q->jiffies + delay);
- sch->flags |= TCQ_F_THROTTLED;
- sch->qstats.overlimits++;
-}
-
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb = NULL;
struct htb_sched *q = qdisc_priv(sch);
int level;
- long min_delay;
-
- q->jiffies = jiffies;
+ psched_time_t next_event;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (!sch->q.qlen)
goto fin;
- PSCHED_GET_TIME(q->now);
+ q->now = psched_get_time();
- min_delay = LONG_MAX;
+ next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
q->nwc_hit = 0;
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
- long delay;
- if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
- delay = htb_do_events(q, level);
- q->near_ev_cache[level] =
- q->jiffies + (delay ? delay : HZ);
+ psched_time_t event;
+
+ if (q->now >= q->near_ev_cache[level]) {
+ event = htb_do_events(q, level);
+ if (!event)
+ event = q->now + PSCHED_TICKS_PER_SEC;
+ q->near_ev_cache[level] = event;
} else
- delay = q->near_ev_cache[level] - q->jiffies;
+ event = q->near_ev_cache[level];
+
+ if (event && next_event > event)
+ next_event = event;
- if (delay && min_delay > delay)
- min_delay = delay;
m = ~q->row_mask[level];
while (m != (int)(-1)) {
int prio = ffz(m);
}
}
}
- htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
+ sch->qstats.overlimits++;
+ qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
return skb;
}
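+/* Note: qdisc_watchdog_schedule() (common helper in sch_api.c) marks the
+ * qdisc TCQ_F_THROTTLED and arms an hrtimer that clears the flag and
+ * reschedules the qdisc at next_event; it replaces the private
+ * htb_timer()/htb_delay_by() machinery removed above.
+ */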
static void htb_reset(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
- int i;
-
- for (i = 0; i < HTB_HSIZE; i++) {
- struct hlist_node *p;
- struct htb_class *cl;
+ struct htb_class *cl;
+ struct hlist_node *n;
+ unsigned int i;
- hlist_for_each_entry(cl, p, q->hash + i, hlist) {
+ for (i = 0; i < q->clhash.hashsize; i++) {
+ hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
if (cl->level)
memset(&cl->un.inner, 0, sizeof(cl->un.inner));
else {
}
}
- sch->flags &= ~TCQ_F_THROTTLED;
- del_timer(&q->timer);
+ qdisc_watchdog_cancel(&q->watchdog);
__skb_queue_purge(&q->direct_queue);
sch->q.qlen = 0;
memset(q->row, 0, sizeof(q->row));
INIT_LIST_HEAD(q->drops + i);
}
-static int htb_init(struct Qdisc *sch, struct rtattr *opt)
+static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
+ [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
+ [TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
+ [TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
+ [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
+};
+
+static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
struct htb_sched *q = qdisc_priv(sch);
- struct rtattr *tb[TCA_HTB_INIT];
+ struct nlattr *tb[TCA_HTB_INIT + 1];
struct tc_htb_glob *gopt;
+ int err;
int i;
- if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
- tb[TCA_HTB_INIT - 1] == NULL ||
- RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_HTB_INIT] == NULL) {
printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
return -EINVAL;
}
- gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
+ gopt = nla_data(tb[TCA_HTB_INIT]);
if (gopt->version != HTB_VER >> 16) {
printk(KERN_ERR
"HTB: need tc/htb version %d (minor is %d), you have %d\n",
return -EINVAL;
}
- INIT_LIST_HEAD(&q->root);
- for (i = 0; i < HTB_HSIZE; i++)
- INIT_HLIST_HEAD(q->hash + i);
+ err = qdisc_class_hash_init(&q->clhash);
+ if (err < 0)
+ return err;
for (i = 0; i < TC_HTB_NUMPRIO; i++)
INIT_LIST_HEAD(q->drops + i);
- init_timer(&q->timer);
+ qdisc_watchdog_init(&q->watchdog, sch);
skb_queue_head_init(&q->direct_queue);
- q->direct_qlen = sch->dev->tx_queue_len;
+ q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
q->direct_qlen = 2;
- q->timer.function = htb_timer;
- q->timer.data = (unsigned long)sch;
-
-#ifdef HTB_RATECM
- init_timer(&q->rttim);
- q->rttim.function = htb_rate_timer;
- q->rttim.data = (unsigned long)sch;
- q->rttim.expires = jiffies + HZ;
- add_timer(&q->rttim);
-#endif
+
if ((q->rate2quantum = gopt->rate2quantum) < 1)
q->rate2quantum = 1;
q->defcls = gopt->defcls;
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
+ spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
struct htb_sched *q = qdisc_priv(sch);
- unsigned char *b = skb->tail;
- struct rtattr *rta;
+ struct nlattr *nest;
struct tc_htb_glob gopt;
- spin_lock_bh(&sch->dev->queue_lock);
- gopt.direct_pkts = q->direct_pkts;
+ spin_lock_bh(root_lock);
+
+ gopt.direct_pkts = q->direct_pkts;
gopt.version = HTB_VER;
gopt.rate2quantum = q->rate2quantum;
gopt.defcls = q->defcls;
gopt.debug = 0;
- rta = (struct rtattr *)b;
- RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
- RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
- rta->rta_len = skb->tail - b;
- spin_unlock_bh(&sch->dev->queue_lock);
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+ NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
+ nla_nest_end(skb, nest);
+
+ spin_unlock_bh(root_lock);
return skb->len;
-rtattr_failure:
- spin_unlock_bh(&sch->dev->queue_lock);
- skb_trim(skb, skb->tail - skb->data);
+
+nla_put_failure:
+ spin_unlock_bh(root_lock);
+ nla_nest_cancel(skb, nest);
return -1;
}
struct sk_buff *skb, struct tcmsg *tcm)
{
struct htb_class *cl = (struct htb_class *)arg;
- unsigned char *b = skb->tail;
- struct rtattr *rta;
+ spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
+ struct nlattr *nest;
struct tc_htb_opt opt;
- spin_lock_bh(&sch->dev->queue_lock);
- tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
- tcm->tcm_handle = cl->classid;
+ spin_lock_bh(root_lock);
+ tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
+ tcm->tcm_handle = cl->common.classid;
if (!cl->level && cl->un.leaf.q)
tcm->tcm_info = cl->un.leaf.q->handle;
- rta = (struct rtattr *)b;
- RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
memset(&opt, 0, sizeof(opt));
opt.quantum = cl->un.leaf.quantum;
opt.prio = cl->un.leaf.prio;
opt.level = cl->level;
- RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
- rta->rta_len = skb->tail - b;
- spin_unlock_bh(&sch->dev->queue_lock);
+ NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
+
+ nla_nest_end(skb, nest);
+ spin_unlock_bh(root_lock);
return skb->len;
-rtattr_failure:
- spin_unlock_bh(&sch->dev->queue_lock);
- skb_trim(skb, b - skb->data);
+
+nla_put_failure:
+ spin_unlock_bh(root_lock);
+ nla_nest_cancel(skb, nest);
return -1;
}
{
struct htb_class *cl = (struct htb_class *)arg;
-#ifdef HTB_RATECM
- cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
- cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
-#endif
-
if (!cl->level && cl->un.leaf.q)
cl->qstats.qlen = cl->un.leaf.q->q.qlen;
cl->xstats.tokens = cl->tokens;
struct htb_class *cl = (struct htb_class *)arg;
if (cl && !cl->level) {
- if (new == NULL && (new = qdisc_create_dflt(sch->dev,
- &pfifo_qdisc_ops))
+ if (new == NULL &&
+ (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+ &pfifo_qdisc_ops,
+ cl->common.classid))
== NULL)
return -ENOBUFS;
sch_tree_lock(sch);
if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
- if (cl->prio_activity)
- htb_deactivate(qdisc_priv(sch), cl);
-
- /* TODO: is it correct ? Why CBQ doesn't do it ? */
- sch->q.qlen -= (*old)->q.qlen;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
qdisc_reset(*old);
}
sch_tree_unlock(sch);
return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}
+static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
+{
+ struct htb_class *cl = (struct htb_class *)arg;
+
+ if (cl->un.leaf.q->q.qlen == 0)
+ htb_deactivate(qdisc_priv(sch), cl);
+}
+
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
struct htb_class *cl = htb_find(classid, sch);
return (unsigned long)cl;
}
-static void htb_destroy_filters(struct tcf_proto **fl)
+static inline int htb_parent_last_child(struct htb_class *cl)
{
- struct tcf_proto *tp;
+ if (!cl->parent)
+ /* the root class */
+ return 0;
+ if (cl->parent->children > 1)
+ /* not the last child */
+ return 0;
+ return 1;
+}
- while ((tp = *fl) != NULL) {
- *fl = tp->next;
- tcf_destroy(tp);
- }
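+/* Called under sch_tree_lock from htb_delete when the removed leaf was its
+ * parent's last child: the parent is turned back into a leaf, reusing the
+ * prio/quantum values backed up in htb_change_class().
+ */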
+static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
+ struct Qdisc *new_q)
+{
+ struct htb_class *parent = cl->parent;
+
+ WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
+
+ if (parent->cmode != HTB_CAN_SEND)
+ htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
+
+ parent->level = 0;
+ memset(&parent->un.inner, 0, sizeof(parent->un.inner));
+ INIT_LIST_HEAD(&parent->un.leaf.drop_list);
+ parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
+ parent->un.leaf.quantum = parent->quantum;
+ parent->un.leaf.prio = parent->prio;
+ parent->tokens = parent->buffer;
+ parent->ctokens = parent->cbuffer;
+ parent->t_c = psched_get_time();
+ parent->cmode = HTB_CAN_SEND;
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
- struct htb_sched *q = qdisc_priv(sch);
if (!cl->level) {
- BUG_TRAP(cl->un.leaf.q);
- sch->q.qlen -= cl->un.leaf.q->q.qlen;
+ WARN_ON(!cl->un.leaf.q);
qdisc_destroy(cl->un.leaf.q);
}
+ gen_kill_estimator(&cl->bstats, &cl->rate_est);
qdisc_put_rtab(cl->rate);
qdisc_put_rtab(cl->ceil);
- htb_destroy_filters(&cl->filter_list);
-
- while (!list_empty(&cl->children))
- htb_destroy_class(sch, list_entry(cl->children.next,
- struct htb_class, sibling));
-
- /* note: this delete may happen twice (see htb_delete) */
- if (!hlist_unhashed(&cl->hlist))
- hlist_del(&cl->hlist);
- list_del(&cl->sibling);
-
- if (cl->prio_activity)
- htb_deactivate(q, cl);
-
- if (cl->cmode != HTB_CAN_SEND)
- htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
-
+ tcf_destroy_chain(&cl->filter_list);
kfree(cl);
}
static void htb_destroy(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
+ struct hlist_node *n, *next;
+ struct htb_class *cl;
+ unsigned int i;
- del_timer_sync(&q->timer);
-#ifdef HTB_RATECM
- del_timer_sync(&q->rttim);
-#endif
+ qdisc_watchdog_cancel(&q->watchdog);
/* This line used to be after htb_destroy_class call below
- and surprisingly it worked in 2.4. But it must precede it
+ and surprisingly it worked in 2.4. But it must precede it
 because filters need their target class alive to be able to call
unbind_filter on it (without Oops). */
- htb_destroy_filters(&q->filter_list);
-
- while (!list_empty(&q->root))
- htb_destroy_class(sch, list_entry(q->root.next,
- struct htb_class, sibling));
+ tcf_destroy_chain(&q->filter_list);
+ for (i = 0; i < q->clhash.hashsize; i++) {
+ hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
+ tcf_destroy_chain(&cl->filter_list);
+ }
+ for (i = 0; i < q->clhash.hashsize; i++) {
+ hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+ common.hnode)
+ htb_destroy_class(sch, cl);
+ }
+ qdisc_class_hash_destroy(&q->clhash);
__skb_queue_purge(&q->direct_queue);
}
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
+ unsigned int qlen;
+ struct Qdisc *new_q = NULL;
+ int last_child = 0;
// TODO: why don't we allow deleting a subtree? references? does the
// tc subsystem guarantee us that in htb_destroy it holds no class
// refs so that we can remove children safely there?
- if (!list_empty(&cl->children) || cl->filter_cnt)
+ if (cl->children || cl->filter_cnt)
return -EBUSY;
+ if (!cl->level && htb_parent_last_child(cl)) {
+ new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+ &pfifo_qdisc_ops,
+ cl->parent->common.classid);
+ last_child = 1;
+ }
+
sch_tree_lock(sch);
+ if (!cl->level) {
+ qlen = cl->un.leaf.q->q.qlen;
+ qdisc_reset(cl->un.leaf.q);
+ qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+ }
+
/* delete from hash and active; remainder in destroy_class */
- if (!hlist_unhashed(&cl->hlist))
- hlist_del(&cl->hlist);
+ qdisc_class_hash_remove(&q->clhash, &cl->common);
+ if (cl->parent)
+ cl->parent->children--;
if (cl->prio_activity)
htb_deactivate(q, cl);
+ if (cl->cmode != HTB_CAN_SEND)
+ htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
+
+ if (last_child)
+ htb_parent_to_leaf(q, cl, new_q);
+
if (--cl->refcnt == 0)
htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
- u32 parentid, struct rtattr **tca,
+ u32 parentid, struct nlattr **tca,
unsigned long *arg)
{
int err = -EINVAL;
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
- struct rtattr *opt = tca[TCA_OPTIONS - 1];
+ struct nlattr *opt = tca[TCA_OPTIONS];
struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
- struct rtattr *tb[TCA_HTB_RTAB];
+ struct nlattr *tb[TCA_HTB_RTAB + 1];
struct tc_htb_opt *hopt;
/* extract all subattrs from opt attr */
- if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
- tb[TCA_HTB_PARMS - 1] == NULL ||
- RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
+ if (!opt)
+ goto failure;
+
+ err = nla_parse_nested(tb, TCA_HTB_RTAB, opt, htb_policy);
+ if (err < 0)
+ goto failure;
+
+ err = -EINVAL;
+ if (tb[TCA_HTB_PARMS] == NULL)
goto failure;
parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
- hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);
+ hopt = nla_data(tb[TCA_HTB_PARMS]);
- rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
- ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
+ rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+ ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
if (!rtab || !ctab)
goto failure;
if (!cl) { /* new class */
struct Qdisc *new_q;
int prio;
+ struct {
+ struct nlattr nla;
+ struct gnet_estimator opt;
+ } est = {
+ .nla = {
+ .nla_len = nla_attr_size(sizeof(est.opt)),
+ .nla_type = TCA_RATE,
+ },
+ .opt = {
+ /* 4s interval, 16s averaging constant */
+ .interval = 2,
+ .ewma_log = 2,
+ },
+ };
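+ /* (how the encoding works, per gen_estimator.c: interval=2 selects
+ * a timer period of (HZ/4)<<(2+2) jiffies = 4 sec, and ewma_log=2
+ * averages over roughly 2^2 such periods = 16 sec) */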
/* check for valid classid */
if (!classid || TC_H_MAJ(classid ^ sch->handle)
if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
goto failure;
+ gen_new_estimator(&cl->bstats, &cl->rate_est,
+ qdisc_root_sleeping_lock(sch),
+ tca[TCA_RATE] ? : &est.nla);
cl->refcnt = 1;
- INIT_LIST_HEAD(&cl->sibling);
- INIT_HLIST_NODE(&cl->hlist);
- INIT_LIST_HEAD(&cl->children);
+ cl->children = 0;
INIT_LIST_HEAD(&cl->un.leaf.drop_list);
RB_CLEAR_NODE(&cl->pq_node);
 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
 which can't be used inside of sch_tree_lock
-- thanks to Karlis Peisenieks */
- new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+ new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+ &pfifo_qdisc_ops, classid);
sch_tree_lock(sch);
if (parent && !parent->level) {
+ unsigned int qlen = parent->un.leaf.q->q.qlen;
+
/* turn parent into inner node */
- sch->q.qlen -= parent->un.leaf.q->q.qlen;
+ qdisc_reset(parent->un.leaf.q);
+ qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
qdisc_destroy(parent->un.leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
/* leaf (we) needs elementary qdisc */
cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
- cl->classid = classid;
+ cl->common.classid = classid;
cl->parent = parent;
/* set class to be in HTB_CAN_SEND state */
cl->tokens = hopt->buffer;
cl->ctokens = hopt->cbuffer;
- cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60); /* 1min */
- PSCHED_GET_TIME(cl->t_c);
+ cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
+ cl->t_c = psched_get_time();
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
- hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
- list_add_tail(&cl->sibling,
- parent ? &parent->children : &q->root);
- } else
+ qdisc_class_hash_insert(&q->clhash, &cl->common);
+ if (parent)
+ parent->children++;
+ } else {
+ if (tca[TCA_RATE])
+ gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ qdisc_root_sleeping_lock(sch),
+ tca[TCA_RATE]);
sch_tree_lock(sch);
+ }
 /* there used to be a nasty bug here: we have to check that the node
 is really a leaf before changing cl->un.leaf ! */
if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
printk(KERN_WARNING
"HTB: quantum of class %X is small. Consider r2q change.\n",
- cl->classid);
+ cl->common.classid);
cl->un.leaf.quantum = 1000;
}
if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
printk(KERN_WARNING
"HTB: quantum of class %X is big. Consider r2q change.\n",
- cl->classid);
+ cl->common.classid);
cl->un.leaf.quantum = 200000;
}
if (hopt->quantum)
cl->un.leaf.quantum = hopt->quantum;
if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
+
+ /* backup for htb_parent_to_leaf */
+ cl->quantum = cl->un.leaf.quantum;
+ cl->prio = cl->un.leaf.prio;
}
cl->buffer = hopt->buffer;
cl->ceil = ctab;
sch_tree_unlock(sch);
+ qdisc_class_hash_grow(sch, &q->clhash);
+
*arg = (unsigned long)cl;
return 0;
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
- struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = htb_find(classid, sch);
/*if (cl && !cl->level) return 0;
*/
if (cl)
cl->filter_cnt++;
- else
- q->filter_cnt++;
return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
- struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
if (cl)
cl->filter_cnt--;
- else
- q->filter_cnt--;
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct htb_sched *q = qdisc_priv(sch);
- int i;
+ struct htb_class *cl;
+ struct hlist_node *n;
+ unsigned int i;
if (arg->stop)
return;
- for (i = 0; i < HTB_HSIZE; i++) {
- struct hlist_node *p;
- struct htb_class *cl;
-
- hlist_for_each_entry(cl, p, q->hash + i, hlist) {
+ for (i = 0; i < q->clhash.hashsize; i++) {
+ hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
continue;
}
}
-static struct Qdisc_class_ops htb_class_ops = {
+static const struct Qdisc_class_ops htb_class_ops = {
.graft = htb_graft,
.leaf = htb_leaf,
+ .qlen_notify = htb_qlen_notify,
.get = htb_get,
.put = htb_put,
.change = htb_change_class,
.dump_stats = htb_dump_class_stats,
};
-static struct Qdisc_ops htb_qdisc_ops = {
+static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.next = NULL,
.cl_ops = &htb_class_ops,
.id = "htb",