X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=net%2Fsched%2Fsch_cbq.c;h=a294542cb8e4724eba3f3995e775568123cb481b;hb=c764c9ade6d9b710bad2b9c631ede9864333b98c;hp=ba82dfab6043d7379094ec0771d1bbdfcbb470f3;hpb=5e50da01d0ce7ef0ba3ed6cfabd62f327da0aca6;p=safe%2Fjmp%2Flinux-2.6

diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index ba82dfa..a294542 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -16,7 +16,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -30,6 +29,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -40,12 +40,12 @@
 =======================================

     Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
-             Management Models for Packet Networks",
+             Management Models for Packet Networks",
              IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

-             [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
+             [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

-             [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
+             [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
              Parameters", 1996

     [4] Sally Floyd and Michael Speer, "Experimental Results
@@ -59,12 +59,12 @@
    the implementation is different. Particularly:

    --- The WRR algorithm is different. Our version looks more
-   reasonable (I hope) and works when quanta are allowed to be
-   less than MTU, which is always the case when real time classes
-   have small rates. Note, that the statement of [3] is
-   incomplete, delay may actually be estimated even if class
-   per-round allotment is less than MTU. Namely, if per-round
-   allotment is W*r_i, and r_1+...+r_k = r < 1
+   reasonable (I hope) and works when quanta are allowed to be
+   less than MTU, which is always the case when real time classes
+   have small rates. Note, that the statement of [3] is
+   incomplete, delay may actually be estimated even if class
+   per-round allotment is less than MTU. Namely, if per-round
+   allotment is W*r_i, and r_1+...+r_k = r < 1

    delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
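
Note: the delay bound quoted in the comment above is easy to sanity-check numerically. Reading [x] as a ceiling, the claim is that delay_i stays finite even when the per-round allotment W*r_i is smaller than the MTU. The following user-space check uses illustrative values only; the MTU, weight, rates and link speed B below are made up, not taken from this file:

    #include <math.h>
    #include <stdio.h>

    /* delay_i <= (ceil(MTU/(W*r_i))*W*r + W*r + k*MTU)/B */
    int main(void)
    {
        double MTU = 1500.0;        /* bytes */
        double B   = 12.5e6;        /* bytes/s, i.e. 100 Mbit/s */
        double W   = 1000.0;        /* per-round weight, bytes */
        double r_i = 0.05, r = 0.5; /* r = r_1+...+r_k < 1 */
        int    k   = 4;             /* number of classes */

        double bound = (ceil(MTU / (W * r_i)) * W * r
                        + W * r + k * MTU) / B;

        printf("delay_i <= %.6f s\n", bound);  /* ~0.001720 s here */
        return 0;
    }

W*r_i is 50 bytes in this example, well below the MTU, yet the bound is finite, which is exactly the point the comment makes against the stricter reading of [3].
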
@@ -113,7 +113,7 @@ struct cbq_class

    /* Overlimit strategy parameters */
    void            (*overlimit)(struct cbq_class *cl);
-   long            penalty;
+   psched_tdiff_t  penalty;

    /* General scheduler (WRR) parameters */
    long            allot;
@@ -144,7 +144,7 @@ struct cbq_class
    psched_time_t   undertime;
    long            avgidle;
    long            deficit;    /* Saved deficit for WRR */
-   unsigned long   penalized;
+   psched_time_t   penalized;
    struct gnet_stats_basic bstats;
    struct gnet_stats_queue qstats;
    struct gnet_stats_rate_est rate_est;
@@ -181,12 +181,12 @@ struct cbq_sched_data
    psched_time_t   now_rt;     /* Cached real time */
    unsigned        pmask;

-   struct timer_list   delay_timer;
-   struct timer_list   wd_timer;   /* Watchdog timer,
+   struct hrtimer      delay_timer;
+   struct qdisc_watchdog   watchdog;   /* Watchdog timer,
                           started when CBQ has
                           backlog, but cannot
                           transmit just now */
-   long            wd_expires;
+   psched_tdiff_t  wd_expires;
    int         toplevel;
    u32         hgenerator;
 };
@@ -280,7 +280,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 #ifdef CONFIG_NET_CLS_ACT
    switch (result) {
    case TC_ACT_QUEUED:
-   case TC_ACT_STOLEN: 
+   case TC_ACT_STOLEN:
        *qerr = NET_XMIT_SUCCESS;
    case TC_ACT_SHOT:
        return NULL;
@@ -371,8 +371,6 @@ static void cbq_deactivate_class(struct cbq_class *this)
                    return;
                }
            }
-
-           cl = cl_prev->next_alive;
            return;
        }
    } while ((cl_prev = cl) != q->active[prio]);
@@ -387,12 +385,12 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
        psched_time_t now;
        psched_tdiff_t incr;

-       PSCHED_GET_TIME(now);
-       incr = PSCHED_TDIFF(now, q->now_rt);
-       PSCHED_TADD2(q->now, incr, now);
+       now = psched_get_time();
+       incr = now - q->now_rt;
+       now = q->now + incr;

        do {
-           if (PSCHED_TLESS(cl->undertime, now)) {
+           if (cl->undertime < now) {
                q->toplevel = cl->level;
                return;
            }
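
Note: a recurring pattern through these hunks is that, with psched_time_t now an ordinary 64-bit scalar clock, the old PSCHED_GET_TIME / PSCHED_TDIFF / PSCHED_TADD2 / PSCHED_TLESS macro layer reduces to plain integer arithmetic and comparisons, which is exactly what the + lines write out. A user-space model of the cbq_mark_toplevel() hunk just above; the typedefs mirror include/net/pkt_sched.h of this kernel generation, and the sample values are invented:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t psched_time_t;     /* u64 in the kernel */
    typedef int64_t  psched_tdiff_t;    /* s64 in the kernel */

    int main(void)
    {
        psched_time_t q_now    = 500000;  /* cached virtual clock */
        psched_time_t q_now_rt = 499000;  /* cached real clock    */
        psched_time_t now      = 501000;  /* from psched_get_time() */

        psched_tdiff_t incr = now - q_now_rt;  /* was PSCHED_TDIFF() */
        now = q_now + incr;                    /* was PSCHED_TADD2() */

        printf("advanced virtual now = %llu\n",
               (unsigned long long)now);
        return 0;
    }
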
@@ -476,12 +474,12 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 static void cbq_ovl_classic(struct cbq_class *cl)
 {
    struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-   psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
+   psched_tdiff_t delay = cl->undertime - q->now;

    if (!cl->delayed) {
        delay += cl->offtime;

-       /* 
+       /*
           Class goes to sleep, so that it will have no chance
           to work avgidle. Let's forgive it 8)
@@ -494,7 +492,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
            cl->avgidle = cl->minidle;
        if (delay <= 0)
            delay = 1;
-       PSCHED_TADD2(q->now, delay, cl->undertime);
+       cl->undertime = q->now + delay;

        cl->xstats.overactions++;
        cl->delayed = 1;
@@ -511,7 +509,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
        psched_tdiff_t base_delay = q->wd_expires;

        for (b = cl->borrow; b; b = b->borrow) {
-           delay = PSCHED_TDIFF(b->undertime, q->now);
+           delay = b->undertime - q->now;
            if (delay < base_delay) {
                if (delay <= 0)
                    delay = 1;
@@ -549,27 +547,32 @@ static void cbq_ovl_rclassic(struct cbq_class *cl)
 static void cbq_ovl_delay(struct cbq_class *cl)
 {
    struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
-   psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
+   psched_tdiff_t delay = cl->undertime - q->now;

    if (!cl->delayed) {
-       unsigned long sched = jiffies;
+       psched_time_t sched = q->now;
+       ktime_t expires;

        delay += cl->offtime;
        if (cl->avgidle < 0)
            delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
        if (cl->avgidle < cl->minidle)
            cl->avgidle = cl->minidle;
-       PSCHED_TADD2(q->now, delay, cl->undertime);
+       cl->undertime = q->now + delay;

        if (delay > 0) {
-           sched += PSCHED_US2JIFFIE(delay) + cl->penalty;
+           sched += delay + cl->penalty;
            cl->penalized = sched;
            cl->cpriority = TC_CBQ_MAXPRIO;
            q->pmask |= (1<<TC_CBQ_MAXPRIO);
-           if (del_timer(&q->delay_timer) &&
-               (long)(q->delay_timer.expires - sched) > 0)
-               q->delay_timer.expires = sched;
-           add_timer(&q->delay_timer);
+
+           expires = ktime_set(0, 0);
+           expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
+           if (hrtimer_try_to_cancel(&q->delay_timer) &&
+               ktime_to_ns(ktime_sub(q->delay_timer.expires,
+                         expires)) > 0)
+               q->delay_timer.expires = expires;
+           hrtimer_restart(&q->delay_timer);
            cl->delayed = 1;
            cl->xstats.overactions++;
            return;
@@ -586,7 +589,7 @@ static void cbq_ovl_lowprio(struct cbq_class *cl)
 {
    struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

-   cl->penalized = jiffies + cl->penalty;
+   cl->penalized = q->now + cl->penalty;

    if (cl->cpriority != cl->priority2) {
        cl->cpriority = cl->priority2;
@@ -607,27 +610,19 @@ static void cbq_ovl_drop(struct cbq_class *cl)
    cbq_ovl_classic(cl);
 }

-static void cbq_watchdog(unsigned long arg)
-{
-   struct Qdisc *sch = (struct Qdisc*)arg;
-
-   sch->flags &= ~TCQ_F_THROTTLED;
-   netif_schedule(sch->dev);
-}
-
-static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
+static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
+                      psched_time_t now)
 {
    struct cbq_class *cl;
    struct cbq_class *cl_prev = q->active[prio];
-   unsigned long now = jiffies;
-   unsigned long sched = now;
+   psched_time_t sched = now;

    if (cl_prev == NULL)
-       return now;
+       return 0;

    do {
        cl = cl_prev->next_alive;
-       if ((long)(now - cl->penalized) > 0) {
+       if (now - cl->penalized > 0) {
            cl_prev->next_alive = cl->next_alive;
            cl->next_alive = NULL;
            cl->cpriority = cl->priority;
@@ -643,30 +638,34 @@ static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
            }

            cl = cl_prev->next_alive;
-       } else if ((long)(sched - cl->penalized) > 0)
+       } else if (sched - cl->penalized > 0)
            sched = cl->penalized;
    } while ((cl_prev = cl) != q->active[prio]);

-   return (long)(sched - now);
+   return sched - now;
 }

-static void cbq_undelay(unsigned long arg)
+static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 {
-   struct Qdisc *sch = (struct Qdisc*)arg;
-   struct cbq_sched_data *q = qdisc_priv(sch);
-   long delay = 0;
+   struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
+                       delay_timer);
+   struct Qdisc *sch = q->watchdog.qdisc;
+   psched_time_t now;
+   psched_tdiff_t delay = 0;
    unsigned pmask;

+   now = psched_get_time();
+
    pmask = q->pmask;
    q->pmask = 0;

    while (pmask) {
        int prio = ffz(~pmask);
-       long tmp;
+       psched_tdiff_t tmp;

        pmask &= ~(1<<prio);

-       tmp = cbq_undelay_prio(q, prio);
+       tmp = cbq_undelay_prio(q, prio, now);
        if (tmp > 0) {
            q->pmask |= 1<<prio;
            if (tmp < delay || delay == 0)
                delay = tmp;
        }
    }

    if (delay) {
-       q->delay_timer.expires = jiffies + delay;
-       add_timer(&q->delay_timer);
+       ktime_t time;
+
+       time = ktime_set(0, 0);
+       time = ktime_add_ns(time, PSCHED_US2NS(now + delay));
+       hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
    }

    sch->flags &= ~TCQ_F_THROTTLED;
    netif_schedule(sch->dev);
+   return HRTIMER_NORESTART;
 }
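
Note: the cbq_ovl_delay() and cbq_undelay() hunks above move the per-class delay timer from a jiffy-resolution timer_list to an hrtimer armed with an absolute expiry: the psched timestamp is converted with PSCHED_US2NS() and the handler returns HRTIMER_NORESTART, re-arming explicitly while classes remain penalized. A minimal sketch of the arming side, condensed from the hunks; cbq_arm_delay_timer is a hypothetical helper name, and the hrtimer_init()/q->delay_timer.function setup is assumed to have happened in cbq_init() as shown later in this patch:

    /* Sketch: arm the class delay timer for an absolute psched time.
     * Assumes hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC,
     * HRTIMER_MODE_ABS) and q->delay_timer.function = cbq_undelay. */
    static void cbq_arm_delay_timer(struct cbq_sched_data *q,
                                    psched_time_t sched)
    {
        ktime_t expires;

        expires = ktime_set(0, 0);
        expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
        hrtimer_start(&q->delay_timer, expires, HRTIMER_MODE_ABS);
    }
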
@@ -719,7 +722,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 }
 #endif

-/* 
+/*
    It is mission critical procedure.

    We "regenerate" toplevel cutoff, if transmitting class
@@ -735,13 +738,13 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
    if (cl && q->toplevel >= borrowed->level) {
        if (cl->q->q.qlen > 1) {
            do {
-               if (PSCHED_IS_PASTPERFECT(borrowed->undertime)) {
+               if (borrowed->undertime == PSCHED_PASTPERFECT) {
                    q->toplevel = borrowed->level;
                    return;
                }
            } while ((borrowed=borrowed->borrow) != NULL);
        }
-#if 0 
+#if 0
    /* It is not necessary now. Uncommenting it
       will save CPU cycles, but decrease fairness.
     */
@@ -770,10 +773,10 @@ cbq_update(struct cbq_sched_data *q)
           (now - last) is total time between packet right edges.
           (last_pktlen/rate) is "virtual" busy time, so that

-          idle = (now - last) - last_pktlen/rate
+          idle = (now - last) - last_pktlen/rate
         */

-       idle = PSCHED_TDIFF(q->now, cl->last);
+       idle = q->now - cl->last;
        if ((unsigned long)idle > 128*1024*1024) {
            avgidle = cl->maxidle;
        } else {
@@ -817,13 +820,11 @@ cbq_update(struct cbq_sched_data *q)
            idle -= L2T(&q->link, len);
            idle += L2T(cl, len);

-           PSCHED_AUDIT_TDIFF(idle);
-
-           PSCHED_TADD2(q->now, idle, cl->undertime);
+           cl->undertime = q->now + idle;
        } else {
            /* Underlimit */

-           PSCHED_SET_PASTPERFECT(cl->undertime);
+           cl->undertime = PSCHED_PASTPERFECT;
            if (avgidle > cl->maxidle)
                cl->avgidle = cl->maxidle;
            else
@@ -844,8 +845,7 @@ cbq_under_limit(struct cbq_class *cl)
    if (cl->tparent == NULL)
        return cl;

-   if (PSCHED_IS_PASTPERFECT(cl->undertime) ||
-       !PSCHED_TLESS(q->now, cl->undertime)) {
+   if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
        cl->delayed = 0;
        return cl;
    }
@@ -868,8 +868,7 @@ cbq_under_limit(struct cbq_class *cl)
        }
        if (cl->level > q->toplevel)
            return NULL;
-   } while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
-        PSCHED_TLESS(q->now, cl->undertime));
+   } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

    cl->delayed = 0;
    return cl;
@@ -909,7 +908,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
            skb = cl->q->dequeue(cl->q);

            /* Class did not give us any skb :-(
-              It could occur even if cl->q->q.qlen != 0 
+              It could occur even if cl->q->q.qlen != 0
               f.e. if cl->q == "tbf"
             */
            if (skb == NULL)
@@ -1004,8 +1003,8 @@ cbq_dequeue(struct Qdisc *sch)
    psched_time_t now;
    psched_tdiff_t incr;

-   PSCHED_GET_TIME(now);
-   incr = PSCHED_TDIFF(now, q->now_rt);
+   now = psched_get_time();
+   incr = now - q->now_rt;

    if (q->tx_class) {
        psched_tdiff_t incr2;
@@ -1017,12 +1016,12 @@ cbq_dequeue(struct Qdisc *sch)
           cbq_time = max(real_time, work);
         */
        incr2 = L2T(&q->link, q->tx_len);
-       PSCHED_TADD(q->now, incr2);
+       q->now += incr2;
        cbq_update(q);
        if ((incr -= incr2) < 0)
            incr = 0;
    }
-   PSCHED_TADD(q->now, incr);
+   q->now += incr;
    q->now_rt = now;

    for (;;) {
@@ -1054,11 +1053,11 @@ cbq_dequeue(struct Qdisc *sch)
         */

        if (q->toplevel == TC_CBQ_MAXLEVEL &&
-           PSCHED_IS_PASTPERFECT(q->link.undertime))
+           q->link.undertime == PSCHED_PASTPERFECT)
            break;

        q->toplevel = TC_CBQ_MAXLEVEL;
-       PSCHED_SET_PASTPERFECT(q->link.undertime);
+       q->link.undertime = PSCHED_PASTPERFECT;
    }

    /* No packets in scheduler or nobody wants to give them to us :-(
@@ -1066,13 +1065,9 @@ cbq_dequeue(struct Qdisc *sch)

    if (sch->q.qlen) {
        sch->qstats.overlimits++;
-       if (q->wd_expires) {
-           long delay = PSCHED_US2JIFFIE(q->wd_expires);
-           if (delay <= 0)
-               delay = 1;
-           mod_timer(&q->wd_timer, jiffies + delay);
-           sch->flags |= TCQ_F_THROTTLED;
-       }
+       if (q->wd_expires)
+           qdisc_watchdog_schedule(&q->watchdog,
+                       now + q->wd_expires);
    }
    return NULL;
 }
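
Note how much bookkeeping the new struct qdisc_watchdog absorbs in the cbq_dequeue() hunk just above: the old code converted wd_expires to jiffies, clamped it, called mod_timer() and set TCQ_F_THROTTLED by hand, while qdisc_watchdog_schedule() takes an absolute psched_time_t and handles the throttled flag internally (its counterpart qdisc_watchdog_cancel() clears it). The lifecycle, outlined from the call sites this patch installs:

    /* Outline of the watchdog call sites after this patch: */

    qdisc_watchdog_init(&q->watchdog, sch);      /* in cbq_init()    */

    if (q->wd_expires)                           /* in cbq_dequeue() */
        qdisc_watchdog_schedule(&q->watchdog,
                                now + q->wd_expires);

    qdisc_watchdog_cancel(&q->watchdog);         /* in cbq_reset()   */
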
@@ -1258,6 +1253,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
        do {
            if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
                sch->q.qlen--;
+               if (!cl->q->q.qlen)
+                   cbq_deactivate_class(cl);
                return len;
            }
        } while ((cl = cl->next_alive) != cl_head);
@@ -1277,10 +1274,10 @@ cbq_reset(struct Qdisc* sch)
    q->pmask = 0;
    q->tx_class = NULL;
    q->tx_borrowed = NULL;
-   del_timer(&q->wd_timer);
-   del_timer(&q->delay_timer);
+   qdisc_watchdog_cancel(&q->watchdog);
+   hrtimer_cancel(&q->delay_timer);
    q->toplevel = TC_CBQ_MAXLEVEL;
-   PSCHED_GET_TIME(q->now);
+   q->now = psched_get_time();
    q->now_rt = q->now;

    for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
@@ -1291,7 +1288,7 @@ cbq_reset(struct Qdisc* sch)
            qdisc_reset(cl->q);

            cl->next_alive = NULL;
-           PSCHED_SET_PASTPERFECT(cl->undertime);
+           cl->undertime = PSCHED_PASTPERFECT;
            cl->avgidle = cl->maxidle;
            cl->deficit = cl->quantum;
            cl->cpriority = cl->priority;
@@ -1380,7 +1377,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
    default:
        return -EINVAL;
    }
-   cl->penalty = (ovl->penalty*HZ)/1000;
+   cl->penalty = ovl->penalty;
    return 0;
 }
@@ -1447,14 +1444,11 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
    q->link.minidle = -0x7FFFFFFF;
    q->link.stats_lock = &sch->dev->queue_lock;

-   init_timer(&q->wd_timer);
-   q->wd_timer.data = (unsigned long)sch;
-   q->wd_timer.function = cbq_watchdog;
-   init_timer(&q->delay_timer);
-   q->delay_timer.data = (unsigned long)sch;
+   qdisc_watchdog_init(&q->watchdog, sch);
+   hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    q->delay_timer.function = cbq_undelay;
    q->toplevel = TC_CBQ_MAXLEVEL;
-   PSCHED_GET_TIME(q->now);
+   q->now = psched_get_time();
    q->now_rt = q->now;

    cbq_link_class(&q->link);
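
Note: the cbq_set_overlimit() change above drops the jiffies round trip. cl->penalty used to be stored as (ovl->penalty*HZ)/1000 and dumped back as (cl->penalty*1000)/HZ (see cbq_dump_ovl() below), so its effective resolution was one jiffy; now the attribute value is kept in psched units end to end. Assuming the attribute is millisecond-scaled, as the old HZ/1000 factor suggests, the loss the old scheme could cause is easy to show with plain arithmetic (illustrative values only):

    #include <stdio.h>

    int main(void)
    {
        unsigned int hz = 100;      /* a common HZ of the period */
        unsigned int penalty = 4;   /* hypothetical 4 ms penalty */

        unsigned int stored = (penalty * hz) / 1000;  /* old: 0 jiffies */
        unsigned int dumped = (stored * 1000) / hz;   /* old: dumped 0  */

        printf("old: %u -> %u jiffies -> dumped as %u\n",
               penalty, stored, dumped);
        printf("new: stored and dumped unchanged: %u\n", penalty);
        return 0;
    }
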
@@ -1468,19 +1462,19 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
 static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
-   unsigned char *b = skb->tail;
+   unsigned char *b = skb_tail_pointer(skb);

    RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
    return skb->len;

 rtattr_failure:
-   skb_trim(skb, b - skb->data);
+   nlmsg_trim(skb, b);
    return -1;
 }

 static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 {
-   unsigned char *b = skb->tail;
+   unsigned char *b = skb_tail_pointer(skb);
    struct tc_cbq_lssopt opt;

    opt.flags = 0;
@@ -1499,13 +1493,13 @@ static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
    return skb->len;

 rtattr_failure:
-   skb_trim(skb, b - skb->data);
+   nlmsg_trim(skb, b);
    return -1;
 }

 static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 {
-   unsigned char *b = skb->tail;
+   unsigned char *b = skb_tail_pointer(skb);
    struct tc_cbq_wrropt opt;

    opt.flags = 0;
@@ -1517,30 +1511,30 @@ static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
    return skb->len;

 rtattr_failure:
-   skb_trim(skb, b - skb->data);
+   nlmsg_trim(skb, b);
    return -1;
 }

 static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 {
-   unsigned char *b = skb->tail;
+   unsigned char *b = skb_tail_pointer(skb);
    struct tc_cbq_ovl opt;

    opt.strategy = cl->ovl_strategy;
    opt.priority2 = cl->priority2+1;
    opt.pad = 0;
-   opt.penalty = (cl->penalty*1000)/HZ;
+   opt.penalty = cl->penalty;

    RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
    return skb->len;

 rtattr_failure:
-   skb_trim(skb, b - skb->data);
+   nlmsg_trim(skb, b);
    return -1;
 }

 static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 {
-   unsigned char *b = skb->tail;
+   unsigned char *b = skb_tail_pointer(skb);
    struct tc_cbq_fopt opt;

    if (cl->split || cl->defmap) {
@@ -1552,14 +1546,14 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
    return skb->len;

 rtattr_failure:
-   skb_trim(skb, b - skb->data);
+   nlmsg_trim(skb, b);
    return -1;
 }

 #ifdef CONFIG_NET_CLS_POLICE
 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
-   unsigned char *b = skb->tail;
+   unsigned char *b = skb_tail_pointer(skb);
    struct tc_cbq_police opt;

    if (cl->police) {
@@ -1571,7 +1565,7 @@ static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
    return skb->len;

 rtattr_failure:
-   skb_trim(skb, b - skb->data);
+   nlmsg_trim(skb, b);
    return -1;
 }
 #endif
@@ -1593,18 +1587,18 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
 static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
    struct cbq_sched_data *q = qdisc_priv(sch);
-   unsigned char *b = skb->tail;
+   unsigned char *b = skb_tail_pointer(skb);
    struct rtattr *rta;

    rta = (struct rtattr*)b;
    RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
    if (cbq_dump_attr(skb, &q->link) < 0)
        goto rtattr_failure;
-   rta->rta_len = skb->tail - b;
+   rta->rta_len = skb_tail_pointer(skb) - b;
    return skb->len;

 rtattr_failure:
-   skb_trim(skb, b - skb->data);
+   nlmsg_trim(skb, b);
    return -1;
 }
@@ -1622,7 +1616,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
           struct sk_buff *skb, struct tcmsg *tcm)
 {
    struct cbq_class *cl = (struct cbq_class*)arg;
-   unsigned char *b = skb->tail;
+   unsigned char *b = skb_tail_pointer(skb);
    struct rtattr *rta;

    if (cl->tparent)
@@ -1636,11 +1630,11 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
    RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
    if (cbq_dump_attr(skb, cl) < 0)
        goto rtattr_failure;
-   rta->rta_len = skb->tail - b;
+   rta->rta_len = skb_tail_pointer(skb) - b;
    return skb->len;

 rtattr_failure:
-   skb_trim(skb, b - skb->data);
+   nlmsg_trim(skb, b);
    return -1;
 }
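
Note: every dump helper in the stretch above changes identically. The direct skb->tail access becomes skb_tail_pointer(), so the code keeps working when the tail is stored as an offset rather than a pointer, and the error path unwinds with nlmsg_trim() instead of the open-coded skb_trim(skb, b - skb->data). The resulting shape, condensed from cbq_dump_rate(); the function name example_dump is illustrative:

    static int example_dump(struct sk_buff *skb, struct cbq_class *cl)
    {
        unsigned char *b = skb_tail_pointer(skb);  /* mark start */

        RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate),
                &cl->R_tab->rate);
        return skb->len;

    rtattr_failure:             /* RTA_PUT() jumps here on overflow */
        nlmsg_trim(skb, b);     /* drop the partial attribute */
        return -1;
    }
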
@@ -1655,8 +1649,8 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
    cl->xstats.avgidle = cl->avgidle;
    cl->xstats.undertime = 0;

-   if (!PSCHED_IS_PASTPERFECT(cl->undertime))
-       cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
+   if (cl->undertime != PSCHED_PASTPERFECT)
+       cl->xstats.undertime = cl->undertime - q->now;

    if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 #ifdef CONFIG_NET_ESTIMATOR
@@ -1685,8 +1679,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 #endif
    }
    sch_tree_lock(sch);
-   *old = cl->q;
-   cl->q = new;
+   *old = xchg(&cl->q, new);
    qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
    qdisc_reset(*old);
    sch_tree_unlock(sch);
@@ -1704,6 +1697,14 @@ cbq_leaf(struct Qdisc *sch, unsigned long arg)
    return cl ? cl->q : NULL;
 }

+static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
+{
+   struct cbq_class *cl = (struct cbq_class *)arg;
+
+   if (cl->q->q.qlen == 0)
+       cbq_deactivate_class(cl);
+}
+
 static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
 {
    struct cbq_sched_data *q = qdisc_priv(sch);
@@ -1716,23 +1717,13 @@ static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
    return 0;
 }

-static void cbq_destroy_filters(struct cbq_class *cl)
-{
-   struct tcf_proto *tp;
-
-   while ((tp = cl->filter_list) != NULL) {
-       cl->filter_list = tp->next;
-       tcf_destroy(tp);
-   }
-}
-
 static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 {
    struct cbq_sched_data *q = qdisc_priv(sch);

    BUG_TRAP(!cl->filters);

-   cbq_destroy_filters(cl);
+   tcf_destroy_chain(cl->filter_list);
    qdisc_destroy(cl->q);
    qdisc_put_rtab(cl->R_tab);
 #ifdef CONFIG_NET_ESTIMATOR
@@ -1759,7 +1750,7 @@ cbq_destroy(struct Qdisc* sch)
     */
    for (h = 0; h < 16; h++)
        for (cl = q->classes[h]; cl; cl = cl->next)
-           cbq_destroy_filters(cl);
+           tcf_destroy_chain(cl->filter_list);

    for (h = 0; h < 16; h++) {
        struct cbq_class *next;
@@ -1988,12 +1979,17 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
    struct cbq_sched_data *q = qdisc_priv(sch);
    struct cbq_class *cl = (struct cbq_class*)arg;
+   unsigned int qlen;

    if (cl->filters || cl->children || cl == &q->link)
        return -EBUSY;

    sch_tree_lock(sch);

+   qlen = cl->q->q.qlen;
+   qdisc_reset(cl->q);
+   qdisc_tree_decrease_qlen(cl->q, qlen);
+
    if (cl->next_alive)
        cbq_deactivate_class(cl);
@@ -2084,6 +2080,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 static struct Qdisc_class_ops cbq_class_ops = {
    .graft      =   cbq_graft,
    .leaf       =   cbq_leaf,
+   .qlen_notify    =   cbq_qlen_notify,
    .get        =   cbq_get,
    .put        =   cbq_put,
    .change     =   cbq_change_class,
@@ -2118,7 +2115,7 @@ static int __init cbq_module_init(void)
 {
    return register_qdisc(&cbq_qdisc_ops);
 }
-static void __exit cbq_module_exit(void) 
+static void __exit cbq_module_exit(void)
 {
    unregister_qdisc(&cbq_qdisc_ops);
 }
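
Note: the pieces above fit together as one fix. A class whose queue drains must leave the WRR active ring, or cbq_dequeue_prio() can keep looping over an empty class: cbq_drop() now deactivates explicitly, the new .qlen_notify hook lets qdisc_tree_decrease_qlen() do the same for classes emptied from below, and cbq_delete() drains the class before unlinking it. The resulting order of operations in cbq_delete(), condensed from the hunk above (unrelated bookkeeping elided):

    sch_tree_lock(sch);

    qlen = cl->q->q.qlen;
    qdisc_reset(cl->q);                     /* 1. empty the class queue    */
    qdisc_tree_decrease_qlen(cl->q, qlen);  /* 2. fix qlen up the tree,    */
                                            /*    triggering ->qlen_notify */
    if (cl->next_alive)
        cbq_deactivate_class(cl);           /* 3. leave the WRR ring       */
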