/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */

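/*
 * Example configuration (illustrative only; the device name and all
 * values are hypothetical). RED is typically attached from userspace
 * with something like:
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 probability 0.02 bandwidth 10Mbit
 *
 * Here limit (400000 bytes) satisfies the constraint above: it is well
 * above max (90000 bytes) plus the burst allowance. Adding "ecn" marks
 * ECN-capable packets instead of dropping them (TC_RED_ECN below).
 */
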
struct red_sched_data
{
	u32 limit;		/* HARD maximal queue length */
	unsigned char flags;
	struct red_parms parms;
	struct red_stats stats;
	struct Qdisc *qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

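/*
 * Enqueue path. red_calc_qavg() (net/red.h) maintains an exponentially
 * weighted moving average of the child backlog, stored scaled by 2^Wlog:
 * roughly avg <- (1 - 2^-Wlog) * avg + 2^-Wlog * backlog. Based on this
 * average, red_action() returns one of three verdicts: below qth_min
 * packets pass untouched (RED_DONT_MARK), between qth_min and qth_max
 * they are marked/dropped with a probability that rises with the average
 * (RED_PROB_MARK), and above qth_max they are always marked/dropped
 * (RED_HARD_MARK).
 */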
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}
		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	ret = child->enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		sch->q.qlen++;
	} else {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	ret = child->ops->requeue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.requeues++;
		sch->q.qlen++;
	}
	return ret;
}

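/*
 * When the child queue runs empty, record the start of an idle period so
 * that red_calc_qavg() can decay the average while the link carries no
 * traffic; this is the behaviour fixed in the 990814 change noted above.
 */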
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}

static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_destroy(q->qdisc);
}

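/*
 * Build the default child: a bfifo whose byte limit equals the RED hard
 * limit. The fifo is configured by synthesizing the same netlink
 * attribute (a struct tc_fifo_qopt) that userspace would have sent.
 */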
static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
{
	struct Qdisc *q;
	struct nlattr *nla;
	int ret;

	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
			      TC_H_MAKE(sch->handle, 1));
	if (q) {
		nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)),
			      GFP_KERNEL);
		if (nla) {
			nla->nla_type = RTM_NEWQDISC;
			nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

			ret = q->ops->change(q, nla);
			kfree(nla);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}
	return NULL;
}

static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;

	if (opt == NULL || nla_parse_nested(tb, TCA_RED_MAX, opt, NULL))
		return -EINVAL;

	if (tb[TCA_RED_PARMS] == NULL ||
	    nla_len(tb[TCA_RED_PARMS]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB] == NULL ||
	    nla_len(tb[TCA_RED_STAB]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = red_create_dflt(sch, ctl->limit);
		if (child == NULL)
			return -ENOMEM;
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

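/* A freshly created RED qdisc starts with the no-op child; red_change()
 * replaces it once a limit has been configured. */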
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit = q->limit,
		.flags = q->flags,
		/* qth_min/qth_max are stored scaled by 2^Wlog; unscale for userspace */
		.qth_min = q->parms.qth_min >> q->parms.Wlog,
		.qth_max = q->parms.qth_max >> q->parms.Wlog,
		.Wlog = q->parms.Wlog,
		.Plog = q->parms.Plog,
		.Scell_log = q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	return nla_nest_cancel(skb, opts);
}

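/*
 * Aggregate the RED-specific counters for userspace: "early" combines
 * probabilistic and forced drops, "marked" the corresponding ECN marks.
 */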
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early = q->stats.prob_drop + q->stats.forced_drop,
		.pdrop = q->stats.pdrop,
		.other = q->stats.other,
		.marked = q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (cl != 1)
		return -ENOENT;
	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

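/*
 * RED exposes a single pseudo-class (minor 1) holding the child qdisc;
 * classes cannot be added or removed, so most class operations below are
 * stubs.
 */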
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int red_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft = red_graft,
	.leaf = red_leaf,
	.get = red_get,
	.put = red_put,
	.change = red_change_class,
	.delete = red_delete,
	.walk = red_walk,
	.tcf_chain = red_find_tcf,
	.dump = red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id = "red",
	.priv_size = sizeof(struct red_sched_data),
	.cl_ops = &red_class_ops,
	.enqueue = red_enqueue,
	.dequeue = red_dequeue,
	.requeue = red_requeue,
	.drop = red_drop,
	.init = red_init,
	.reset = red_reset,
	.destroy = red_destroy,
	.change = red_change,
	.dump = red_dump,
	.dump_stats = red_dump_stats,
	.owner = THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");