X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=net%2Fsched%2Fsch_generic.c;h=a63029ef3eddc9e1bc19cbc552f327e6fdafa8aa;hb=7fee226a;hp=3385ee5925418d73da404e429c33b20f6db9f4a1;hpb=fd44de7cc1d430caef91ad9aecec9ff000fe86f8;p=safe%2Fjmp%2Flinux-2.6

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3385ee5..a63029e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -11,198 +11,256 @@
  *              - Ingress support
  */
 
-#include <asm/uaccess.h>
-#include <asm/system.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
 #include <linux/errno.h>
-#include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
-#include <net/sock.h>
+#include <linux/slab.h>
 #include <net/pkt_sched.h>
+#include <net/dst.h>
 
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * dev->queue_lock spinlock.
+ * qdisc_lock(qdisc) spinlock.
  *
  * The idea is the following:
- * - enqueue, dequeue are serialized via top level device
- *   spinlock dev->queue_lock.
- * - ingress filtering is serialized via top level device
- *   spinlock dev->ingress_lock.
+ * - enqueue, dequeue are serialized via qdisc root lock
+ * - ingress filtering is also serialized via qdisc root lock
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-void qdisc_lock_tree(struct net_device *dev)
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-	spin_lock_bh(&dev->queue_lock);
-	spin_lock(&dev->ingress_lock);
+	skb_dst_force(skb);
+	q->gso_skb = skb;
+	q->qstats.requeues++;
+	q->q.qlen++;	/* it's still part of the queue */
+	__netif_schedule(q);
+
+	return 0;
 }
 
-void qdisc_unlock_tree(struct net_device *dev)
+static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
-	spin_unlock(&dev->ingress_lock);
-	spin_unlock_bh(&dev->queue_lock);
+	struct sk_buff *skb = q->gso_skb;
+
+	if (unlikely(skb)) {
+		struct net_device *dev = qdisc_dev(q);
+		struct netdev_queue *txq;
+
+		/* check the reason for requeuing without tx lock first */
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+		if (!netif_tx_queue_stopped(txq) &&
+		    !netif_tx_queue_frozen(txq)) {
+			q->gso_skb = NULL;
+			q->q.qlen--;
+		} else
+			skb = NULL;
+	} else {
+		skb = q->dequeue(q);
+	}
+
+	return skb;
 }
 
-/*
-   dev->queue_lock serializes queue accesses for this device
-   AND dev->qdisc pointer itself.
+static inline int handle_dev_cpu_collision(struct sk_buff *skb,
+					   struct netdev_queue *dev_queue,
+					   struct Qdisc *q)
+{
+	int ret;
+
+	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
+		/*
+		 * Same CPU holding the lock. It may be a transient
+		 * configuration error, when hard_start_xmit() recurses. We
+		 * detect it by checking xmit owner and drop the packet when
+		 * deadloop is detected. Return OK to try the next skb.
+		 */
+		kfree_skb(skb);
+		if (net_ratelimit())
+			printk(KERN_WARNING "Dead loop on netdevice %s, "
+			       "fix it urgently!\n", dev_queue->dev->name);
+		ret = qdisc_qlen(q);
+	} else {
+		/*
+		 * Another CPU is holding the lock; requeue and delay
+		 * xmits for some time.
+		 */
+		__get_cpu_var(softnet_data).cpu_collision++;
+		ret = dev_requeue_skb(skb, q);
+	}
 
-	netif_tx_lock serializes accesses to device driver.
+	return ret;
+}
 
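Two details above are easy to miss: dev_requeue_skb() parks exactly one skb in q->gso_skb rather than pushing it back through the qdisc (preserving packet order), and the parked skb still counts toward q.qlen. A small user-space sketch of that single-slot stash; all names here are illustrative, not kernel API:

	struct pkt { struct pkt *next; };

	struct queue {
		struct pkt *stash;	/* one deferred packet, like q->gso_skb */
		struct pkt *head;	/* the real FIFO */
		unsigned int qlen;	/* counts the stashed packet too */
	};

	static void requeue(struct queue *q, struct pkt *p)
	{
		q->stash = p;
		q->qlen++;		/* it's still part of the queue */
	}

	static struct pkt *dequeue(struct queue *q, int tx_ready)
	{
		struct pkt *p = q->stash;

		if (p) {
			if (!tx_ready)
				return NULL;	/* keep it parked, like dequeue_skb() */
			q->stash = NULL;
		} else if ((p = q->head) != NULL) {
			q->head = p->next;
		}
		if (p)
			q->qlen--;
		return p;
	}

Checking tx_ready before unparking mirrors dequeue_skb(): there is no point handing back a packet the driver already refused while the queue is still stopped or frozen.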
-	dev->queue_lock and netif_tx_lock are mutually exclusive,
-	if one is grabbed, another must be free.
+/*
+ * Transmit one skb, and handle the return status as required. Holding the
+ * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * function.
+ *
+ * Returns to the caller:
+ *	0  - queue is empty or throttled.
+ *	>0 - queue is not empty.
 */
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		    struct net_device *dev, struct netdev_queue *txq,
+		    spinlock_t *root_lock)
+{
+	int ret = NETDEV_TX_BUSY;
+
+	/* And release qdisc */
+	spin_unlock(root_lock);
 
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
+	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+		ret = dev_hard_start_xmit(skb, dev, txq);
 
-/* Kick device.
-   Note, that this procedure can be called by a watchdog timer, so that
-   we do not check dev->tbusy flag here.
+	HARD_TX_UNLOCK(dev, txq);
 
-   Returns:  0  - queue is empty.
-	    >0  - queue is not empty, but throttled.
-	    <0  - queue is not empty. Device is throttled, if dev->tbusy != 0.
+	spin_lock(root_lock);
 
-   NOTE: Called under dev->queue_lock with locally disabled BH.
-*/
+	if (dev_xmit_complete(ret)) {
+		/* Driver sent out skb successfully or skb was consumed */
+		ret = qdisc_qlen(q);
+	} else if (ret == NETDEV_TX_LOCKED) {
+		/* Driver trylock failed */
+		ret = handle_dev_cpu_collision(skb, txq, q);
+	} else {
+		/* Driver returned NETDEV_TX_BUSY - requeue skb */
+		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
+			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
+			       dev->name, ret, q->q.qlen);
+
+		ret = dev_requeue_skb(skb, q);
+	}
+
+	if (ret && (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)))
+		ret = 0;
+
+	return ret;
+}
 
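The three branches of sch_direct_xmit() map one-to-one onto what a driver's transmit routine may do: consume the skb, fail a trylock (LLTX drivers), or refuse the skb. A schematic LLTX-style ndo_start_xmit trimmed down to that return-value protocol; the demo_* ring helpers are hypothetical:

	static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
	{
		struct demo_priv *priv = netdev_priv(dev);

		if (!spin_trylock(&priv->tx_lock))
			return NETDEV_TX_LOCKED;	/* -> handle_dev_cpu_collision() */

		if (demo_ring_full(priv)) {
			netif_stop_queue(dev);
			spin_unlock(&priv->tx_lock);
			return NETDEV_TX_BUSY;		/* skb NOT consumed -> dev_requeue_skb() */
		}

		demo_ring_post(priv, skb);		/* skb consumed */
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_OK;			/* dev_xmit_complete() is true */
	}

Note the final clause of sch_direct_xmit(): even with work left (ret > 0), it reports 0 when the queue is stopped or frozen, so the caller stops spinning and waits to be rescheduled instead.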
-static inline int qdisc_restart(struct net_device *dev)
+/*
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
+ *
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
+ *
+ * netif_tx_lock serializes accesses to the device driver.
+ *
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive;
+ * if one is grabbed, the other must be free.
+ *
+ * Note that this procedure can be called by a watchdog timer.
+ *
+ * Returns to the caller:
+ *	0  - queue is empty or throttled.
+ *	>0 - queue is not empty.
+ */
+static inline int qdisc_restart(struct Qdisc *q)
 {
-	struct Qdisc *q = dev->qdisc;
+	struct netdev_queue *txq;
+	struct net_device *dev;
+	spinlock_t *root_lock;
 	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
-		unsigned nolock = (dev->features & NETIF_F_LLTX);
+	skb = dequeue_skb(q);
+	if (unlikely(!skb))
+		return 0;
+	WARN_ON_ONCE(skb_dst_is_noref(skb));
+	root_lock = qdisc_lock(q);
+	dev = qdisc_dev(q);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
+	return sch_direct_xmit(skb, q, dev, txq, root_lock);
+}
 
-		dev->gso_skb = NULL;
+void __qdisc_run(struct Qdisc *q)
+{
+	unsigned long start_time = jiffies;
 
+	while (qdisc_restart(q)) {
 		/*
-		 * When the driver has LLTX set it does its own locking
-		 * in start_xmit. No need to add additional overhead by
-		 * locking again. These checks are worth it because
-		 * even uncongested locks can be quite expensive.
-		 * The driver can do trylock like here too, in case
-		 * of lock congestion it should return -1 and the packet
-		 * will be requeued.
+		 * Postpone processing if
+		 * 1. another process needs the CPU;
+		 * 2. we've been doing it for too long.
 		 */
-		if (!nolock) {
-			if (!netif_tx_trylock(dev)) {
-			collision:
-				/* So, someone grabbed the driver. */
-
-				/* It may be transient configuration error,
-				   when hard_start_xmit() recurses. We detect
-				   it by checking xmit owner and drop the
-				   packet when deadloop is detected.
-				*/
-				if (dev->xmit_lock_owner == smp_processor_id()) {
-					kfree_skb(skb);
-					if (net_ratelimit())
-						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
-					return -1;
-				}
-				__get_cpu_var(netdev_rx_stat).cpu_collision++;
-				goto requeue;
-			}
+		if (need_resched() || jiffies != start_time) {
+			__netif_schedule(q);
+			break;
 		}
-
-		{
-			/* And release queue */
-			spin_unlock(&dev->queue_lock);
-
-			if (!netif_queue_stopped(dev)) {
-				int ret;
-
-				ret = dev_hard_start_xmit(skb, dev);
-				if (ret == NETDEV_TX_OK) {
-					if (!nolock) {
-						netif_tx_unlock(dev);
-					}
-					spin_lock(&dev->queue_lock);
-					return -1;
-				}
-				if (ret == NETDEV_TX_LOCKED && nolock) {
-					spin_lock(&dev->queue_lock);
-					goto collision;
-				}
-			}
-
-			/* NETDEV_TX_BUSY - we need to requeue */
-			/* Release the driver */
-			if (!nolock) {
-				netif_tx_unlock(dev);
-			}
-			spin_lock(&dev->queue_lock);
-			q = dev->qdisc;
-		}
-
-		/* Device kicked us out :(
-		   This is possible in three cases:
-
-		   0. driver is locked
-		   1. fastroute is enabled
-		   2. device cannot determine busy state
-		      before start of transmission (f.e. dialout)
-		   3. device is buggy (ppp)
-		 */
-
-requeue:
-		if (skb->next)
-			dev->gso_skb = skb;
-		else
-			q->ops->requeue(skb, q);
-		netif_schedule(dev);
-		return 1;
 	}
-	BUG_ON((int) q->q.qlen < 0);
-	return q->q.qlen;
+
+	clear_bit(__QDISC_STATE_RUNNING, &q->state);
 }
 
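__qdisc_run() assumes its caller already owns the __QDISC_STATE_RUNNING bit; the companion helper lives in include/net/pkt_sched.h and, in this kernel series, is essentially the following (quoted from memory, so treat it as a sketch):

	static inline void qdisc_run(struct Qdisc *q)
	{
		if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
			__qdisc_run(q);
	}

Whichever CPU wins the test_and_set_bit() becomes the single dequeue worker; everyone else just enqueues, knowing the owner (or a later __netif_schedule()) will drain the queue.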
-void __qdisc_run(struct net_device *dev)
+unsigned long dev_trans_start(struct net_device *dev)
 {
-	if (unlikely(dev->qdisc == &noop_qdisc))
-		goto out;
-
-	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
-		/* NOTHING */;
+	unsigned long val, res = dev->trans_start;
+	unsigned int i;
 
-out:
-	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		val = netdev_get_tx_queue(dev, i)->trans_start;
+		if (val && time_after(val, res))
+			res = val;
+	}
+	dev->trans_start = res;
+	return res;
 }
+EXPORT_SYMBOL(dev_trans_start);
 
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
 
 	netif_tx_lock(dev);
-	if (dev->qdisc != &noop_qdisc) {
+	if (!qdisc_tx_is_noop(dev)) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
-			if (netif_queue_stopped(dev) &&
-			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+			int some_queue_timedout = 0;
+			unsigned int i;
+			unsigned long trans_start;
+
+			for (i = 0; i < dev->num_tx_queues; i++) {
+				struct netdev_queue *txq;
+
+				txq = netdev_get_tx_queue(dev, i);
+				/*
+				 * old device drivers set dev->trans_start
+				 */
+				trans_start = txq->trans_start ? : dev->trans_start;
+				if (netif_tx_queue_stopped(txq) &&
+				    time_after(jiffies, (trans_start +
+							 dev->watchdog_timeo))) {
+					some_queue_timedout = 1;
+					break;
+				}
+			}
 
-			printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
-			       dev->name);
-			dev->tx_timeout(dev);
+			if (some_queue_timedout) {
+				char drivername[64];
+				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
+					  dev->name, netdev_drivername(dev, drivername, 64), i);
+				dev->netdev_ops->ndo_tx_timeout(dev);
 			}
-			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
+			if (!mod_timer(&dev->watchdog_timer,
+				       round_jiffies(jiffies +
+						     dev->watchdog_timeo)))
 				dev_hold(dev);
 		}
 	}
@@ -211,19 +269,13 @@ static void dev_watchdog(unsigned long arg)
 	dev_put(dev);
 }
 
-static void dev_watchdog_init(struct net_device *dev)
-{
-	init_timer(&dev->watchdog_timer);
-	dev->watchdog_timer.data = (unsigned long)dev;
-	dev->watchdog_timer.function = dev_watchdog;
-}
-
 void __netdev_watchdog_up(struct net_device *dev)
 {
-	if (dev->tx_timeout) {
+	if (dev->netdev_ops->ndo_tx_timeout) {
 		if (dev->watchdog_timeo <= 0)
 			dev->watchdog_timeo = 5*HZ;
-		if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
+		if (!mod_timer(&dev->watchdog_timer,
+			       round_jiffies(jiffies + dev->watchdog_timeo)))
 			dev_hold(dev);
 	}
 }
@@ -241,19 +293,39 @@ static void dev_watchdog_down(struct net_device *dev)
 	netif_tx_unlock_bh(dev);
 }
 
+/**
+ *	netif_carrier_on - set carrier
+ *	@dev: network device
+ *
+ *	Device has detected acquisition of carrier.
+ */
 void netif_carrier_on(struct net_device *dev)
 {
-	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
+	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
+		if (dev->reg_state == NETREG_UNINITIALIZED)
+			return;
 		linkwatch_fire_event(dev);
-	if (netif_running(dev))
-		__netdev_watchdog_up(dev);
+		if (netif_running(dev))
+			__netdev_watchdog_up(dev);
+	}
 }
+EXPORT_SYMBOL(netif_carrier_on);
 
+/**
+ *	netif_carrier_off - clear carrier
+ *	@dev: network device
+ *
+ *	Device has detected loss of carrier.
+ */
 void netif_carrier_off(struct net_device *dev)
 {
-	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
+	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
+		if (dev->reg_state == NETREG_UNINITIALIZED)
+			return;
 		linkwatch_fire_event(dev);
+	}
 }
+EXPORT_SYMBOL(netif_carrier_off);
 
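From a driver's point of view, the watchdog only arms if ndo_tx_timeout is provided, and the carrier helpers are what drivers call from link-change interrupts. A hypothetical driver wires this up roughly as follows (all demo_* names are invented for illustration):

	static const struct net_device_ops demo_netdev_ops = {
		.ndo_start_xmit	= demo_start_xmit,
		.ndo_tx_timeout	= demo_tx_timeout,	/* invoked by dev_watchdog() */
	};

	static void demo_setup(struct net_device *dev)
	{
		dev->netdev_ops = &demo_netdev_ops;
		dev->watchdog_timeo = 2 * HZ;	/* <= 0 would be forced to 5*HZ */
		netif_carrier_off(dev);		/* no link until the PHY reports one */
	}

	static void demo_link_change_irq(struct net_device *dev, bool link_up)
	{
		if (link_up)
			netif_carrier_on(dev);	/* fires linkwatch, restarts watchdog */
		else
			netif_carrier_off(dev);
	}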
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
  */
 
@@ -271,47 +343,54 @@ static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
 	return NULL;
 }
 
-static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
-	if (net_ratelimit())
-		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
-		       skb->dev->name);
-	kfree_skb(skb);
-	return NET_XMIT_CN;
-}
-
-struct Qdisc_ops noop_qdisc_ops = {
+struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 	.id		=	"noop",
 	.priv_size	=	0,
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
-	.requeue	=	noop_requeue,
+	.peek		=	noop_dequeue,
 	.owner		=	THIS_MODULE,
 };
 
+static struct netdev_queue noop_netdev_queue = {
+	.qdisc		=	&noop_qdisc,
+	.qdisc_sleeping	=	&noop_qdisc,
+};
+
 struct Qdisc noop_qdisc = {
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+	.dev_queue	=	&noop_netdev_queue,
 };
+EXPORT_SYMBOL(noop_qdisc);
 
-static struct Qdisc_ops noqueue_qdisc_ops = {
+static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 	.id		=	"noqueue",
 	.priv_size	=	0,
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
-	.requeue	=	noop_requeue,
+	.peek		=	noop_dequeue,
 	.owner		=	THIS_MODULE,
 };
 
+static struct Qdisc noqueue_qdisc;
+static struct netdev_queue noqueue_netdev_queue = {
+	.qdisc		=	&noqueue_qdisc,
+	.qdisc_sleeping	=	&noqueue_qdisc,
+};
+
 static struct Qdisc noqueue_qdisc = {
 	.enqueue	=	NULL,
 	.dequeue	=	noop_dequeue,
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noqueue_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
+	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
+	.dev_queue	=	&noqueue_netdev_queue,
 };
 
 
@@ -324,18 +403,38 @@ static const u8 prio2band[TC_PRIO_MAX+1] =
 
 #define PFIFO_FAST_BANDS 3
 
-static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
-					     struct Qdisc *qdisc)
+/*
+ * Private data for a pfifo_fast scheduler containing:
+ *	- queues for the three bands
+ *	- bitmap indicating which of the bands contain skbs
+ */
+struct pfifo_fast_priv {
+	u32 bitmap;
+	struct sk_buff_head q[PFIFO_FAST_BANDS];
+};
+
+/*
+ * Convert a bitmap to the first band number where an skb is queued, where:
+ *	bitmap=0 means there are no skbs on any band.
+ *	bitmap=1 means there is an skb on band 0.
+ *	bitmap=7 means there are skbs on all 3 bands, etc.
+ */
+static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
+
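The table is just a precomputed find-first-set over the three band bits, with -1 meaning "all bands empty". Worked examples: bitmap 6 (binary 110, bands 1 and 2 occupied) maps to band 1, and bitmap 5 (binary 101) maps to band 0, so the lowest-numbered, highest-priority band always wins. A user-space check of the same table:

	#include <assert.h>

	static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

	int main(void)
	{
		assert(bitmap2band[0] == -1);	/* nothing queued anywhere */
		assert(bitmap2band[6] ==  1);	/* bands 1,2 occupied -> serve band 1 */
		assert(bitmap2band[5] ==  0);	/* bands 0,2 occupied -> serve band 0 */
		return 0;
	}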
+static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
+					     int band)
 {
-	struct sk_buff_head *list = qdisc_priv(qdisc);
-	return list + prio2band[skb->priority & TC_PRIO_MAX];
+	return priv->q + band;
 }
 
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = prio2list(skb, qdisc);
+	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+		int band = prio2band[skb->priority & TC_PRIO_MAX];
+		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+		struct sk_buff_head *list = band2list(priv, band);
 
-	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
+		priv->bitmap |= (1 << band);
 		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
 
@@ -345,33 +444,46 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int band = bitmap2band[priv->bitmap];
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		if (!skb_queue_empty(list + prio)) {
-			qdisc->q.qlen--;
-			return __qdisc_dequeue_head(qdisc, list + prio);
-		}
+	if (likely(band >= 0)) {
+		struct sk_buff_head *list = band2list(priv, band);
+		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+
+		qdisc->q.qlen--;
+		if (skb_queue_empty(list))
+			priv->bitmap &= ~(1 << band);
+
+		return skb;
 	}
 
 	return NULL;
 }
 
-static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 {
-	qdisc->q.qlen++;
-	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int band = bitmap2band[priv->bitmap];
+
+	if (band >= 0) {
+		struct sk_buff_head *list = band2list(priv, band);
+
+		return skb_peek(list);
+	}
+
+	return NULL;
 }
 
 static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(qdisc, list + prio);
+		__qdisc_reset_queue(qdisc, band2list(priv, prio));
 
+	priv->bitmap = 0;
 	qdisc->qstats.backlog = 0;
 	qdisc->q.qlen = 0;
 }
@@ -381,44 +493,45 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
 	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
-	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 	return skb->len;
 
-rtattr_failure:
+nla_put_failure:
 	return -1;
 }
 
-static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 {
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		skb_queue_head_init(list + prio);
+		skb_queue_head_init(band2list(priv, prio));
 
 	return 0;
 }
 
-static struct Qdisc_ops pfifo_fast_ops = {
+struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
-	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+	.priv_size	=	sizeof(struct pfifo_fast_priv),
 	.enqueue	=	pfifo_fast_enqueue,
 	.dequeue	=	pfifo_fast_dequeue,
-	.requeue	=	pfifo_fast_requeue,
+	.peek		=	pfifo_fast_peek,
 	.init		=	pfifo_fast_init,
 	.reset		=	pfifo_fast_reset,
 	.dump		=	pfifo_fast_dump,
 	.owner		=	THIS_MODULE,
 };
 
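Notice that the patch drops the ->requeue() method from every Qdisc_ops and adds ->peek() instead: a scheduler that needs to examine the next packet (for example to decide whether it may be sent yet) inspects it without dequeuing, so nothing ever has to be put back. A sketch of the intended contract, not a quote of kernel documentation:

	/*
	 * ->peek():    return the skb that ->dequeue() would return next,
	 *              or NULL; the skb stays queued and is still owned
	 *              by the qdisc.
	 * ->dequeue(): remove and return that skb; ownership moves to
	 *              the caller.
	 */
	static struct sk_buff *demo_try_send(struct Qdisc *q)
	{
		struct sk_buff *skb = q->ops->peek(q);

		if (!skb || !demo_can_send_now(skb))	/* demo_can_send_now() is hypothetical */
			return NULL;
		return q->ops->dequeue(q);		/* same skb, now removed */
	}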
-struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+			  struct Qdisc_ops *ops)
 {
 	void *p;
 	struct Qdisc *sch;
 	unsigned int size;
 	int err = -ENOBUFS;
 
-	/* ensure that the Qdisc and the private data are 32-byte aligned */
+	/* ensure that the Qdisc and the private data are 64-byte aligned */
 	size = QDISC_ALIGN(sizeof(*sch));
 	size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
@@ -433,24 +546,25 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
-	sch->dev = dev;
-	dev_hold(dev);
+	sch->dev_queue = dev_queue;
+	dev_hold(qdisc_dev(sch));
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
 errout:
-	return ERR_PTR(-err);
+	return ERR_PTR(err);
 }
 
-struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
+struct Qdisc * qdisc_create_dflt(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 struct Qdisc_ops *ops,
 				 unsigned int parentid)
 {
 	struct Qdisc *sch;
 
-	sch = qdisc_alloc(dev, ops);
+	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch))
 		goto errout;
-	sch->stats_lock = &dev->queue_lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
@@ -460,149 +574,277 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
 errout:
 	return NULL;
 }
+EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under dev->queue_lock and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
-	struct Qdisc_ops *ops = qdisc->ops;
+	const struct Qdisc_ops *ops = qdisc->ops;
 
 	if (ops->reset)
 		ops->reset(qdisc);
-}
 
-/* this is the rcu callback function to clean up a qdisc when there
- * are no further references to it */
+	if (qdisc->gso_skb) {
+		kfree_skb(qdisc->gso_skb);
+		qdisc->gso_skb = NULL;
+		qdisc->q.qlen = 0;
+	}
+}
+EXPORT_SYMBOL(qdisc_reset);
 
-static void __qdisc_destroy(struct rcu_head *head)
+static void qdisc_rcu_free(struct rcu_head *head)
 {
-	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
+	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
+
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under dev->queue_lock and BH! */
-
 void qdisc_destroy(struct Qdisc *qdisc)
 {
-	struct Qdisc_ops  *ops = qdisc->ops;
+	const struct Qdisc_ops *ops = qdisc->ops;
 
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	list_del(&qdisc->list);
-#ifdef CONFIG_NET_ESTIMATOR
-	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+#ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
+	qdisc_put_stab(qdisc->stab);
 #endif
+	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
 	if (ops->reset)
 		ops->reset(qdisc);
 	if (ops->destroy)
 		ops->destroy(qdisc);
 
 	module_put(ops->owner);
-	dev_put(qdisc->dev);
-	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
+	dev_put(qdisc_dev(qdisc));
+
+	kfree_skb(qdisc->gso_skb);
+	/*
+	 * gen_estimator est_timer() might access qdisc->q.lock,
+	 * wait a RCU grace period before freeing qdisc.
+	 */
+	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
+EXPORT_SYMBOL(qdisc_destroy);
+
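qdisc_alloc() over-allocates by QDISC_ALIGNTO - 1 bytes so that the Qdisc it returns can be rounded up to an aligned boundary, with the slack recorded in sch->padded for the matching kfree() in qdisc_rcu_free(). QDISC_ALIGN is the usual round-up-to-a-power-of-two idiom; a self-contained illustration with the constant spelled out, since its exact value lives in include/net/sch_generic.h:

	#define DEMO_ALIGNTO	64
	#define DEMO_ALIGN(len)	(((len) + DEMO_ALIGNTO - 1) & ~(DEMO_ALIGNTO - 1))

	/* DEMO_ALIGN(1) == 64, DEMO_ALIGN(64) == 64, DEMO_ALIGN(65) == 128 */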
+/* Attach toplevel qdisc to device queue. */
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+			      struct Qdisc *qdisc)
+{
+	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+	spinlock_t *root_lock;
+
+	root_lock = qdisc_lock(oqdisc);
+	spin_lock_bh(root_lock);
+
+	/* Prune old scheduler */
+	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+		qdisc_reset(oqdisc);
+
+	/* ... and graft new one */
+	if (qdisc == NULL)
+		qdisc = &noop_qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
+
+	spin_unlock_bh(root_lock);
+
+	return oqdisc;
+}
+
+static void attach_one_default_qdisc(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_unused)
+{
+	struct Qdisc *qdisc;
+
+	if (dev->tx_queue_len) {
+		qdisc = qdisc_create_dflt(dev, dev_queue,
+					  &pfifo_fast_ops, TC_H_ROOT);
+		if (!qdisc) {
+			printk(KERN_INFO "%s: activation failed\n", dev->name);
+			return;
+		}
+
+		/* Can by-pass the queue discipline for default qdisc */
+		qdisc->flags |= TCQ_F_CAN_BYPASS;
+	} else {
+		qdisc = &noqueue_qdisc;
+	}
+	dev_queue->qdisc_sleeping = qdisc;
+}
+
+static void attach_default_qdiscs(struct net_device *dev)
+{
+	struct netdev_queue *txq;
+	struct Qdisc *qdisc;
+
+	txq = netdev_get_tx_queue(dev, 0);
+
+	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+		dev->qdisc = txq->qdisc_sleeping;
+		atomic_inc(&dev->qdisc->refcnt);
+	} else {
+		qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
+		if (qdisc) {
+			qdisc->ops->attach(qdisc);
+			dev->qdisc = qdisc;
+		}
+	}
+}
+
+static void transition_one_qdisc(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_need_watchdog)
+{
+	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
+	int *need_watchdog_p = _need_watchdog;
+
+	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+
+	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
+	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
+		dev_queue->trans_start = 0;
+		*need_watchdog_p = 1;
+	}
+}
 
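TCQ_F_CAN_BYPASS, set on the default pfifo_fast above, is what lets the transmit path skip the enqueue/dequeue round-trip entirely when the qdisc is empty and idle. Roughly — this paraphrases the __dev_xmit_skb() logic in net/core/dev.c of the same kernel series rather than quoting it:

	if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
	    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
		/* lone packet, empty queue: hand it straight to the driver */
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);		/* others queued up meanwhile */
		else
			clear_bit(__QDISC_STATE_RUNNING, &q->state);
	} else {
		rc = qdisc_enqueue_root(skb, q);	/* the normal path */
		qdisc_run(q);
	}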
 void dev_activate(struct net_device *dev)
 {
+	int need_watchdog;
+
 	/* No queueing discipline is attached to device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing and noqueue_qdisc for
	   virtual interfaces.
 	 */
 
-	if (dev->qdisc_sleeping == &noop_qdisc) {
-		struct Qdisc *qdisc;
-		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
-						  TC_H_ROOT);
-			if (qdisc == NULL) {
-				printk(KERN_INFO "%s: activation failed\n", dev->name);
-				return;
-			}
-			list_add_tail(&qdisc->list, &dev->qdisc_list);
-		} else {
-			qdisc = &noqueue_qdisc;
-		}
-		dev->qdisc_sleeping = qdisc;
-	}
+	if (dev->qdisc == &noop_qdisc)
+		attach_default_qdiscs(dev);
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&dev->queue_lock);
-	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
-	if (dev->qdisc != &noqueue_qdisc) {
+	need_watchdog = 0;
+	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+	transition_one_qdisc(dev, &dev->rx_queue, NULL);
+
+	if (need_watchdog) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&dev->queue_lock);
 }
 
-void dev_deactivate(struct net_device *dev)
+static void dev_deactivate_queue(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_qdisc_default)
 {
+	struct Qdisc *qdisc_default = _qdisc_default;
 	struct Qdisc *qdisc;
 
-	spin_lock_bh(&dev->queue_lock);
-	qdisc = dev->qdisc;
-	dev->qdisc = &noop_qdisc;
+	qdisc = dev_queue->qdisc;
+	if (qdisc) {
+		spin_lock_bh(qdisc_lock(qdisc));
+
+		if (!(qdisc->flags & TCQ_F_BUILTIN))
+			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+		qdisc_reset(qdisc);
+
+		spin_unlock_bh(qdisc_lock(qdisc));
+	}
+}
+
+static bool some_qdisc_is_busy(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *dev_queue;
+		spinlock_t *root_lock;
+		struct Qdisc *q;
+		int val;
+
+		dev_queue = netdev_get_tx_queue(dev, i);
+		q = dev_queue->qdisc_sleeping;
+		root_lock = qdisc_lock(q);
+
+		spin_lock_bh(root_lock);
 
-	qdisc_reset(qdisc);
+		val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+		       test_bit(__QDISC_STATE_SCHED, &q->state));
 
-	spin_unlock_bh(&dev->queue_lock);
+		spin_unlock_bh(root_lock);
+
+		if (val)
+			return true;
+	}
+	return false;
+}
+
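dev_deactivate(), next, needs all three pieces above in a fixed order: dev_deactivate_queue() marks each qdisc __QDISC_STATE_DEACTIVATED and publishes &noop_qdisc in its place; synchronize_rcu() then waits out any sender that read the old qdisc pointer just before the swap; finally some_qdisc_is_busy() is polled until no CPU still has a qdisc running (__QDISC_STATE_RUNNING) or scheduled for softirq processing (__QDISC_STATE_SCHED). Dropping either wait would let a concurrent qdisc_run() touch a qdisc that is about to be reset or destroyed.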
+void dev_deactivate(struct net_device *dev)
+{
+	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
-	/* Wait for outstanding dev_queue_xmit calls. */
+	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+	while (some_qdisc_is_busy(dev))
 		yield();
+}
 
-	if (dev->gso_skb) {
-		kfree_skb(dev->gso_skb);
-		dev->gso_skb = NULL;
-	}
+static void dev_init_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc)
+{
+	struct Qdisc *qdisc = _qdisc;
+
+	dev_queue->qdisc = qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
 }
 
 void dev_init_scheduler(struct net_device *dev)
 {
-	qdisc_lock_tree(dev);
 	dev->qdisc = &noop_qdisc;
-	dev->qdisc_sleeping = &noop_qdisc;
-	INIT_LIST_HEAD(&dev->qdisc_list);
-	qdisc_unlock_tree(dev);
+	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
+	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 
-	dev_watchdog_init(dev);
+	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
 
-void dev_shutdown(struct net_device *dev)
+static void shutdown_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc_default)
 {
-	struct Qdisc *qdisc;
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc_default = _qdisc_default;
+
+	if (qdisc) {
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+		dev_queue->qdisc_sleeping = qdisc_default;
 
-	qdisc_lock_tree(dev);
-	qdisc = dev->qdisc_sleeping;
-	dev->qdisc = &noop_qdisc;
-	dev->qdisc_sleeping = &noop_qdisc;
-	qdisc_destroy(qdisc);
-#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
-	if ((qdisc = dev->qdisc_ingress) != NULL) {
-		dev->qdisc_ingress = NULL;
-		qdisc_destroy(qdisc);
+		qdisc_destroy(qdisc);
 	}
-#endif
-	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
-	qdisc_unlock_tree(dev);
 }
 
-EXPORT_SYMBOL(netif_carrier_on);
-EXPORT_SYMBOL(netif_carrier_off);
-EXPORT_SYMBOL(noop_qdisc);
-EXPORT_SYMBOL(qdisc_create_dflt);
-EXPORT_SYMBOL(qdisc_destroy);
-EXPORT_SYMBOL(qdisc_reset);
-EXPORT_SYMBOL(qdisc_lock_tree);
-EXPORT_SYMBOL(qdisc_unlock_tree);
+void dev_shutdown(struct net_device *dev)
+{
+	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	qdisc_destroy(dev->qdisc);
+	dev->qdisc = &noop_qdisc;
+
+	WARN_ON(timer_pending(&dev->watchdog_timer));
+}
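Taken together, the lifecycle these functions implement for every device is:

	dev_init_scheduler(dev);  /* at register time: all queues point at noop_qdisc */
	dev_activate(dev);        /* on open/carrier-up: attach defaults, arm watchdog */
	dev_deactivate(dev);      /* on close/carrier-loss: swap in noop, drain, quiesce */
	dev_shutdown(dev);        /* at unregister: destroy qdiscs; watchdog must be dead */

Each stage leaves every queue's qdisc/qdisc_sleeping pair in a consistent state, so a device can cycle activate/deactivate any number of times between init and shutdown.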