X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=net%2Fsched%2Fsch_generic.c;h=a63029ef3eddc9e1bc19cbc552f327e6fdafa8aa;hb=7fee226a;hp=739a8711ab30c42df56b7f4efd02f902d8e5003a;hpb=7698b4fcabcd790efc4f226bada1e7b5870653af;p=safe%2Fjmp%2Flinux-2.6

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 739a871..a63029e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -24,75 +24,51 @@
 #include <linux/init.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <net/pkt_sched.h>
+#include <net/dst.h>
 
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * queue->lock spinlock.
+ * qdisc_lock(qdisc) spinlock.
  *
  * The idea is the following:
- * - enqueue, dequeue are serialized via top level device
- *   spinlock queue->lock.
- * - ingress filtering is serialized via top level device
- *   spinlock dev->rx_queue.lock.
+ * - enqueue, dequeue are serialized via qdisc root lock
+ * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

-void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->rx_queue.lock)
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-	unsigned int i;
-
-	local_bh_disable();
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		spin_lock(&txq->lock);
-	}
-	spin_lock(&dev->rx_queue.lock);
-}
-EXPORT_SYMBOL(qdisc_lock_tree);
+	skb_dst_force(skb);
+	q->gso_skb = skb;
+	q->qstats.requeues++;
+	q->q.qlen++;	/* it's still part of the queue */
+	__netif_schedule(q);
 
-void qdisc_unlock_tree(struct net_device *dev)
-	__releases(dev->rx_queue.lock)
-{
-	unsigned int i;
-
-	spin_unlock(&dev->rx_queue.lock);
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		spin_unlock(&txq->lock);
-	}
-	local_bh_enable();
-}
-EXPORT_SYMBOL(qdisc_unlock_tree);
-
-static inline int qdisc_qlen(struct Qdisc *q)
-{
-	return q->q.qlen;
-}
-
-static inline int dev_requeue_skb(struct sk_buff *skb,
-				  struct netdev_queue *dev_queue,
-				  struct Qdisc *q)
-{
-	if (unlikely(skb->next))
-		q->gso_skb = skb;
-	else
-		q->ops->requeue(skb, q);
-
-	netif_schedule_queue(dev_queue);
 	return 0;
 }
 
 static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = q->gso_skb;
+
+	if (unlikely(skb)) {
+		struct net_device *dev = qdisc_dev(q);
+		struct netdev_queue *txq;
 
-	if ((skb = q->gso_skb))
-		q->gso_skb = NULL;
-	else
+		/* check the reason for requeuing without tx lock first */
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+		if (!netif_tx_queue_stopped(txq) &&
+		    !netif_tx_queue_frozen(txq)) {
+			q->gso_skb = NULL;
+			q->q.qlen--;
+		} else
+			skb = NULL;
+	} else {
 		skb = q->dequeue(q);
+	}
 
 	return skb;
 }
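The skb_dst_force() call added to dev_requeue_skb() pairs with the WARN_ON_ONCE(skb_dst_is_noref(skb)) that qdisc_restart() gains further down: a requeued skb waits in q->gso_skb beyond the RCU read-side section that made a reference-less ("noref") dst safe to carry, so a real reference must be taken before the skb is parked. A sketch of the guarantee, written with the net/dst.h accessors (this is not the actual skb_dst_force() body):

	/* Sketch: if the skb carries a noref dst, valid only inside the
	 * current RCU section, convert it to a refcounted dst so the skb
	 * can sit in q->gso_skb across softirqs. */
	static inline void skb_dst_force_sketch(struct sk_buff *skb)
	{
		if (skb_dst_is_noref(skb))
			skb_dst_set(skb, dst_clone(skb_dst(skb)));
	}
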
@@ -120,98 +96,111 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 	 * Another cpu is holding lock, requeue & delay xmits for
 	 * some time.
 	 */
-	__get_cpu_var(netdev_rx_stat).cpu_collision++;
-	ret = dev_requeue_skb(skb, dev_queue, q);
+	__get_cpu_var(softnet_data).cpu_collision++;
+	ret = dev_requeue_skb(skb, q);
 
 	return ret;
 }
 
 /*
- * NOTE: Called under queue->lock with locally disabled BH.
- *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
- * this qdisc at a time. queue->lock serializes queue accesses for
- * this queue AND txq->qdisc pointer itself.
- *
- * netif_tx_lock serializes accesses to device driver.
- *
- * queue->lock and netif_tx_lock are mutually exclusive,
- * if one is grabbed, another must be free.
- *
- * Note, that this procedure can be called by a watchdog timer
+ * Transmit one skb, and handle the return status as required. Holding the
+ * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * function.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
- *
 */
-static inline int qdisc_restart(struct netdev_queue *txq,
-				struct Qdisc *q)
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		    struct net_device *dev, struct netdev_queue *txq,
+		    spinlock_t *root_lock)
 {
 	int ret = NETDEV_TX_BUSY;
-	struct net_device *dev;
-	spinlock_t *root_lock;
-	struct sk_buff *skb;
-
-	/* Dequeue packet */
-	if (unlikely((skb = dequeue_skb(q)) == NULL))
-		return 0;
-
-	root_lock = qdisc_root_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
 
-	dev = txq->dev;
-
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_subqueue_stopped(dev, skb))
+	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
+
 	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(root_lock);
 
-	switch (ret) {
-	case NETDEV_TX_OK:
-		/* Driver sent out skb successfully */
+	if (dev_xmit_complete(ret)) {
+		/* Driver sent out skb successfully or skb was consumed */
 		ret = qdisc_qlen(q);
-		break;
-
-	case NETDEV_TX_LOCKED:
+	} else if (ret == NETDEV_TX_LOCKED) {
 		/* Driver try lock failed */
 		ret = handle_dev_cpu_collision(skb, txq, q);
-		break;
-
-	default:
+	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
 			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
 			       dev->name, ret, q->q.qlen);
 
-		ret = dev_requeue_skb(skb, txq, q);
-		break;
+		ret = dev_requeue_skb(skb, q);
 	}
 
+	if (ret && (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)))
+		ret = 0;
+
 	return ret;
 }
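sch_direct_xmit() is no longer buried inside qdisc_restart() because an empty queue can now be bypassed entirely. A simplified sketch of that second caller, modelled on the __dev_xmit_skb() path in net/core/dev.c (statistics and corner cases omitted; assumes root_lock is held with BH disabled). A return of 0, queue empty or driver stopped, ends the transmission run:

	if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
	    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
		/* queue is empty and nobody else is running it:
		 * hand the skb straight to the driver */
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);		/* more to do, or requeued */
		else
			clear_bit(__QDISC_STATE_RUNNING, &q->state);
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}

The TCQ_F_CAN_BYPASS flag this tests is set on the default qdiscs later in this patch.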
 
-void __qdisc_run(struct netdev_queue *txq)
+/*
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
+ *
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
+ *
+ * netif_tx_lock serializes accesses to device driver.
+ *
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
+ * if one is grabbed, another must be free.
+ *
+ * Note that this procedure can be called by a watchdog timer
+ *
+ * Returns to the caller:
+ *				0  - queue is empty or throttled.
+ *				>0 - queue is not empty.
+ *
+ */
+static inline int qdisc_restart(struct Qdisc *q)
 {
-	unsigned long start_time = jiffies;
-	struct Qdisc *q = txq->qdisc;
+	struct netdev_queue *txq;
+	struct net_device *dev;
+	spinlock_t *root_lock;
+	struct sk_buff *skb;
 
-	while (qdisc_restart(txq, q)) {
-		if (netif_tx_queue_stopped(txq))
-			break;
+	/* Dequeue packet */
+	skb = dequeue_skb(q);
+	if (unlikely(!skb))
+		return 0;
+	WARN_ON_ONCE(skb_dst_is_noref(skb));
+	root_lock = qdisc_lock(q);
+	dev = qdisc_dev(q);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
+	return sch_direct_xmit(skb, q, dev, txq, root_lock);
+}
 
+void __qdisc_run(struct Qdisc *q)
+{
+	unsigned long start_time = jiffies;
+
+	while (qdisc_restart(q)) {
 		/*
 		 * Postpone processing if
 		 * 1. another process needs the CPU;
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule_queue(txq);
+			__netif_schedule(q);
 			break;
 		}
 	}
@@ -219,6 +208,21 @@ void __qdisc_run(struct netdev_queue *txq)
 	clear_bit(__QDISC_STATE_RUNNING, &q->state);
 }
 
+unsigned long dev_trans_start(struct net_device *dev)
+{
+	unsigned long val, res = dev->trans_start;
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		val = netdev_get_tx_queue(dev, i)->trans_start;
+		if (val && time_after(val, res))
+			res = val;
+	}
+	dev->trans_start = res;
+	return res;
+}
+EXPORT_SYMBOL(dev_trans_start);
+
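dev_trans_start() reconstructs a device-wide "last transmission started" stamp now that trans_start lives in each netdev_queue. Hypothetical driver-side use in an ndo_tx_timeout handler (the foo_* names are invented for illustration):

	static void foo_tx_timeout(struct net_device *dev)
	{
		/* newest trans_start over all TX queues; refreshes
		 * dev->trans_start as a side effect */
		if (time_after(jiffies, dev_trans_start(dev) + 2 * HZ))
			foo_reset_hardware(dev);	/* hypothetical */
	}
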
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
@@ -228,27 +232,31 @@ static void dev_watchdog(unsigned long arg)
 
 	if (netif_device_present(dev) &&
 	    netif_running(dev) &&
 	    netif_carrier_ok(dev)) {
-		int some_queue_stopped = 0;
+		int some_queue_timedout = 0;
 		unsigned int i;
+		unsigned long trans_start;
 
 		for (i = 0; i < dev->num_tx_queues; i++) {
 			struct netdev_queue *txq;
 
 			txq = netdev_get_tx_queue(dev, i);
-			if (netif_tx_queue_stopped(txq)) {
-				some_queue_stopped = 1;
+			/*
+			 * old device drivers set dev->trans_start
+			 */
+			trans_start = txq->trans_start ? : dev->trans_start;
+			if (netif_tx_queue_stopped(txq) &&
+			    time_after(jiffies, (trans_start +
+						 dev->watchdog_timeo))) {
+				some_queue_timedout = 1;
 				break;
 			}
 		}
 
-		if (some_queue_stopped &&
-		    time_after(jiffies, (dev->trans_start +
-					 dev->watchdog_timeo))) {
-			printk(KERN_INFO "NETDEV WATCHDOG: %s: "
-			       "transmit timed out\n",
-			       dev->name);
-			dev->tx_timeout(dev);
-			WARN_ON_ONCE(1);
+		if (some_queue_timedout) {
+			char drivername[64];
+			WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
+				  dev->name, netdev_drivername(dev, drivername, 64), i);
+			dev->netdev_ops->ndo_tx_timeout(dev);
 		}
 		if (!mod_timer(&dev->watchdog_timer,
 			       round_jiffies(jiffies +
@@ -263,7 +271,7 @@ static void dev_watchdog(unsigned long arg)
 
 void __netdev_watchdog_up(struct net_device *dev)
 {
-	if (dev->tx_timeout) {
+	if (dev->netdev_ops->ndo_tx_timeout) {
 		if (dev->watchdog_timeo <= 0)
 			dev->watchdog_timeo = 5*HZ;
 		if (!mod_timer(&dev->watchdog_timer,
@@ -294,6 +302,8 @@ static void dev_watchdog_down(struct net_device *dev)
 void netif_carrier_on(struct net_device *dev)
 {
 	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
+		if (dev->reg_state == NETREG_UNINITIALIZED)
+			return;
 		linkwatch_fire_event(dev);
 		if (netif_running(dev))
 			__netdev_watchdog_up(dev);
@@ -309,8 +319,11 @@ EXPORT_SYMBOL(netif_carrier_on);
 */
 void netif_carrier_off(struct net_device *dev)
 {
-	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
+	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
+		if (dev->reg_state == NETREG_UNINITIALIZED)
+			return;
 		linkwatch_fire_event(dev);
+	}
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
@@ -330,27 +343,18 @@ static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
 	return NULL;
 }
 
-static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
-	if (net_ratelimit())
-		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
-		       skb->dev->name);
-	kfree_skb(skb);
-	return NET_XMIT_CN;
-}
-
 struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 	.id		=	"noop",
 	.priv_size	=	0,
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
-	.requeue	=	noop_requeue,
+	.peek		=	noop_dequeue,
 	.owner		=	THIS_MODULE,
 };
 
 static struct netdev_queue noop_netdev_queue = {
-	.lock		=	__SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock),
 	.qdisc		=	&noop_qdisc,
+	.qdisc_sleeping	=	&noop_qdisc,
 };
 
 struct Qdisc noop_qdisc = {
@@ -359,6 +363,7 @@ struct Qdisc noop_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
 };
 EXPORT_SYMBOL(noop_qdisc);
@@ -368,16 +373,24 @@ static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 	.priv_size	=	0,
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
-	.requeue	=	noop_requeue,
+	.peek		=	noop_dequeue,
 	.owner		=	THIS_MODULE,
 };
 
+static struct Qdisc noqueue_qdisc;
+static struct netdev_queue noqueue_netdev_queue = {
+	.qdisc		=	&noqueue_qdisc,
+	.qdisc_sleeping	=	&noqueue_qdisc,
+};
+
 static struct Qdisc noqueue_qdisc = {
 	.enqueue	=	NULL,
 	.dequeue	=	noop_dequeue,
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noqueue_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
+	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
+	.dev_queue	=	&noqueue_netdev_queue,
 };
 
@@ -390,18 +403,38 @@ static const u8 prio2band[TC_PRIO_MAX+1] =
 
 #define PFIFO_FAST_BANDS 3
 
-static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
-					     struct Qdisc *qdisc)
+/*
+ * Private data for a pfifo_fast scheduler containing:
+ *	- queues for the three bands
+ *	- bitmap indicating which of the bands contain skbs
+ */
+struct pfifo_fast_priv {
+	u32 bitmap;
+	struct sk_buff_head q[PFIFO_FAST_BANDS];
+};
+
+/*
+ * Convert a bitmap to the first band number where an skb is queued, where:
+ *	bitmap=0 means there are no skbs on any band.
+ *	bitmap=1 means there is an skb on band 0.
+ *	bitmap=7 means there are skbs on all 3 bands, etc.
+ */
+static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
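In other words, bitmap2band[] is "index of the lowest set bit, or -1 for 0": the lowest-numbered (highest-priority) non-empty band wins. A userspace spot-check of the table (illustrative only; __builtin_ctz is the GCC count-trailing-zeros intrinsic):

	#include <assert.h>

	static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

	int main(void)
	{
		int b;

		assert(bitmap2band[0] == -1);	/* no skbs on any band */
		for (b = 1; b < 8; b++)
			assert(bitmap2band[b] == __builtin_ctz(b));
		return 0;
	}
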
+
+static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
+					     int band)
 {
-	struct sk_buff_head *list = qdisc_priv(qdisc);
-	return list + prio2band[skb->priority & TC_PRIO_MAX];
+	return priv->q + band;
 }
 
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = prio2list(skb, qdisc);
+	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+		int band = prio2band[skb->priority & TC_PRIO_MAX];
+		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+		struct sk_buff_head *list = band2list(priv, band);
 
-	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
+		priv->bitmap |= (1 << band);
 		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
@@ -411,33 +444,46 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int band = bitmap2band[priv->bitmap];
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		if (!skb_queue_empty(list + prio)) {
-			qdisc->q.qlen--;
-			return __qdisc_dequeue_head(qdisc, list + prio);
-		}
+	if (likely(band >= 0)) {
+		struct sk_buff_head *list = band2list(priv, band);
+		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+
+		qdisc->q.qlen--;
+		if (skb_queue_empty(list))
+			priv->bitmap &= ~(1 << band);
+
+		return skb;
 	}
 
 	return NULL;
 }
 
-static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 {
-	qdisc->q.qlen++;
-	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int band = bitmap2band[priv->bitmap];
+
+	if (band >= 0) {
+		struct sk_buff_head *list = band2list(priv, band);
+
+		return skb_peek(list);
+	}
+
+	return NULL;
 }
 
 static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(qdisc, list + prio);
+		__qdisc_reset_queue(qdisc, band2list(priv, prio));
 
+	priv->bitmap = 0;
 	qdisc->qstats.backlog = 0;
 	qdisc->q.qlen = 0;
 }
@@ -457,20 +503,20 @@ nla_put_failure:
 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 {
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		skb_queue_head_init(list + prio);
+		skb_queue_head_init(band2list(priv, prio));
 
 	return 0;
 }
 
-static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
-	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+	.priv_size	=	sizeof(struct pfifo_fast_priv),
 	.enqueue	=	pfifo_fast_enqueue,
 	.dequeue	=	pfifo_fast_dequeue,
-	.requeue	=	pfifo_fast_requeue,
+	.peek		=	pfifo_fast_peek,
 	.init		=	pfifo_fast_init,
 	.reset		=	pfifo_fast_reset,
 	.dump		=	pfifo_fast_dump,
@@ -485,7 +531,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	unsigned int size;
 	int err = -ENOBUFS;
 
-	/* ensure that the Qdisc and the private data are 32-byte aligned */
+	/* ensure that the Qdisc and the private data are 64-byte aligned */
 	size = QDISC_ALIGN(sizeof(*sch));
 	size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
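Only the comment changes here; the QDISC_ALIGNTO constant in include/net/sch_generic.h is what actually became 64. The pointer arithmetic shared by qdisc_alloc() and qdisc_rcu_free() below, worked through in userspace (the two macros mirror sch_generic.h, everything else is scaffolding):

	#include <assert.h>
	#include <stdint.h>

	#define QDISC_ALIGNTO		64
	#define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO - 1) & \
					 ~(QDISC_ALIGNTO - 1))

	int main(void)
	{
		char buf[256];			/* stands in for kzalloc() */
		uintptr_t p = (uintptr_t)buf;
		uintptr_t sch = QDISC_ALIGN(p);	/* aligned struct Qdisc */
		unsigned int padded = sch - p;	/* saved in sch->padded */

		assert(sch % QDISC_ALIGNTO == 0);
		/* qdisc_rcu_free() undoes it: kfree((char *)sch - padded) */
		assert(sch - padded == p);
		return 0;
	}
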
@@ -530,7 +576,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under queue->lock and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -538,20 +584,22 @@ void qdisc_reset(struct Qdisc *qdisc)
 
 	if (ops->reset)
 		ops->reset(qdisc);
+
+	if (qdisc->gso_skb) {
+		kfree_skb(qdisc->gso_skb);
+		qdisc->gso_skb = NULL;
+		qdisc->q.qlen = 0;
+	}
 }
 EXPORT_SYMBOL(qdisc_reset);
 
-/* this is the rcu callback function to clean up a qdisc when there
- * are no further references to it */
-
-static void __qdisc_destroy(struct rcu_head *head)
+static void qdisc_rcu_free(struct rcu_head *head)
 {
-	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
+	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
+
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under queue->lock and BH! */
-
 void qdisc_destroy(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops  *ops = qdisc->ops;
@@ -560,7 +608,11 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	list_del(&qdisc->list);
+#ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
+	qdisc_put_stab(qdisc->stab);
+#endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
 	if (ops->reset)
 		ops->reset(qdisc);
@@ -569,21 +621,39 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));
 
-	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
+
+	kfree_skb(qdisc->gso_skb);
+	/*
+	 * gen_estimator est_timer() might access qdisc->q.lock,
+	 * wait an RCU grace period before freeing qdisc.
+	 */
+	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
 }
 EXPORT_SYMBOL(qdisc_destroy);
 
-static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
+/* Attach toplevel qdisc to device queue. */
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+			      struct Qdisc *qdisc)
 {
-	unsigned int i;
+	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+	spinlock_t *root_lock;
 
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+	root_lock = qdisc_lock(oqdisc);
+	spin_lock_bh(root_lock);
 
-		if (txq->qdisc_sleeping != &noop_qdisc)
-			return false;
-	}
-	return true;
+	/* Prune old scheduler */
+	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+		qdisc_reset(oqdisc);
+
+	/* ... and graft new one */
+	if (qdisc == NULL)
+		qdisc = &noop_qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
+
+	spin_unlock_bh(root_lock);
+
+	return oqdisc;
 }
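Note that dev_graft_qdisc() publishes &noop_qdisc, not the new qdisc, to the active dev_queue->qdisc pointer, so traffic is dropped until dev_activate() copies qdisc_sleeping back in. A hypothetical caller, roughly the pattern qdisc_graft() in net/sched/sch_api.c follows per TX queue:

	struct Qdisc *old;

	old = dev_graft_qdisc(txq, new);	/* returns displaced qdisc */
	qdisc_destroy(old);			/* release the old tree */
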
 
 static void attach_one_default_qdisc(struct net_device *dev,
@@ -599,24 +669,50 @@ static void attach_one_default_qdisc(struct net_device *dev,
 			printk(KERN_INFO "%s: activation failed\n", dev->name);
 			return;
 		}
-		list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
+
+		/* Can by-pass the queue discipline for default qdisc */
+		qdisc->flags |= TCQ_F_CAN_BYPASS;
 	} else {
 		qdisc = &noqueue_qdisc;
 	}
 	dev_queue->qdisc_sleeping = qdisc;
 }
 
+static void attach_default_qdiscs(struct net_device *dev)
+{
+	struct netdev_queue *txq;
+	struct Qdisc *qdisc;
+
+	txq = netdev_get_tx_queue(dev, 0);
+
+	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+		dev->qdisc = txq->qdisc_sleeping;
+		atomic_inc(&dev->qdisc->refcnt);
+	} else {
+		qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
+		if (qdisc) {
+			qdisc->ops->attach(qdisc);
+			dev->qdisc = qdisc;
+		}
+	}
+}
+
 static void transition_one_qdisc(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
 				 void *_need_watchdog)
 {
+	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
 	int *need_watchdog_p = _need_watchdog;
 
-	spin_lock_bh(&dev_queue->lock);
-	rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
-	if (dev_queue->qdisc != &noqueue_qdisc)
+	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+
+	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
+	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
+		dev_queue->trans_start = 0;
 		*need_watchdog_p = 1;
-	spin_unlock_bh(&dev_queue->lock);
+	}
 }
 
 void dev_activate(struct net_device *dev)
@@ -629,8 +725,8 @@ void dev_activate(struct net_device *dev)
 	   virtual interfaces
 	 */
 
-	if (dev_all_qdisc_sleeping_noop(dev))
-		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+	if (dev->qdisc == &noop_qdisc)
+		attach_default_qdiscs(dev);
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
@@ -638,6 +734,7 @@ void dev_activate(struct net_device *dev)
 
 	need_watchdog = 0;
 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+	transition_one_qdisc(dev, &dev->rx_queue, NULL);
 
 	if (need_watchdog) {
 		dev->trans_start = jiffies;
@@ -650,26 +747,23 @@ static void dev_deactivate_queue(struct net_device *dev,
 				 void *_qdisc_default)
 {
 	struct Qdisc *qdisc_default = _qdisc_default;
-	struct sk_buff *skb = NULL;
 	struct Qdisc *qdisc;
 
-	spin_lock_bh(&dev_queue->lock);
-
 	qdisc = dev_queue->qdisc;
 	if (qdisc) {
-		dev_queue->qdisc = qdisc_default;
-		qdisc_reset(qdisc);
+		spin_lock_bh(qdisc_lock(qdisc));
 
-		skb = qdisc->gso_skb;
-		qdisc->gso_skb = NULL;
-	}
+		if (!(qdisc->flags & TCQ_F_BUILTIN))
+			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
 
-	spin_unlock_bh(&dev_queue->lock);
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+		qdisc_reset(qdisc);
 
-	kfree_skb(skb);
+		spin_unlock_bh(qdisc_lock(qdisc));
+	}
 }
 
-static bool some_qdisc_is_running(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev)
 {
 	unsigned int i;
 
@@ -680,16 +774,15 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 		int val;
 
 		dev_queue = netdev_get_tx_queue(dev, i);
-		q = dev_queue->qdisc;
-		root_lock = qdisc_root_lock(q);
+		q = dev_queue->qdisc_sleeping;
+		root_lock = qdisc_lock(q);
 
-		if (lock)
-			spin_lock_bh(root_lock);
+		spin_lock_bh(root_lock);
 
-		val = test_bit(__QDISC_STATE_RUNNING, &q->state);
+		val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+		       test_bit(__QDISC_STATE_SCHED, &q->state));
 
-		if (lock)
-			spin_unlock_bh(root_lock);
+		spin_unlock_bh(root_lock);
 
 		if (val)
 			return true;
@@ -699,9 +792,8 @@ void dev_deactivate(struct net_device *dev)
 {
-	bool running;
-
 	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -709,25 +801,8 @@ void dev_deactivate(struct net_device *dev)
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	do {
-		while (some_qdisc_is_running(dev, 0))
-			yield();
-
-		/*
-		 * Double-check inside queue lock to ensure that all effects
-		 * of the queue run are visible when we return.
-		 */
-		running = some_qdisc_is_running(dev, 1);
-
-		/*
-		 * The running flag should never be set at this point because
-		 * we've already set dev->qdisc to noop_qdisc *inside* the same
-		 * pair of spin locks. That is, if any qdisc_run starts after
-		 * our initial test it should see the noop_qdisc and then
-		 * clear the RUNNING bit before dropping the queue lock. So
-		 * if it is set here then we've found a bug.
-		 */
-	} while (WARN_ON_ONCE(running));
+	while (some_qdisc_is_busy(dev))
+		yield();
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
@@ -738,15 +813,13 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 	dev_queue->qdisc = qdisc;
 	dev_queue->qdisc_sleeping = qdisc;
-	INIT_LIST_HEAD(&dev_queue->qdisc_list);
 }
 
 void dev_init_scheduler(struct net_device *dev)
 {
-	qdisc_lock_tree(dev);
+	dev->qdisc = &noop_qdisc;
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
-	qdisc_unlock_tree(dev);
+	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
@@ -759,7 +832,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		dev_queue->qdisc = qdisc_default;
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 		dev_queue->qdisc_sleeping = qdisc_default;
 
 		qdisc_destroy(qdisc);
@@ -768,9 +841,10 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 
 void dev_shutdown(struct net_device *dev)
 {
-	qdisc_lock_tree(dev);
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
-	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
-	qdisc_unlock_tree(dev);
+	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	qdisc_destroy(dev->qdisc);
+	dev->qdisc = &noop_qdisc;
+
+	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
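The watchdog rework earlier in this patch moves the timeout decision from "any queue stopped and the device-level trans_start is stale" to a per-queue test. Restated as a hypothetical helper (the ?: fallback covers old drivers that only ever update dev->trans_start):

	static bool queue_timed_out(struct net_device *dev,
				    struct netdev_queue *txq)
	{
		unsigned long start = txq->trans_start ? : dev->trans_start;

		return netif_tx_queue_stopped(txq) &&
		       time_after(jiffies, start + dev->watchdog_timeo);
	}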