/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
- * qdisc_root_lock(qdisc) spinlock.
+ * qdisc_lock(qdisc) spinlock.
*
* The idea is the following:
* - enqueue, dequeue are serialized via qdisc root lock
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
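Concretely, the rule above means every enqueue/dequeue path brackets its work with the per-qdisc lock while BHs are disabled, and tree rewrites run only under rtnl_lock(). A minimal sketch, assuming the qdisc_lock() helper this patch switches to (example_dequeue_locked is illustrative, not part of the patch):

static struct sk_buff *example_dequeue_locked(struct Qdisc *q)
{
	struct sk_buff *skb;

	spin_lock_bh(qdisc_lock(q));	/* serializes enqueue vs. dequeue */
	skb = q->dequeue(q);		/* scheduling state touched under lock */
	spin_unlock_bh(qdisc_lock(q));

	return skb;
}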
-static inline int qdisc_qlen(struct Qdisc *q)
-{
- return q->q.qlen;
-}
-
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
- if (unlikely(skb->next))
- q->gso_skb = skb;
- else
- q->ops->requeue(skb, q);
-
+ q->gso_skb = skb;
+ q->qstats.requeues++;
+ q->q.qlen++; /* it's still part of the queue */
__netif_schedule(q);
+
return 0;
}
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = q->gso_skb;
- if ((skb = q->gso_skb))
- q->gso_skb = NULL;
- else
+ if (unlikely(skb)) {
+ struct net_device *dev = qdisc_dev(q);
+ struct netdev_queue *txq;
+
+ /* first check, without taking the tx lock, why the skb was requeued */
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq)) {
+ q->gso_skb = NULL;
+ q->q.qlen--;
+ } else
+ skb = NULL;
+ } else {
skb = q->dequeue(q);
+ }
return skb;
}
}
/*
- * NOTE: Called under qdisc_lock(q) with locally disabled BH.
- *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
- * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
- * this queue.
- *
- * netif_tx_lock serializes accesses to device driver.
- *
- * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
- * if one is grabbed, another must be free.
- *
- * Note, that this procedure can be called by a watchdog timer
+ * Transmit one skb, and handle the return status as required. Holding the
+ * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * function.
*
* Returns to the caller:
* 0 - queue is empty or throttled.
* >0 - queue is not empty.
- *
*/
-static inline int qdisc_restart(struct Qdisc *q)
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock)
{
- struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
- struct net_device *dev;
- spinlock_t *root_lock;
- struct sk_buff *skb;
-
- /* Dequeue packet */
- if (unlikely((skb = dequeue_skb(q)) == NULL))
- return 0;
-
- root_lock = qdisc_root_lock(q);
/* And release qdisc */
spin_unlock(root_lock);
- dev = qdisc_dev(q);
- txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_subqueue_stopped(dev, skb))
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
break;
}
- if (ret && netif_tx_queue_stopped(txq))
+ if (ret && (netif_tx_queue_stopped(txq) ||
+ netif_tx_queue_frozen(txq)))
ret = 0;
return ret;
}
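sch_direct_xmit() is entered with the qdisc root lock held and must hold it again on return; the hunk above elides the re-lock and the NETDEV_TX_* status switch, but the hand-off implied by the mutual-exclusion note (re-added below) is, as a sketch:

/*
 * Sketch of the lock choreography in sch_direct_xmit() (illustrative;
 * the status switch and the re-lock are elided from the hunk above):
 *
 *	caller holds root_lock
 *	spin_unlock(root_lock);		never hold both locks at once
 *	HARD_TX_LOCK(dev, txq, cpu);	serializes access to the driver
 *	dev_hard_start_xmit(skb, dev, txq);
 *	HARD_TX_UNLOCK(dev, txq);
 *	spin_lock(root_lock);		retaken before qdisc state is touched
 */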
+/*
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
+ *
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
+ *
+ * netif_tx_lock serializes accesses to device driver.
+ *
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
+ * if one is grabbed, another must be free.
+ *
+ * Note that this procedure can be called by a watchdog timer
+ *
+ * Returns to the caller:
+ * 0 - queue is empty or throttled.
+ * >0 - queue is not empty.
+ *
+ */
+static inline int qdisc_restart(struct Qdisc *q)
+{
+ struct netdev_queue *txq;
+ struct net_device *dev;
+ spinlock_t *root_lock;
+ struct sk_buff *skb;
+
+ /* Dequeue packet */
+ skb = dequeue_skb(q);
+ if (unlikely(!skb))
+ return 0;
+
+ root_lock = qdisc_lock(q);
+ dev = qdisc_dev(q);
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
+ return sch_direct_xmit(skb, q, dev, txq, root_lock);
+}
+
void __qdisc_run(struct Qdisc *q)
{
unsigned long start_time = jiffies;
clear_bit(__QDISC_STATE_RUNNING, &q->state);
}
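The loop body of __qdisc_run() is elided above; in kernels of this vintage it amounts to draining packets via qdisc_restart() until the queue empties, another task wants the CPU, or a full tick has passed. A sketch of the elided body, not verbatim:

void __qdisc_run(struct Qdisc *q)		/* sketch of the elided body */
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {		/* >0: queue not empty yet */
		/* Yield if another task needs the CPU or we ran a full tick */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);	/* resume later from softirq */
			break;
		}
	}

	clear_bit(__QDISC_STATE_RUNNING, &q->state);
}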
+unsigned long dev_trans_start(struct net_device *dev)
+{
+ unsigned long val, res = dev->trans_start;
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ val = netdev_get_tx_queue(dev, i)->trans_start;
+ if (val && time_after(val, res))
+ res = val;
+ }
+ dev->trans_start = res;
+ return res;
+}
+EXPORT_SYMBOL(dev_trans_start);
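A plausible consumer of dev_trans_start(): a driver (or the watchdog below) asking whether the freshest start-of-transmission on any tx queue is already stale. Hypothetical helper, using only fields shown in this patch:

static bool example_tx_stale(struct net_device *dev)	/* hypothetical */
{
	/* newest trans_start across all tx queues, cached back into dev */
	return time_after(jiffies,
			  dev_trans_start(dev) + dev->watchdog_timeo);
}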
+
static void dev_watchdog(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
if (netif_device_present(dev) &&
netif_running(dev) &&
netif_carrier_ok(dev)) {
- int some_queue_stopped = 0;
+ int some_queue_timedout = 0;
unsigned int i;
+ unsigned long trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq;
txq = netdev_get_tx_queue(dev, i);
- if (netif_tx_queue_stopped(txq)) {
- some_queue_stopped = 1;
+ /*
+ * Old device drivers set only dev->trans_start; fall back to it
+ * when the per-queue trans_start has never been written.
+ */
+ trans_start = txq->trans_start ? : dev->trans_start;
+ if (netif_tx_queue_stopped(txq) &&
+ time_after(jiffies, (trans_start +
+ dev->watchdog_timeo))) {
+ some_queue_timedout = 1;
break;
}
}
- if (some_queue_stopped &&
- time_after(jiffies, (dev->trans_start +
- dev->watchdog_timeo))) {
- printk(KERN_INFO "NETDEV WATCHDOG: %s: "
- "transmit timed out\n",
- dev->name);
- dev->tx_timeout(dev);
- WARN_ON_ONCE(1);
+ if (some_queue_timedout) {
+ char drivername[64];
+ WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
+ dev->name, netdev_drivername(dev, drivername, 64), i);
+ dev->netdev_ops->ndo_tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies +
void __netdev_watchdog_up(struct net_device *dev)
{
- if (dev->tx_timeout) {
+ if (dev->netdev_ops->ndo_tx_timeout) {
if (dev->watchdog_timeo <= 0)
dev->watchdog_timeo = 5*HZ;
if (!mod_timer(&dev->watchdog_timer,
void netif_carrier_on(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
+ if (dev->reg_state == NETREG_UNINITIALIZED)
+ return;
linkwatch_fire_event(dev);
if (netif_running(dev))
__netdev_watchdog_up(dev);
*/
void netif_carrier_off(struct net_device *dev)
{
- if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
+ if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
+ if (dev->reg_state == NETREG_UNINITIALIZED)
+ return;
linkwatch_fire_event(dev);
+ }
}
EXPORT_SYMBOL(netif_carrier_off);
return NULL;
}
-static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
- if (net_ratelimit())
- printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
- skb->dev->name);
- kfree_skb(skb);
- return NET_XMIT_CN;
-}
-
struct Qdisc_ops noop_qdisc_ops __read_mostly = {
.id = "noop",
.priv_size = 0,
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
- .requeue = noop_requeue,
+ .peek = noop_dequeue,
.owner = THIS_MODULE,
};
static struct netdev_queue noop_netdev_queue = {
.qdisc = &noop_qdisc,
+ .qdisc_sleeping = &noop_qdisc,
};
struct Qdisc noop_qdisc = {
.priv_size = 0,
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
- .requeue = noop_requeue,
+ .peek = noop_dequeue,
.owner = THIS_MODULE,
};
+static struct Qdisc noqueue_qdisc;
+static struct netdev_queue noqueue_netdev_queue = {
+ .qdisc = &noqueue_qdisc,
+ .qdisc_sleeping = &noqueue_qdisc,
+};
+
static struct Qdisc noqueue_qdisc = {
.enqueue = NULL,
.dequeue = noop_dequeue,
.flags = TCQ_F_BUILTIN,
.ops = &noqueue_qdisc_ops,
.list = LIST_HEAD_INIT(noqueue_qdisc.list),
+ .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
+ .dev_queue = &noqueue_netdev_queue,
+};
+
+
+static const u8 prio2band[TC_PRIO_MAX+1] =
+ { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* 3-band FIFO queue: old style, but should be a bit faster than
+   the generic prio+fifo combination.
+ */
+
+#define PFIFO_FAST_BANDS 3
+
+/*
+ * Private data for a pfifo_fast scheduler containing:
+ * - queues for the three bands
+ * - bitmap indicating which of the bands contain skbs
+ */
+struct pfifo_fast_priv {
+ u32 bitmap;
+ struct sk_buff_head q[PFIFO_FAST_BANDS];
};
+/*
+ * Convert a bitmap to the lowest band number with an skb queued, where:
+ * bitmap=0 means there are no skbs on any band.
+ * bitmap=1 means there is an skb on band 0.
+ * bitmap=7 means there are skbs on all 3 bands, etc.
+ */
+static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
+
+static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
+ int band)
+{
+ return priv->q + band;
+}
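The mapping tables are easiest to see with concrete values. The snippet below is standalone plain C (not kernel code) that mirrors prio2band/bitmap2band and shows the lowest non-empty band winning the next dequeue:

#include <stdio.h>

/* copies of the tables above; TC_PRIO_MAX is 15 */
static const unsigned char prio2band[16] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
static const int bitmap2band[] = { -1, 0, 1, 0, 2, 0, 1, 0 };

int main(void)
{
	unsigned bitmap = 0;

	bitmap |= 1 << prio2band[6 & 15];	/* interactive prio -> band 0 */
	bitmap |= 1 << prio2band[2 & 15];	/* bulk prio        -> band 2 */
	/* bitmap == 0b101 == 5; lowest non-empty band is 0 */
	printf("next band: %d\n", bitmap2band[bitmap]);	/* prints 0 */
	return 0;
}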
-static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list = &qdisc->q;
+ if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+ int band = prio2band[skb->priority & TC_PRIO_MAX];
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ struct sk_buff_head *list = band2list(priv, band);
- if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len)
+ priv->bitmap |= (1 << band);
+ qdisc->q.qlen++;
return __qdisc_enqueue_tail(skb, qdisc, list);
+ }
return qdisc_drop(skb, qdisc);
}
-static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
- struct sk_buff_head *list = &qdisc->q;
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ int band = bitmap2band[priv->bitmap];
+
+ if (likely(band >= 0)) {
+ struct sk_buff_head *list = band2list(priv, band);
+ struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
- if (!skb_queue_empty(list))
- return __qdisc_dequeue_head(qdisc, list);
+ qdisc->q.qlen--;
+ if (skb_queue_empty(list))
+ priv->bitmap &= ~(1 << band);
+
+ return skb;
+ }
return NULL;
}
-static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
{
- return __qdisc_requeue(skb, qdisc, &qdisc->q);
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ int band = bitmap2band[priv->bitmap];
+
+ if (band >= 0) {
+ struct sk_buff_head *list = band2list(priv, band);
+
+ return skb_peek(list);
+ }
+
+ return NULL;
}
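The new ->peek op is non-destructive: it returns the skb that the next ->dequeue would hand back, without unlinking it. A sketch of how a caller can pair the two under the qdisc lock (example_peek_then_dequeue is illustrative only):

static struct sk_buff *example_peek_then_dequeue(struct Qdisc *q)
{
	struct sk_buff *skb = q->ops->peek(q);	/* inspect, don't remove */

	if (skb)
		skb = q->ops->dequeue(q);	/* same skb, now unlinked */

	return skb;
}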
-static void fifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc* qdisc)
{
- __qdisc_reset_queue(qdisc, &qdisc->q);
+ int prio;
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+ __qdisc_reset_queue(qdisc, band2list(priv, prio));
+
+ priv->bitmap = 0;
qdisc->qstats.backlog = 0;
+ qdisc->q.qlen = 0;
}
-static struct Qdisc_ops fifo_fast_ops __read_mostly = {
- .id = "fifo_fast",
- .priv_size = 0,
- .enqueue = fifo_fast_enqueue,
- .dequeue = fifo_fast_dequeue,
- .requeue = fifo_fast_requeue,
- .reset = fifo_fast_reset,
+static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
+{
+ struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
+
+ memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+ NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
+
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+{
+ int prio;
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+
+ for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
+ skb_queue_head_init(band2list(priv, prio));
+
+ return 0;
+}
+
+struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+ .id = "pfifo_fast",
+ .priv_size = sizeof(struct pfifo_fast_priv),
+ .enqueue = pfifo_fast_enqueue,
+ .dequeue = pfifo_fast_dequeue,
+ .peek = pfifo_fast_peek,
+ .init = pfifo_fast_init,
+ .reset = pfifo_fast_reset,
+ .dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
}
EXPORT_SYMBOL(qdisc_create_dflt);
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Must be called under qdisc_lock(qdisc) with BHs disabled! */
void qdisc_reset(struct Qdisc *qdisc)
{
if (ops->reset)
ops->reset(qdisc);
+
+ if (qdisc->gso_skb) {
+ kfree_skb(qdisc->gso_skb);
+ qdisc->gso_skb = NULL;
+ qdisc->q.qlen = 0;
+ }
}
EXPORT_SYMBOL(qdisc_reset);
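Spelling out that locking requirement for a hypothetical caller:

static void example_reset_locked(struct Qdisc *q)	/* hypothetical */
{
	spin_lock_bh(qdisc_lock(q));	/* BHs off, queue lock held */
	qdisc_reset(q);			/* also frees gso_skb, zeroes qlen */
	spin_unlock_bh(qdisc_lock(q));
}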
-/* this is the rcu callback function to clean up a qdisc when there
- * are no further references to it */
-
-static void __qdisc_destroy(struct rcu_head *head)
+void qdisc_destroy(struct Qdisc *qdisc)
{
- struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
const struct Qdisc_ops *ops = qdisc->ops;
+ if (qdisc->flags & TCQ_F_BUILTIN ||
+ !atomic_dec_and_test(&qdisc->refcnt))
+ return;
+
+#ifdef CONFIG_NET_SCHED
+ qdisc_list_del(qdisc);
+
+ qdisc_put_stab(qdisc->stab);
+#endif
gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
if (ops->reset)
ops->reset(qdisc);
dev_put(qdisc_dev(qdisc));
kfree_skb(qdisc->gso_skb);
-
kfree((char *) qdisc - qdisc->padded);
}
+EXPORT_SYMBOL(qdisc_destroy);
-/* Under qdisc_root_lock(qdisc) and BH! */
-
-void qdisc_destroy(struct Qdisc *qdisc)
+/* Attach toplevel qdisc to device queue. */
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc)
{
- struct net_device *dev = qdisc_dev(qdisc);
-
- if (qdisc->flags & TCQ_F_BUILTIN ||
- !atomic_dec_and_test(&qdisc->refcnt))
- return;
+ struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+ spinlock_t *root_lock;
- spin_lock_bh(&dev->qdisc_list_lock);
- list_del(&qdisc->list);
- spin_unlock_bh(&dev->qdisc_list_lock);
+ root_lock = qdisc_lock(oqdisc);
+ spin_lock_bh(root_lock);
- call_rcu(&qdisc->q_rcu, __qdisc_destroy);
-}
-EXPORT_SYMBOL(qdisc_destroy);
+ /* Prune old scheduler */
+ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+ qdisc_reset(oqdisc);
-static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
-{
- unsigned int i;
+ /* ... and graft new one */
+ if (qdisc == NULL)
+ qdisc = &noop_qdisc;
+ dev_queue->qdisc_sleeping = qdisc;
+ rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ spin_unlock_bh(root_lock);
- if (txq->qdisc_sleeping != &noop_qdisc)
- return false;
- }
- return true;
+ return oqdisc;
}
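dev_graft_qdisc() hands back the previous sleeping qdisc so the caller can dispose of it once its last reference drops. A sketch of the expected graft-then-destroy sequence (example_replace_root is illustrative; callers hold the RTNL mutex):

static void example_replace_root(struct netdev_queue *dev_queue,
				 struct Qdisc *new_qdisc)
{
	struct Qdisc *old = dev_graft_qdisc(dev_queue, new_qdisc);

	if (old)
		qdisc_destroy(old);	/* no-op for builtins / extra refs */
}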
static void attach_one_default_qdisc(struct net_device *dev,
if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev, dev_queue,
- &fifo_fast_ops, TC_H_ROOT);
+ &pfifo_fast_ops, TC_H_ROOT);
if (!qdisc) {
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
- spin_lock_bh(&dev->qdisc_list_lock);
- list_add_tail(&qdisc->list, &dev->qdisc_list);
- spin_unlock_bh(&dev->qdisc_list_lock);
+
+ /* Can bypass the queue discipline for the default qdisc */
+ qdisc->flags |= TCQ_F_CAN_BYPASS;
} else {
qdisc = &noqueue_qdisc;
}
dev_queue->qdisc_sleeping = qdisc;
}
+static void attach_default_qdiscs(struct net_device *dev)
+{
+ struct netdev_queue *txq;
+ struct Qdisc *qdisc;
+
+ txq = netdev_get_tx_queue(dev, 0);
+
+ if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+ netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+ dev->qdisc = txq->qdisc_sleeping;
+ atomic_inc(&dev->qdisc->refcnt);
+ } else {
+ qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
+ if (qdisc) {
+ qdisc->ops->attach(qdisc);
+ dev->qdisc = qdisc;
+ }
+ }
+}
+
static void transition_one_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_need_watchdog)
struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
int *need_watchdog_p = _need_watchdog;
+ if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+ clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
- if (new_qdisc != &noqueue_qdisc)
+ if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
+ dev_queue->trans_start = 0;
*need_watchdog_p = 1;
+ }
}
void dev_activate(struct net_device *dev)
int need_watchdog;
/* No queueing discipline is attached to device;
- * create default one i.e. fifo_fast for devices,
- * which need queueing and noqueue_qdisc for
- * virtual interfaces.
+ create a default one, i.e. pfifo_fast for devices
+ which need queueing, and noqueue_qdisc for
+ virtual interfaces
*/
- if (dev_all_qdisc_sleeping_noop(dev))
- netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+ if (dev->qdisc == &noop_qdisc)
+ attach_default_qdiscs(dev);
if (!netif_carrier_ok(dev))
/* Delay activation until next carrier-on event */
need_watchdog = 0;
netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+ transition_one_qdisc(dev, &dev->rx_queue, NULL);
if (need_watchdog) {
dev->trans_start = jiffies;
void *_qdisc_default)
{
struct Qdisc *qdisc_default = _qdisc_default;
- struct sk_buff *skb = NULL;
struct Qdisc *qdisc;
qdisc = dev_queue->qdisc;
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
- dev_queue->qdisc = qdisc_default;
+ if (!(qdisc->flags & TCQ_F_BUILTIN))
+ set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
-
- kfree_skb(skb);
}
-static bool some_qdisc_is_running(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
int val;
dev_queue = netdev_get_tx_queue(dev, i);
- q = dev_queue->qdisc;
- root_lock = qdisc_root_lock(q);
+ q = dev_queue->qdisc_sleeping;
+ root_lock = qdisc_lock(q);
- if (lock)
- spin_lock_bh(root_lock);
+ spin_lock_bh(root_lock);
- val = test_bit(__QDISC_STATE_RUNNING, &q->state);
+ val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+ test_bit(__QDISC_STATE_SCHED, &q->state));
- if (lock)
- spin_unlock_bh(root_lock);
+ spin_unlock_bh(root_lock);
if (val)
return true;
void dev_deactivate(struct net_device *dev)
{
- bool running;
-
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+ dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
dev_watchdog_down(dev);
synchronize_rcu();
/* Wait for outstanding qdisc_run calls. */
- do {
- while (some_qdisc_is_running(dev, 0))
- yield();
-
- /*
- * Double-check inside queue lock to ensure that all effects
- * of the queue run are visible when we return.
- */
- running = some_qdisc_is_running(dev, 1);
-
- /*
- * The running flag should never be set at this point because
- * we've already set dev->qdisc to noop_qdisc *inside* the same
- * pair of spin locks. That is, if any qdisc_run starts after
- * our initial test it should see the noop_qdisc and then
- * clear the RUNNING bit before dropping the queue lock. So
- * if it is set here then we've found a bug.
- */
- } while (WARN_ON_ONCE(running));
+ while (some_qdisc_is_busy(dev))
+ yield();
}
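These primitives compose into the usual quiesce-reconfigure-resume pattern. The sequence below is a sketch of a hypothetical caller; rtnl_lock() provides the tree-update serialization noted at the top of the file:

static void example_reconfigure(struct net_device *dev)	/* hypothetical */
{
	rtnl_lock();		/* tree updates happen only under RTNL */
	dev_deactivate(dev);	/* swap in noop_qdisc, wait out RUNNING/SCHED */
	/* ... change qdiscs or queue parameters here ... */
	dev_activate(dev);	/* re-attach qdiscs, restart the watchdog */
	rtnl_unlock();
}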
static void dev_init_scheduler_queue(struct net_device *dev,
void dev_init_scheduler(struct net_device *dev)
{
+ dev->qdisc = &noop_qdisc;
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
- dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
+ dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
- spinlock_t *root_lock = qdisc_root_lock(qdisc);
-
- dev_queue->qdisc = qdisc_default;
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
dev_queue->qdisc_sleeping = qdisc_default;
- spin_lock(root_lock);
qdisc_destroy(qdisc);
- spin_unlock(root_lock);
}
}
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
- shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
- BUG_TRAP(!timer_pending(&dev->watchdog_timer));
+ shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ qdisc_destroy(dev->qdisc);
+ dev->qdisc = &noop_qdisc;
+
+ WARN_ON(timer_pending(&dev->watchdog_timer));
}