* - Ingress support
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
#include <linux/bitops.h>
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
/* Main transmission queue. */
-/* Main qdisc structure lock.
-
- However, modifications
- to data, participating in scheduling must be additionally
- protected with dev->queue_lock spinlock.
-
- The idea is the following:
- - enqueue, dequeue are serialized via top level device
- spinlock dev->queue_lock.
- - tree walking is protected by read_lock_bh(qdisc_tree_lock)
- and this lock is used only in process context.
- - updates to tree are made under rtnl semaphore or
- from softirq context (__qdisc_destroy rcu-callback)
- hence this lock needs local bh disabling.
-
- qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+/* Modifications to data participating in scheduling must be protected with
+ * the qdisc_lock(qdisc) spinlock.
+ *
+ * The idea is the following:
+ * - enqueue and dequeue are serialized via the qdisc root lock
+ * - ingress filtering is also serialized via the qdisc root lock
+ * - updates to the tree and tree walking are done only under the rtnl mutex.
*/
-DEFINE_RWLOCK(qdisc_tree_lock);
-void qdisc_lock_tree(struct net_device *dev)
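+/* Number of packets currently queued inside this qdisc. */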
+static inline int qdisc_qlen(struct Qdisc *q)
{
- write_lock_bh(&qdisc_tree_lock);
- spin_lock_bh(&dev->queue_lock);
+ return q->q.qlen;
}
-void qdisc_unlock_tree(struct net_device *dev)
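+/*
+ * Put skb back at the head of q->requeue so it is retried before
+ * anything still queued in the qdisc, then reschedule the qdisc.
+ */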
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
- spin_unlock_bh(&dev->queue_lock);
- write_unlock_bh(&qdisc_tree_lock);
+ __skb_queue_head(&q->requeue, skb);
+
+ __netif_schedule(q);
+ return 0;
}
-/*
- dev->queue_lock serializes queue accesses for this device
- AND dev->qdisc pointer itself.
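+/* Dequeue the next packet, preferring earlier requeues over ->dequeue(). */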
+static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
+{
+ struct sk_buff *skb;
- dev->xmit_lock serializes accesses to device driver.
-
- dev->queue_lock and dev->xmit_lock are mutually exclusive,
- if one is grabbed, another must be free.
- */
+ skb = __skb_dequeue(&q->requeue);
+ if (!skb)
+ skb = q->dequeue(q);
+ return skb;
+}
-/* Kick device.
- Note, that this procedure can be called by a watchdog timer, so that
- we do not check dev->tbusy flag here.
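+/*
+ * The driver's trylock on the tx queue failed: either the same CPU is
+ * recursing into hard_start_xmit() (drop the packet) or another CPU
+ * holds the lock (requeue and retry later).
+ */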
+static inline int handle_dev_cpu_collision(struct sk_buff *skb,
+ struct netdev_queue *dev_queue,
+ struct Qdisc *q)
+{
+ int ret;
- Returns: 0 - queue is empty.
- >0 - queue is not empty, but throttled.
- <0 - queue is not empty. Device is throttled, if dev->tbusy != 0.
+ if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
+ /*
+ * Same CPU is holding the lock. This may be a transient
+ * configuration error, when hard_start_xmit() recurses; we
+ * detect it by checking the xmit owner and drop the packet
+ * when a deadloop is detected. Return OK to try the next skb.
+ */
+ kfree_skb(skb);
+ if (net_ratelimit())
+ printk(KERN_WARNING "Dead loop on netdevice %s, "
+ "fix it urgently!\n", dev_queue->dev->name);
+ ret = qdisc_qlen(q);
+ } else {
+ /*
+ * Another CPU is holding the lock: requeue the skb and
+ * delay xmits for some time.
+ */
+ __get_cpu_var(netdev_rx_stat).cpu_collision++;
+ ret = dev_requeue_skb(skb, q);
+ }
- NOTE: Called under dev->queue_lock with locally disabled BH.
-*/
+ return ret;
+}
-int qdisc_restart(struct net_device *dev)
+/*
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
+ *
+ * __QDISC_STATE_RUNNING guarantees that only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
+ *
+ * netif_tx_lock serializes accesses to the device driver.
+ *
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
+ * if one is grabbed, the other must be free.
+ *
+ * Note that this procedure can also be called by a watchdog timer.
+ *
+ * Returns to the caller:
+ * 0 - queue is empty or throttled.
+ * >0 - queue is not empty.
+ */
+static inline int qdisc_restart(struct Qdisc *q)
{
- struct Qdisc *q = dev->qdisc;
+ struct netdev_queue *txq;
+ int ret = NETDEV_TX_BUSY;
+ struct net_device *dev;
+ spinlock_t *root_lock;
struct sk_buff *skb;
/* Dequeue packet */
- if ((skb = q->dequeue(q)) != NULL) {
- unsigned nolock = (dev->features & NETIF_F_LLTX);
- /*
- * When the driver has LLTX set it does its own locking
- * in start_xmit. No need to add additional overhead by
- * locking again. These checks are worth it because
- * even uncongested locks can be quite expensive.
- * The driver can do trylock like here too, in case
- * of lock congestion it should return -1 and the packet
- * will be requeued.
- */
- if (!nolock) {
- if (!spin_trylock(&dev->xmit_lock)) {
- collision:
- /* So, someone grabbed the driver. */
-
- /* It may be transient configuration error,
- when hard_start_xmit() recurses. We detect
- it by checking xmit owner and drop the
- packet when deadloop is detected.
- */
- if (dev->xmit_lock_owner == smp_processor_id()) {
- kfree_skb(skb);
- if (net_ratelimit())
- printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
- return -1;
- }
- __get_cpu_var(netdev_rx_stat).cpu_collision++;
- goto requeue;
- }
- /* Remember that the driver is grabbed by us. */
- dev->xmit_lock_owner = smp_processor_id();
- }
-
- {
- /* And release queue */
- spin_unlock(&dev->queue_lock);
-
- if (!netif_queue_stopped(dev)) {
- int ret;
- if (netdev_nit)
- dev_queue_xmit_nit(skb, dev);
-
- ret = dev->hard_start_xmit(skb, dev);
- if (ret == NETDEV_TX_OK) {
- if (!nolock) {
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->xmit_lock);
- }
- spin_lock(&dev->queue_lock);
- return -1;
- }
- if (ret == NETDEV_TX_LOCKED && nolock) {
- spin_lock(&dev->queue_lock);
- goto collision;
- }
- }
+ if (unlikely((skb = dequeue_skb(q)) == NULL))
+ return 0;
- /* NETDEV_TX_BUSY - we need to requeue */
- /* Release the driver */
- if (!nolock) {
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->xmit_lock);
- }
- spin_lock(&dev->queue_lock);
- q = dev->qdisc;
- }
+ root_lock = qdisc_lock(q);
- /* Device kicked us out :(
- This is possible in three cases:
+ /* And release the qdisc root lock */
+ spin_unlock(root_lock);
- 0. driver is locked
- 1. fastroute is enabled
- 2. device cannot determine busy state
- before start of transmission (f.e. dialout)
- 3. device is buggy (ppp)
- */
+ dev = qdisc_dev(q);
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-requeue:
- q->ops->requeue(skb, q);
- netif_schedule(dev);
- return 1;
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq))
+ ret = dev_hard_start_xmit(skb, dev, txq);
+ HARD_TX_UNLOCK(dev, txq);
+
+ spin_lock(root_lock);
+
+ switch (ret) {
+ case NETDEV_TX_OK:
+ /* Driver sent out skb successfully */
+ ret = qdisc_qlen(q);
+ break;
+
+ case NETDEV_TX_LOCKED:
+ /* Driver trylock failed */
+ ret = handle_dev_cpu_collision(skb, txq, q);
+ break;
+
+ default:
+ /* Driver returned NETDEV_TX_BUSY - requeue skb */
+ if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
+ printk(KERN_WARNING "BUG %s code %d qlen %d\n",
+ dev->name, ret, q->q.qlen);
+
+ ret = dev_requeue_skb(skb, q);
+ break;
}
- BUG_ON((int) q->q.qlen < 0);
- return q->q.qlen;
+
+ if (ret && (netif_tx_queue_stopped(txq) ||
+ netif_tx_queue_frozen(txq)))
+ ret = 0;
+
+ return ret;
+}
+
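+/*
+ * Drain the qdisc until it is empty or the driver pushes back; if
+ * another task needs the CPU or a jiffy has passed, defer the rest
+ * to the NET_TX softirq via __netif_schedule(). Clears
+ * __QDISC_STATE_RUNNING on exit.
+ */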
+void __qdisc_run(struct Qdisc *q)
+{
+ unsigned long start_time = jiffies;
+
+ while (qdisc_restart(q)) {
+ /*
+ * Postpone processing if
+ * 1. another process needs the CPU;
+ * 2. we've been doing it for too long.
+ */
+ if (need_resched() || jiffies != start_time) {
+ __netif_schedule(q);
+ break;
+ }
+ }
+
+ clear_bit(__QDISC_STATE_RUNNING, &q->state);
}
static void dev_watchdog(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
- spin_lock(&dev->xmit_lock);
- if (dev->qdisc != &noop_qdisc) {
+ netif_tx_lock(dev);
+ if (!qdisc_tx_is_noop(dev)) {
if (netif_device_present(dev) &&
netif_running(dev) &&
netif_carrier_ok(dev)) {
- if (netif_queue_stopped(dev) &&
- (jiffies - dev->trans_start) > dev->watchdog_timeo) {
- printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
+ int some_queue_stopped = 0;
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, i);
+ if (netif_tx_queue_stopped(txq)) {
+ some_queue_stopped = 1;
+ break;
+ }
+ }
+
+ if (some_queue_stopped &&
+ time_after(jiffies, (dev->trans_start +
+ dev->watchdog_timeo))) {
+ char drivername[64];
+ WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+ dev->name, netdev_drivername(dev, drivername, 64));
dev->tx_timeout(dev);
}
- if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
+ if (!mod_timer(&dev->watchdog_timer,
+ round_jiffies(jiffies +
+ dev->watchdog_timeo)))
dev_hold(dev);
}
}
- spin_unlock(&dev->xmit_lock);
+ netif_tx_unlock(dev);
dev_put(dev);
}
-static void dev_watchdog_init(struct net_device *dev)
-{
- init_timer(&dev->watchdog_timer);
- dev->watchdog_timer.data = (unsigned long)dev;
- dev->watchdog_timer.function = dev_watchdog;
-}
-
void __netdev_watchdog_up(struct net_device *dev)
{
if (dev->tx_timeout) {
if (dev->watchdog_timeo <= 0)
dev->watchdog_timeo = 5*HZ;
- if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
+ if (!mod_timer(&dev->watchdog_timer,
+ round_jiffies(jiffies + dev->watchdog_timeo)))
dev_hold(dev);
}
}
static void dev_watchdog_up(struct net_device *dev)
{
- spin_lock_bh(&dev->xmit_lock);
__netdev_watchdog_up(dev);
- spin_unlock_bh(&dev->xmit_lock);
}
static void dev_watchdog_down(struct net_device *dev)
{
- spin_lock_bh(&dev->xmit_lock);
+ netif_tx_lock_bh(dev);
if (del_timer(&dev->watchdog_timer))
- __dev_put(dev);
- spin_unlock_bh(&dev->xmit_lock);
+ dev_put(dev);
+ netif_tx_unlock_bh(dev);
+}
+
+/**
+ * netif_carrier_on - set carrier
+ * @dev: network device
+ *
+ * Device has detected acquisition of carrier.
+ */
+void netif_carrier_on(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
+ linkwatch_fire_event(dev);
+ if (netif_running(dev))
+ __netdev_watchdog_up(dev);
+ }
+}
+EXPORT_SYMBOL(netif_carrier_on);
+
+/**
+ * netif_carrier_off - clear carrier
+ * @dev: network device
+ *
+ * Device has detected loss of carrier.
+ */
+void netif_carrier_off(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
+ linkwatch_fire_event(dev);
}
+EXPORT_SYMBOL(netif_carrier_off);
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
return NET_XMIT_CN;
}
-struct Qdisc_ops noop_qdisc_ops = {
+struct Qdisc_ops noop_qdisc_ops __read_mostly = {
.id = "noop",
.priv_size = 0,
.enqueue = noop_enqueue,
.owner = THIS_MODULE,
};
+static struct netdev_queue noop_netdev_queue = {
+ .qdisc = &noop_qdisc,
+};
+
struct Qdisc noop_qdisc = {
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.flags = TCQ_F_BUILTIN,
- .ops = &noop_qdisc_ops,
+ .ops = &noop_qdisc_ops,
.list = LIST_HEAD_INIT(noop_qdisc.list),
+ .requeue.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+ .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+ .dev_queue = &noop_netdev_queue,
};
+EXPORT_SYMBOL(noop_qdisc);
-static struct Qdisc_ops noqueue_qdisc_ops = {
+static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
.id = "noqueue",
.priv_size = 0,
.enqueue = noop_enqueue,
.owner = THIS_MODULE,
};
+static struct Qdisc noqueue_qdisc;
+static struct netdev_queue noqueue_netdev_queue = {
+ .qdisc = &noqueue_qdisc,
+};
+
static struct Qdisc noqueue_qdisc = {
.enqueue = NULL,
.dequeue = noop_dequeue,
.flags = TCQ_F_BUILTIN,
.ops = &noqueue_qdisc_ops,
.list = LIST_HEAD_INIT(noqueue_qdisc.list),
+ .requeue.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
+ .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
+ .dev_queue = &noqueue_netdev_queue,
};
{
struct sk_buff_head *list = prio2list(skb, qdisc);
- if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
+ if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
qdisc->q.qlen++;
return __qdisc_enqueue_tail(skb, qdisc, list);
}
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
- RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
return skb->len;
-rtattr_failure:
+nla_put_failure:
return -1;
}
-static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
int prio;
struct sk_buff_head *list = qdisc_priv(qdisc);
return 0;
}
-static struct Qdisc_ops pfifo_fast_ops = {
+static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.id = "pfifo_fast",
.priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
.enqueue = pfifo_fast_enqueue,
.owner = THIS_MODULE,
};
-struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ struct Qdisc_ops *ops)
{
void *p;
struct Qdisc *sch;
size = QDISC_ALIGN(sizeof(*sch));
size += ops->priv_size + (QDISC_ALIGNTO - 1);
- p = kmalloc(size, GFP_KERNEL);
+ p = kzalloc(size, GFP_KERNEL);
if (!p)
goto errout;
- memset(p, 0, size);
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch->padded = (char *) sch - (char *) p;
INIT_LIST_HEAD(&sch->list);
+ skb_queue_head_init(&sch->requeue);
skb_queue_head_init(&sch->q);
sch->ops = ops;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
- sch->dev = dev;
- dev_hold(dev);
- sch->stats_lock = &dev->queue_lock;
+ sch->dev_queue = dev_queue;
+ dev_hold(qdisc_dev(sch));
atomic_set(&sch->refcnt, 1);
return sch;
errout:
- return ERR_PTR(-err);
+ return ERR_PTR(err);
}
-struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
+struct Qdisc * qdisc_create_dflt(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ struct Qdisc_ops *ops,
+ unsigned int parentid)
{
struct Qdisc *sch;
-
- sch = qdisc_alloc(dev, ops);
+
+ sch = qdisc_alloc(dev_queue, ops);
if (IS_ERR(sch))
goto errout;
+ sch->parent = parentid;
if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
errout:
return NULL;
}
+EXPORT_SYMBOL(qdisc_create_dflt);
-/* Under dev->queue_lock and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
void qdisc_reset(struct Qdisc *qdisc)
{
- struct Qdisc_ops *ops = qdisc->ops;
+ const struct Qdisc_ops *ops = qdisc->ops;
if (ops->reset)
ops->reset(qdisc);
}
+EXPORT_SYMBOL(qdisc_reset);
-/* this is the rcu callback function to clean up a qdisc when there
- * are no further references to it */
-
-static void __qdisc_destroy(struct rcu_head *head)
+void qdisc_destroy(struct Qdisc *qdisc)
{
- struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
- struct Qdisc_ops *ops = qdisc->ops;
+ const struct Qdisc_ops *ops = qdisc->ops;
-#ifdef CONFIG_NET_ESTIMATOR
- gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+ if (qdisc->flags & TCQ_F_BUILTIN ||
+ !atomic_dec_and_test(&qdisc->refcnt))
+ return;
+
+#ifdef CONFIG_NET_SCHED
+ qdisc_list_del(qdisc);
+
+ qdisc_put_stab(qdisc->stab);
#endif
- write_lock(&qdisc_tree_lock);
+ gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
ops->destroy(qdisc);
- write_unlock(&qdisc_tree_lock);
+
module_put(ops->owner);
+ dev_put(qdisc_dev(qdisc));
+
+ __skb_queue_purge(&qdisc->requeue);
- dev_put(qdisc->dev);
kfree((char *) qdisc - qdisc->padded);
}
+EXPORT_SYMBOL(qdisc_destroy);
-/* Under dev->queue_lock and BH! */
-
-void qdisc_destroy(struct Qdisc *qdisc)
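+/* True if every tx queue still has noop_qdisc as its sleeping qdisc. */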
+static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
{
- struct list_head cql = LIST_HEAD_INIT(cql);
- struct Qdisc *cq, *q, *n;
+ unsigned int i;
- if (qdisc->flags & TCQ_F_BUILTIN ||
- !atomic_dec_and_test(&qdisc->refcnt))
- return;
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- if (!list_empty(&qdisc->list)) {
- if (qdisc->ops->cl_ops == NULL)
- list_del(&qdisc->list);
- else
- list_move(&qdisc->list, &cql);
+ if (txq->qdisc_sleeping != &noop_qdisc)
+ return false;
}
+ return true;
+}
- /* unlink inner qdiscs from dev->qdisc_list immediately */
- list_for_each_entry(cq, &cql, list)
- list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
- if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
- if (q->ops->cl_ops == NULL)
- list_del_init(&q->list);
- else
- list_move_tail(&q->list, &cql);
- }
- list_for_each_entry_safe(cq, n, &cql, list)
- list_del_init(&cq->list);
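+/*
+ * Give one tx queue its default qdisc: pfifo_fast for devices that
+ * queue packets, the static noqueue_qdisc for virtual interfaces with
+ * tx_queue_len == 0.
+ */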
+static void attach_one_default_qdisc(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_unused)
+{
+ struct Qdisc *qdisc;
- call_rcu(&qdisc->q_rcu, __qdisc_destroy);
+ if (dev->tx_queue_len) {
+ qdisc = qdisc_create_dflt(dev, dev_queue,
+ &pfifo_fast_ops, TC_H_ROOT);
+ if (!qdisc) {
+ printk(KERN_INFO "%s: activation failed\n", dev->name);
+ return;
+ }
+ } else {
+ qdisc = &noqueue_qdisc;
+ }
+ dev_queue->qdisc_sleeping = qdisc;
+}
+
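+/*
+ * Activate the queue's sleeping qdisc and tell the caller whether a
+ * watchdog is needed (it is, unless the queue stays on noqueue_qdisc).
+ */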
+static void transition_one_qdisc(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_need_watchdog)
+{
+ struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
+ int *need_watchdog_p = _need_watchdog;
+
+ if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+ clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+
+ rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
+ if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
+ *need_watchdog_p = 1;
}
void dev_activate(struct net_device *dev)
{
+ int need_watchdog;
+
/* No queueing discipline is attached to device;
create default one i.e. pfifo_fast for devices,
which need queueing and noqueue_qdisc for
virtual interfaces
*/
- if (dev->qdisc_sleeping == &noop_qdisc) {
- struct Qdisc *qdisc;
- if (dev->tx_queue_len) {
- qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
- if (qdisc == NULL) {
- printk(KERN_INFO "%s: activation failed\n", dev->name);
- return;
- }
- write_lock_bh(&qdisc_tree_lock);
- list_add_tail(&qdisc->list, &dev->qdisc_list);
- write_unlock_bh(&qdisc_tree_lock);
- } else {
- qdisc = &noqueue_qdisc;
- }
- write_lock_bh(&qdisc_tree_lock);
- dev->qdisc_sleeping = qdisc;
- write_unlock_bh(&qdisc_tree_lock);
- }
+ if (dev_all_qdisc_sleeping_noop(dev))
+ netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
if (!netif_carrier_ok(dev))
/* Delay activation until next carrier-on event */
return;
- spin_lock_bh(&dev->queue_lock);
- rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
- if (dev->qdisc != &noqueue_qdisc) {
+ need_watchdog = 0;
+ netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+ transition_one_qdisc(dev, &dev->rx_queue, NULL);
+
+ if (need_watchdog) {
dev->trans_start = jiffies;
dev_watchdog_up(dev);
}
- spin_unlock_bh(&dev->queue_lock);
}
-void dev_deactivate(struct net_device *dev)
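+/*
+ * Replace the queue's active qdisc with qdisc_default and reset the
+ * old one, dropping any packets it still holds.
+ */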
+static void dev_deactivate_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_qdisc_default)
{
+ struct Qdisc *qdisc_default = _qdisc_default;
struct Qdisc *qdisc;
- spin_lock_bh(&dev->queue_lock);
- qdisc = dev->qdisc;
- dev->qdisc = &noop_qdisc;
+ qdisc = dev_queue->qdisc;
+ if (qdisc) {
+ spin_lock_bh(qdisc_lock(qdisc));
+
+ if (!(qdisc->flags & TCQ_F_BUILTIN))
+ set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+ qdisc_reset(qdisc);
+
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+}
+
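+/*
+ * Check every tx queue, under its root lock, for a sleeping qdisc that
+ * is still running or scheduled.
+ */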
+static bool some_qdisc_is_busy(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *dev_queue;
+ spinlock_t *root_lock;
+ struct Qdisc *q;
+ int val;
+
+ dev_queue = netdev_get_tx_queue(dev, i);
+ q = dev_queue->qdisc_sleeping;
+ root_lock = qdisc_lock(q);
+
+ spin_lock_bh(root_lock);
- qdisc_reset(qdisc);
+ val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+ test_bit(__QDISC_STATE_SCHED, &q->state));
+
+ spin_unlock_bh(root_lock);
+
+ if (val)
+ return true;
+ }
+ return false;
+}
- spin_unlock_bh(&dev->queue_lock);
+void dev_deactivate(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+ dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
dev_watchdog_down(dev);
- while (test_bit(__LINK_STATE_SCHED, &dev->state))
+ /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
+ synchronize_rcu();
+
+ /* Wait for outstanding qdisc_run calls. */
+ while (some_qdisc_is_busy(dev))
yield();
+}
- spin_unlock_wait(&dev->xmit_lock);
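+/* Point both the active and the sleeping qdisc of a queue at _qdisc. */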
+static void dev_init_scheduler_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_qdisc)
+{
+ struct Qdisc *qdisc = _qdisc;
+
+ dev_queue->qdisc = qdisc;
+ dev_queue->qdisc_sleeping = qdisc;
}
void dev_init_scheduler(struct net_device *dev)
{
- qdisc_lock_tree(dev);
- dev->qdisc = &noop_qdisc;
- dev->qdisc_sleeping = &noop_qdisc;
- INIT_LIST_HEAD(&dev->qdisc_list);
- qdisc_unlock_tree(dev);
+ netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
+ dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
- dev_watchdog_init(dev);
+ setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
-void dev_shutdown(struct net_device *dev)
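+/*
+ * Detach and destroy the queue's sleeping qdisc, leaving the queue
+ * parked on qdisc_default.
+ */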
+static void shutdown_scheduler_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_qdisc_default)
{
- struct Qdisc *qdisc;
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *qdisc_default = _qdisc_default;
+
+ if (qdisc) {
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+ dev_queue->qdisc_sleeping = qdisc_default;
- qdisc_lock_tree(dev);
- qdisc = dev->qdisc_sleeping;
- dev->qdisc = &noop_qdisc;
- dev->qdisc_sleeping = &noop_qdisc;
- qdisc_destroy(qdisc);
-#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
- if ((qdisc = dev->qdisc_ingress) != NULL) {
- dev->qdisc_ingress = NULL;
qdisc_destroy(qdisc);
- }
-#endif
- BUG_TRAP(!timer_pending(&dev->watchdog_timer));
- qdisc_unlock_tree(dev);
+ }
}
-EXPORT_SYMBOL(__netdev_watchdog_up);
-EXPORT_SYMBOL(noop_qdisc);
-EXPORT_SYMBOL(noop_qdisc_ops);
-EXPORT_SYMBOL(qdisc_create_dflt);
-EXPORT_SYMBOL(qdisc_alloc);
-EXPORT_SYMBOL(qdisc_destroy);
-EXPORT_SYMBOL(qdisc_reset);
-EXPORT_SYMBOL(qdisc_restart);
-EXPORT_SYMBOL(qdisc_lock_tree);
-EXPORT_SYMBOL(qdisc_unlock_tree);
+void dev_shutdown(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+ shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ WARN_ON(timer_pending(&dev->watchdog_timer));
+}