{
__QDISC_STATE_RUNNING,
__QDISC_STATE_SCHED,
+ __QDISC_STATE_DEACTIVATED,
+};
+
+struct qdisc_size_table {
+ struct list_head list;
+ struct tc_sizespec szopts;
+ int refcnt;
+ u16 data[];
};
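
The size table backs the tc "stab" option: instead of scheduling on the raw skb->len, a qdisc can translate the length through a user-supplied table, e.g. to model link-layer overhead or ATM cell padding. A minimal sketch of such a lookup, assuming the tc_sizespec fields (overhead, cell_log, size_log, tsize) from linux/pkt_sched.h; the exact rounding in the real qdisc_calculate_pkt_len() may differ:

	/* Sketch only: derive the scheduling length from a size table.
	 * stab_lookup_len() is a hypothetical helper, not kernel API. */
	static unsigned int stab_lookup_len(const struct sk_buff *skb,
					    const struct qdisc_size_table *stab)
	{
		int len = skb->len + stab->szopts.overhead;
		int slot = len >> stab->szopts.cell_log;  /* cell index into data[] */

		if (slot < stab->szopts.tsize)
			len = stab->data[slot] << stab->szopts.size_log;

		return len < 1 ? 1 : len;	/* never report a zero length */
	}
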
struct Qdisc
int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
struct sk_buff * (*dequeue)(struct Qdisc *dev);
unsigned flags;
-#define TCQ_F_BUILTIN 1
-#define TCQ_F_THROTTLED 2
-#define TCQ_F_INGRESS 4
+#define TCQ_F_BUILTIN 1
+#define TCQ_F_THROTTLED 2
+#define TCQ_F_INGRESS 4
+#define TCQ_F_WARN_NONWC (1 << 16)
int padded;
struct Qdisc_ops *ops;
+ struct qdisc_size_table *stab;
+ struct list_head list;
u32 handle;
u32 parent;
atomic_t refcnt;
- unsigned long state;
- struct sk_buff *gso_skb;
- struct sk_buff_head q;
- struct netdev_queue *dev_queue;
- struct Qdisc *next_sched;
- struct list_head list;
-
- struct gnet_stats_basic bstats;
- struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
- struct rcu_head q_rcu;
int (*reshape_fail)(struct sk_buff *skb,
struct Qdisc *q);
+ void *u32_node;
+
/* This field is deprecated, but it is still used by CBQ
 * and it will live until a better solution is invented.
 */
struct Qdisc *__parent;
+ struct netdev_queue *dev_queue;
+ struct Qdisc *next_sched;
+
+ struct sk_buff *gso_skb;
+ /*
+ * For performance's sake on SMP, the most frequently modified fields
+ * are placed at the end.
+ */
+ unsigned long state;
+ struct sk_buff_head q;
+ struct gnet_stats_basic bstats;
+ struct gnet_stats_queue qstats;
};
struct Qdisc_class_ops
int (*enqueue)(struct sk_buff *, struct Qdisc *);
struct sk_buff * (*dequeue)(struct Qdisc *);
- int (*requeue)(struct sk_buff *, struct Qdisc *);
+ struct sk_buff * (*peek)(struct Qdisc *);
unsigned int (*drop)(struct Qdisc *);
int (*init)(struct Qdisc *, struct nlattr *arg);
struct tcf_proto_ops *ops;
};
+struct qdisc_skb_cb {
+ unsigned int pkt_len;
+ char data[];
+};
+
+static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+{
+ return (struct qdisc_skb_cb *)skb->cb;
+}
+
+static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
+{
+ return &qdisc->q.lock;
+}
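
qdisc_skb_cb() overlays the generic skb->cb control-buffer scratch area, and the data[] flexible member lets an individual qdisc keep private per-packet state after pkt_len, the way netem stores its time_to_send there. A hedged sketch of that layering, with hypothetical my_* names:

	/* Sketch: private per-packet state layered on qdisc_skb_cb, in the
	 * style of netem's netem_skb_cb.  struct my_skb_cb / tstamp are
	 * hypothetical. */
	struct my_skb_cb {
		u64 tstamp;
	};

	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
	{
		/* the private part must still fit inside skb->cb */
		BUILD_BUG_ON(sizeof(struct qdisc_skb_cb) +
			     sizeof(struct my_skb_cb) > sizeof(skb->cb));
		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
	}
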
+
static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc;
}
+static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+{
+ return qdisc->dev_queue->qdisc_sleeping;
+}
+
+/* The qdisc root lock is a mechanism by which the top level
+ * of a qdisc tree can be locked from any qdisc node in the
+ * forest. This allows changing the configuration of some
+ * aspect of the qdisc tree while blocking out asynchronous
+ * qdisc access in the packet processing paths.
+ *
+ * It is only legal to do this when the root will not change
+ * on us. Otherwise we'll potentially lock the wrong qdisc
+ * root. This is enforced by the RTNL semaphore, which all
+ * users of this lock accessor must hold.
+ */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root(qdisc);
- return &root->dev_queue->lock;
+ ASSERT_RTNL();
+ return qdisc_lock(root);
+}
+
+static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
+{
+ struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+ ASSERT_RTNL();
+ return qdisc_lock(root);
}
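
The sleeping variant matters because deactivating a device swaps the active dev_queue->qdisc away while the configured qdisc stays in qdisc_sleeping; configuration and stats paths therefore want the sleeping root, so they lock the tree the user actually configured. A sketch of the dump-side pattern (read_qdisc_stats() is a hypothetical helper):

	/* Sketch: reading counters consistently, roughly as the netlink
	 * dump path does.  The caller must hold RTNL for
	 * qdisc_root_sleeping_lock() to be legal. */
	static void read_qdisc_stats(struct Qdisc *sch,
				     struct gnet_stats_basic *out)
	{
		spinlock_t *lock = qdisc_root_sleeping_lock(sch);

		spin_lock_bh(lock);
		*out = sch->bstats;
		spin_unlock_bh(lock);
	}
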
static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
static inline void sch_tree_lock(struct Qdisc *q)
{
- spin_lock_bh(qdisc_root_lock(q));
+ spin_lock_bh(qdisc_root_sleeping_lock(q));
}
static inline void sch_tree_unlock(struct Qdisc *q)
{
- spin_unlock_bh(qdisc_root_lock(q));
+ spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
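
With these wrappers, a qdisc's ->change() handler keeps its familiar shape: take the tree lock, flip the private state, release. A sketch with hypothetical my_* names, reusing tc_fifo_qopt purely as an example option struct:

	/* Sketch: typical ->change() shape.  my_sched_data / my_change are
	 * hypothetical; attribute validation and error handling elided. */
	struct my_sched_data {
		u32 limit;
	};

	static int my_change(struct Qdisc *sch, struct nlattr *opt)
	{
		struct my_sched_data *q = qdisc_priv(sch);
		struct tc_fifo_qopt *ctl = nla_data(opt);

		sch_tree_lock(sch);	/* excludes the hot path tree-wide */
		q->limit = ctl->limit;
		sch_tree_unlock(sch);
		return 0;
	}
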
#define tcf_tree_lock(tp) sch_tree_lock((tp)->q)
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
struct netdev_queue *dev_queue,
struct Qdisc_ops *ops, u32 parentid);
+extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
+ struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
return true;
}
+static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+{
+ return qdisc_skb_cb(skb)->pkt_len;
+}
+
+/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
+enum net_xmit_qdisc_t {
+ __NET_XMIT_STOLEN = 0x00010000,
+ __NET_XMIT_BYPASS = 0x00020000,
+};
+
+#ifdef CONFIG_NET_CLS_ACT
+#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
+#else
+#define net_xmit_drop_count(e) (1)
+#endif
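
The __NET_XMIT_STOLEN bit exists so a classful parent can tell "the child consumed the packet via an action" apart from "the child dropped it"; net_xmit_drop_count() folds that distinction into the drop accounting. A sketch of the parent-side pattern, using the qdisc_enqueue() wrapper defined just below (my_enqueue_child is a hypothetical helper; real enqueue handlers take only skb and sch):

	/* Sketch: accounting a child's enqueue result in a classful parent,
	 * in the style of sch_tbf. */
	static int my_enqueue_child(struct sk_buff *skb, struct Qdisc *sch,
				    struct Qdisc *child)
	{
		int ret = qdisc_enqueue(skb, child);

		if (ret != NET_XMIT_SUCCESS) {
			/* a packet stolen by an action is not a drop */
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
			return ret;
		}

		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
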
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+#ifdef CONFIG_NET_SCHED
+ if (sch->stab)
+ qdisc_calculate_pkt_len(skb, sch->stab);
+#endif
+ return sch->enqueue(skb, sch);
+}
+
+static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
+{
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
+}
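
qdisc_enqueue_root() is what the device transmit path calls on the root qdisc: it stamps pkt_len once from skb->len, lets qdisc_enqueue() rewrite it through the size table, and masks off the private __NET_XMIT_* bits so callers still see plain NET_XMIT_* codes. Roughly where it sits on the xmit path (a sketch of the dev_queue_xmit() shape, not the exact code):

	/* Sketch: the root enqueue on the transmit path.  xmit_one() is a
	 * hypothetical stand-in for the relevant part of dev_queue_xmit(). */
	static int xmit_one(struct sk_buff *skb, struct Qdisc *q,
			    spinlock_t *root_lock)
	{
		int rc;

		spin_lock(root_lock);
		rc = qdisc_enqueue_root(skb, q);  /* stamps and masks as above */
		qdisc_run(q);			  /* may transmit immediately */
		spin_unlock(root_lock);

		return rc;
	}
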
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff_head *list)
{
__skb_queue_tail(list, skb);
- sch->qstats.backlog += skb->len;
- sch->bstats.bytes += skb->len;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
return NET_XMIT_SUCCESS;
struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL))
- sch->qstats.backlog -= skb->len;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
return skb;
}
struct sk_buff *skb = __skb_dequeue_tail(list);
if (likely(skb != NULL))
- sch->qstats.backlog -= skb->len;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
return skb;
}
return __qdisc_dequeue_tail(sch, &sch->q);
}
-static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff_head *list)
+static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
- __skb_queue_head(list, skb);
- sch->qstats.backlog += skb->len;
- sch->qstats.requeues++;
+ return skb_peek(&sch->q);
+}
- return NET_XMIT_SUCCESS;
+/* generic pseudo peek method for non-work-conserving qdisc */
+static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
+{
+ /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
+ if (!sch->gso_skb) {
+ sch->gso_skb = sch->dequeue(sch);
+ if (sch->gso_skb)
+ /* it's still part of the queue */
+ sch->q.qlen++;
+ }
+
+ return sch->gso_skb;
}
-static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
+/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
+static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
- return __qdisc_requeue(skb, sch, &sch->q);
+ struct sk_buff *skb = sch->gso_skb;
+
+ if (skb) {
+ sch->gso_skb = NULL;
+ sch->q.qlen--;
+ } else {
+ skb = sch->dequeue(sch);
+ }
+
+ return skb;
}
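
Together these two helpers replace the removed requeue path: a non-work-conserving qdisc peeks, decides whether the packet may be sent now, and only then dequeues, so nothing ever has to be pushed back into the queue. A sketch in the style of sch_tbf's dequeue (my_dequeue and tokens_available() are hypothetical):

	static bool tokens_available(struct Qdisc *sch, unsigned int len); /* hypothetical */

	/* Sketch: peek-then-dequeue in a shaping qdisc; "child" stands in
	 * for the inner qdisc a real implementation keeps in its priv. */
	static struct sk_buff *my_dequeue(struct Qdisc *sch, struct Qdisc *child)
	{
		struct sk_buff *skb = child->ops->peek(child);

		if (!skb)
			return NULL;

		if (!tokens_available(sch, qdisc_pkt_len(skb)))
			return NULL;	/* not yet; the packet stays queued */

		skb = qdisc_dequeue_peeked(child);	/* not child->dequeue()! */
		if (skb)
			sch->q.qlen--;

		return skb;
	}
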
static inline void __qdisc_reset_queue(struct Qdisc *sch,
* We do not know the backlog in bytes of this list; it
* is up to the caller to correct it
*/
- skb_queue_purge(list);
+ __skb_queue_purge(list);
}
static inline void qdisc_reset_queue(struct Qdisc *sch)
struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
if (likely(skb != NULL)) {
- unsigned int len = skb->len;
+ unsigned int len = qdisc_pkt_len(skb);
kfree_skb(skb);
return len;
}