#ifndef __LINUX_PKT_SCHED_H
#define __LINUX_PKT_SCHED_H
+#include <linux/types.h>
+
/* Logical priority bands not depending on a specific packet scheduler.
   Every scheduler will map them to real traffic classes, if it has
   no more precise mechanism to classify packets.
 */

struct tc_ratespec
{
unsigned char cell_log;
unsigned char __reserved;
- unsigned short feature;
- short addend;
+ unsigned short overhead;
+ short cell_align;
unsigned short mpu;
__u32 rate;
};
+#define TC_RTAB_SIZE 1024
+
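/*
 * Illustrative sketch (not part of the header): how a transmit-time
 * lookup against a TC_RTAB_SIZE rate table can use the tc_ratespec
 * fields.  The table holds 256 __u32 entries indexed by packet size in
 * units of (1 << cell_log) bytes; overhead and cell_align adjust the
 * size before indexing, while mpu is honoured when the table is filled.
 * The function name and the clamping policy are assumptions made for
 * this example only.
 */
static __u32 example_rtab_lookup(const struct tc_ratespec *r,
				 const __u32 *rtab, unsigned int pkt_len)
{
	int slot = pkt_len + r->cell_align + r->overhead;

	if (slot < 0)
		slot = 0;
	slot >>= r->cell_log;
	if (slot > 255)
		slot = 255;	/* clamp to the last of the 256 entries */
	return rtab[slot];
}
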
+struct tc_sizespec {
+ unsigned char cell_log;
+ unsigned char size_log;
+ short cell_align;
+ int overhead;
+ unsigned int linklayer;
+ unsigned int mpu;
+ unsigned int mtu;
+ unsigned int tsize;
+};
+
+enum {
+ TCA_STAB_UNSPEC,
+ TCA_STAB_BASE,
+ TCA_STAB_DATA,
+ __TCA_STAB_MAX
+};
+
+#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
+
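/*
 * Illustrative sketch (not part of the header): how a size table built
 * from TCA_STAB_BASE (a struct tc_sizespec) plus TCA_STAB_DATA (an
 * array of tsize __u16 entries) can map a wire length to the length
 * used for scheduling.  The helper name and the exact handling of
 * lengths past the end of the table are assumptions for illustration.
 */
static unsigned int example_stab_lookup(const struct tc_sizespec *s,
					const __u16 *data, unsigned int wire_len)
{
	int pkt_len = wire_len + s->overhead;

	if (s->tsize) {
		int slot = pkt_len + s->cell_align;

		if (slot < 0)
			slot = 0;
		slot >>= s->cell_log;

		if ((unsigned int)slot < s->tsize)
			pkt_len = data[slot];
		else
			/* past the table end: scale from the last entries */
			pkt_len = data[s->tsize - 1] * (slot / s->tsize) +
				  data[slot % s->tsize];

		pkt_len <<= s->size_log;
	}
	return pkt_len < 1 ? 1 : (unsigned int)pkt_len;
}
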
/* FIFO section */
struct tc_fifo_qopt
/* PRIO section */
#define TCQ_PRIO_BANDS 16
+#define TCQ_MIN_PRIO_BANDS 2
struct tc_prio_qopt
{
	int	bands;			/* Number of bands */
	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
};
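
/*
 * Illustrative sketch (not part of the header): a 3-band PRIO
 * configuration.  The priomap shown is the conventional 3-band default
 * mapping of the 16 logical priorities; bands may range from
 * TCQ_MIN_PRIO_BANDS to TCQ_PRIO_BANDS, and each priomap entry selects
 * a band in 0..bands-1.
 */
static const struct tc_prio_qopt example_prio_opt = {
	.bands   = 3,
	.priomap = { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
};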
+/* MULTIQ section */
+
+struct tc_multiq_qopt {
+ __u16 bands; /* Number of bands */
+ __u16 max_bands; /* Maximum number of queues */
+};
+
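/*
 * Illustrative note (not part of the header): for MULTIQ the band count
 * normally tracks the device's transmit queue count, so a create
 * request can leave this structure zeroed and read the kernel's reply
 * to learn bands and max_bands.  The initializer below is an example.
 */
static const struct tc_multiq_qopt example_multiq_req = { 0 };
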
/* TBF section */
struct tc_tbf_qopt

/* SFQ section */
struct tc_sfq_qopt
	unsigned flows; /* Maximal number of flows */
};
+struct tc_sfq_xstats
+{
+ __s32 allot;
+};
+
/*
* NOTE: limit, divisor and flows are hardwired to code at the moment.
*
unsigned char Scell_log; /* cell size for idle damping */
unsigned char flags;
#define TC_RED_ECN 1
+#define TC_RED_HARDDROP 2
};
struct tc_red_xstats
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
-#define TCA_SET_OFF TCA_GRED_PARMS
struct tc_gred_qopt
{
- __u32 limit; /* HARD maximal queue length (bytes)
-*/
- __u32 qth_min; /* Min average length threshold (bytes)
-*/
- __u32 qth_max; /* Max average length threshold (bytes)
-*/
- __u32 DP; /* upto 2^32 DPs */
- __u32 backlog;
- __u32 qave;
- __u32 forced;
- __u32 early;
- __u32 other;
- __u32 pdrop;
-
- unsigned char Wlog; /* log(W) */
- unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
- unsigned char Scell_log; /* cell size for idle damping */
- __u8 prio; /* prio of this VQ */
- __u32 packets;
- __u32 bytesin;
+ __u32 limit; /* HARD maximal queue length (bytes) */
+ __u32 qth_min; /* Min average length threshold (bytes) */
+ __u32 qth_max; /* Max average length threshold (bytes) */
+ __u32 DP; /* up to 2^32 DPs */
+ __u32 backlog;
+ __u32 qave;
+ __u32 forced;
+ __u32 early;
+ __u32 other;
+ __u32 pdrop;
+ __u8 Wlog; /* log(W) */
+ __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
+ __u8 Scell_log; /* cell size for idle damping */
+ __u8 prio; /* prio of this VQ */
+ __u32 packets;
+ __u32 bytesin;
};
+
/* gred setup */
struct tc_gred_sopt
{
- __u32 DPs;
- __u32 def_DP;
- __u8 grio;
+ __u32 DPs;
+ __u32 def_DP;
+ __u8 grio;
+ __u8 flags;
+ __u16 pad1;
};
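
/*
 * Illustrative sketch (not part of the header): a GRED setup for four
 * virtual queues (DPs), using DP 0 as the default and enabling
 * priority-based (grio) buffer sharing.  The values are examples only.
 */
static const struct tc_gred_sopt example_gred_setup = {
	.DPs    = 4,	/* number of virtual queues in use */
	.def_DP = 0,	/* DP for traffic that matches no other DP */
	.grio   = 1,	/* enable priority-based (grio) mode */
};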
/* HTB section */
#define TC_CBQ_OVL_DROP 3
#define TC_CBQ_OVL_RCLASSIC 4
unsigned char priority2;
+ __u16 pad;
__u32 penalty;
};
TCA_NETEM_UNSPEC,
TCA_NETEM_CORR,
TCA_NETEM_DELAY_DIST,
+ TCA_NETEM_REORDER,
+ TCA_NETEM_CORRUPT,
__TCA_NETEM_MAX,
};
__u32 latency; /* added delay (us) */
__u32 limit; /* fifo limit (packets) */
__u32 loss; /* random packet loss (0=none ~0=100%) */
- __u32 gap; /* re-ordering gap (0 for delay all) */
+ __u32 gap; /* re-ordering gap (0 for none) */
__u32 duplicate; /* random packet dup (0=none ~0=100%) */
__u32 jitter; /* random jitter in latency (us) */
};
__u32 dup_corr; /* duplicate correlation */
};
+struct tc_netem_reorder
+{
+ __u32 probability;
+ __u32 correlation;
+};
+
+struct tc_netem_corrupt
+{
+ __u32 probability;
+ __u32 correlation;
+};
+
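/*
 * Illustrative sketch (not part of the header): netem probabilities and
 * correlations are carried as 32-bit fixed point where 0 means never
 * and ~0 means always, the same convention the loss/duplicate comments
 * above use.  The conversion helper is an example, not a kernel API.
 */
static __u32 example_netem_percent(double percent)
{
	return (__u32)(percent / 100.0 * 0xFFFFFFFFu);
}

/* e.g. reorder 25% of packets, 50% correlated with the previous decision */
static void example_fill_reorder(struct tc_netem_reorder *r)
{
	r->probability = example_netem_percent(25.0);
	r->correlation = example_netem_percent(50.0);
}
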
#define NETEM_DIST_SCALE 8192
+/* DRR */
+
+enum
+{
+ TCA_DRR_UNSPEC,
+ TCA_DRR_QUANTUM,
+ __TCA_DRR_MAX
+};
+
+#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
+
+struct tc_drr_stats
+{
+ __u32 deficit;
+};
+
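/*
 * Illustrative sketch (not part of the header): the deficit round robin
 * rule behind TCA_DRR_QUANTUM (a __u32 byte quantum per class) and the
 * deficit exported in tc_drr_stats.  The types and helpers below are
 * examples only, not kernel interfaces.
 */
struct example_drr_class {
	__u32 quantum;	/* configured via TCA_DRR_QUANTUM (bytes) */
	__u32 deficit;	/* remaining byte credit, as in tc_drr_stats */
};

/* When a class gets a fresh turn in the round it earns its quantum. */
static void example_drr_new_round(struct example_drr_class *cl)
{
	cl->deficit += cl->quantum;
}

/* A packet may be sent only while it fits within the class's deficit. */
static int example_drr_can_send(struct example_drr_class *cl, __u32 pkt_len)
{
	if (pkt_len > cl->deficit)
		return 0;
	cl->deficit -= pkt_len;
	return 1;
}
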
#endif