#include <asm/dma.h>
#include <asm/div64.h> /* do_div */
-#define VERSION \
- "pktgen v2.70: Packet Generator for packet performance testing.\n"
-
+#define VERSION "2.72"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
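/* Label stack entry with only the S (bottom-of-stack) bit set */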
#define MPLS_STACK_BOTTOM htonl(0x00000100)
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
/* Thread control flag bits */
-#define T_TERMINATE (1<<0)
-#define T_STOP (1<<1) /* Stop run */
-#define T_RUN (1<<2) /* Start run */
-#define T_REMDEVALL (1<<3) /* Remove all devs */
-#define T_REMDEV (1<<4) /* Remove one dev */
+#define T_STOP (1<<0) /* Stop run */
+#define T_RUN (1<<1) /* Start run */
+#define T_REMDEVALL (1<<2) /* Remove all devs */
+#define T_REMDEV (1<<3) /* Remove one dev */
/* If lock -- can be removed after some work */
#define if_lock(t) spin_lock(&(t->if_lock));
__u32 cur_src_mac_offset;
__be32 cur_saddr;
__be32 cur_daddr;
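+ /* IPv4 identification field, incremented for every packet built */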
+ __u16 ip_id;
__u16 cur_udp_dst;
__u16 cur_udp_src;
__u16 cur_queue_map;
return cmp1.tv64 < cmp2.tv64;
}
-static const char version[] __initconst = VERSION;
+static const char version[] =
+ "pktgen " VERSION ": Packet Generator for packet performance testing.\n";
static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static int pgctrl_show(struct seq_file *seq, void *v)
{
- seq_puts(seq, VERSION);
+ seq_puts(seq, version);
return 0;
}
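/* A delay value of 0x7FFFFFFF from user space means "never transmit" */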
if (value == 0x7FFFFFFF)
pkt_dev->delay = ULLONG_MAX;
else
- pkt_dev->delay = (u64)value * NSEC_PER_USEC;
+ pkt_dev->delay = (u64)value;
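+ /* delay is now taken directly in nanoseconds (no NSEC_PER_USEC scaling) */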
sprintf(pg_result, "OK: delay=%llu",
(unsigned long long) pkt_dev->delay);
remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
- pkt_dev->entry = create_proc_entry(dev->name, 0600,
- pg_proc_dir);
+ pkt_dev->entry = proc_create_data(dev->name, 0600,
+ pg_proc_dir,
+ &pktgen_if_fops,
+ pkt_dev);
if (!pkt_dev->entry)
printk(KERN_ERR "pktgen: can't move proc "
" entry for '%s'\n", dev->name);
static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{
- ktime_t start;
- s32 remaining;
+ ktime_t start_time, end_time;
+ s64 remaining;
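+ /* s64: the remaining time in microseconds may not fit in 32 bits */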
struct hrtimer_sleeper t;
hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
hrtimer_set_expires(&t.timer, spin_until);
remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
- if (remaining <= 0)
+ if (remaining <= 0) {
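+ /* Already past the deadline: push next_tx forward and return */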
+ pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
return;
+ }
- start = ktime_now();
+ start_time = ktime_now();
if (remaining < 100)
udelay(remaining); /* really small just spin */
else {
} while (t.task && pkt_dev->running && !signal_pending(current));
__set_current_state(TASK_RUNNING);
}
- pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
+ end_time = ktime_now();
+
+ pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
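+ /* Base next_tx on the actual wakeup time, not the requested one */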
+ pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
}
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
if (pkt_dev->flags & F_QUEUE_MAP_CPU)
pkt_dev->cur_queue_map = smp_processor_id();
- else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+ else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
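+ /* "<=" lets queue_map_min == queue_map_max pick that single queue */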
__u16 t;
if (pkt_dev->flags & F_QUEUE_MAP_RND) {
t = random32() %
iph->protocol = IPPROTO_UDP; /* UDP */
iph->saddr = pkt_dev->cur_saddr;
iph->daddr = pkt_dev->cur_daddr;
+ iph->id = htons(pkt_dev->ip_id);
+ pkt_dev->ip_id++;
iph->frag_off = 0;
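/* 20 byte IPv4 header + 8 byte UDP header + payload */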
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
- if (pkt_dev->nfrags <= 0)
+ if (pkt_dev->nfrags <= 0) {
pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- else {
+ memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
+ } else {
int frags = pkt_dev->nfrags;
- int i;
+ int i, len;
pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
if (frags > MAX_SKB_FRAGS)
frags = MAX_SKB_FRAGS;
if (datalen > frags * PAGE_SIZE) {
- skb_put(skb, datalen - frags * PAGE_SIZE);
+ len = datalen - frags * PAGE_SIZE;
+ memset(skb_put(skb, len), 0, len);
datalen = frags * PAGE_SIZE;
}
i = 0;
while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL, 0);
+ struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
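+ /* __GFP_ZERO: fragment pages start zeroed, like the linear payload */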
skb_shinfo(skb)->frags[i].page = page;
skb_shinfo(skb)->frags[i].page_offset = 0;
skb_shinfo(skb)->frags[i].size =
mutex_unlock(&pktgen_thread_lock);
}
-static void idle(struct pktgen_dev *pkt_dev)
+static void pktgen_resched(struct pktgen_dev *pkt_dev)
{
ktime_t idle_start = ktime_now();
+ schedule();
+ pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
- if (need_resched())
- schedule();
- else
- cpu_relax();
+static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
+{
+ ktime_t idle_start = ktime_now();
+
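+ /* Wait for the driver to release its reference on the current skb */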
+ while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+ if (signal_pending(current))
+ break;
+ if (need_resched())
+ pktgen_resched(pkt_dev);
+ else
+ cpu_relax();
+ }
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
}
-
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
struct net_device *odev = pkt_dev->odev;
- int (*xmit)(struct sk_buff *, struct net_device *)
+ netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *)
= odev->netdev_ops->ndo_start_xmit;
struct netdev_queue *txq;
u16 queue_map;
int ret;
- if (pkt_dev->delay) {
- spin(pkt_dev, pkt_dev->next_tx);
-
- /* This is max DELAY, this has special meaning of
- * "never transmit"
- */
- if (pkt_dev->delay == ULLONG_MAX) {
- pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
- return;
- }
- }
-
- if (!pkt_dev->skb) {
- set_cur_queue_map(pkt_dev);
- queue_map = pkt_dev->cur_queue_map;
- } else {
- queue_map = skb_get_queue_mapping(pkt_dev->skb);
+ /* If device is offline, then don't send */
+ if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
+ pktgen_stop_device(pkt_dev);
+ return;
}
- txq = netdev_get_tx_queue(odev, queue_map);
- /* Did we saturate the queue already? */
- if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
- /* If device is down, then all queues are permanently frozen */
- if (netif_running(odev))
- idle(pkt_dev);
- else
- pktgen_stop_device(pkt_dev);
+ /* This is the max DELAY; it has the special meaning of
+ * "never transmit"
+ */
+ if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
+ pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
return;
}
+ /* If no skb or clone count exhausted then get new one */
if (!pkt_dev->skb || (pkt_dev->last_ok &&
++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
/* build a new pkt */
pkt_dev->clone_count = 0; /* reset counter */
}
- /* fill_packet() might have changed the queue */
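+ /* Rate-limit only after a successful transmit; failures retry immediately */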
+ if (pkt_dev->delay && pkt_dev->last_ok)
+ spin(pkt_dev, pkt_dev->next_tx);
+
queue_map = skb_get_queue_mapping(pkt_dev->skb);
txq = netdev_get_tx_queue(odev, queue_map);
__netif_tx_lock_bh(txq);
- if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
- pkt_dev->last_ok = 0;
- else {
- atomic_inc(&(pkt_dev->skb->users));
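+ /* Extra reference keeps the skb for clone reuse and busy retry */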
+ atomic_inc(&(pkt_dev->skb->users));
- retry_now:
+ if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+ ret = NETDEV_TX_BUSY;
+ else
ret = (*xmit)(pkt_dev->skb, odev);
- switch (ret) {
- case NETDEV_TX_OK:
- txq_trans_update(txq);
- pkt_dev->last_ok = 1;
- pkt_dev->sofar++;
- pkt_dev->seq_num++;
- pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
- break;
- case NETDEV_TX_LOCKED:
- cpu_relax();
- goto retry_now;
- default: /* Drivers are not supposed to return other values! */
- if (net_ratelimit())
- pr_info("pktgen: %s xmit error: %d\n",
- odev->name, ret);
- pkt_dev->errors++;
- /* fallthru */
- case NETDEV_TX_BUSY:
- /* Retry it next time */
- atomic_dec(&(pkt_dev->skb->users));
- pkt_dev->last_ok = 0;
- }
-
- if (pkt_dev->delay)
- pkt_dev->next_tx = ktime_add_ns(ktime_now(),
- pkt_dev->delay);
+
+ switch (ret) {
+ case NETDEV_TX_OK:
+ txq_trans_update(txq);
+ pkt_dev->last_ok = 1;
+ pkt_dev->sofar++;
+ pkt_dev->seq_num++;
+ pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+ break;
+ default: /* Drivers are not supposed to return other values! */
+ if (net_ratelimit())
+ pr_info("pktgen: %s xmit error: %d\n",
+ odev->name, ret);
+ pkt_dev->errors++;
+ /* fallthru */
+ case NETDEV_TX_LOCKED:
+ case NETDEV_TX_BUSY:
+ /* Retry it next time */
+ atomic_dec(&(pkt_dev->skb->users));
+ pkt_dev->last_ok = 0;
}
__netif_tx_unlock_bh(txq);
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
- while (atomic_read(&(pkt_dev->skb->users)) != 1) {
- if (signal_pending(current))
- break;
- idle(pkt_dev);
- }
+ pktgen_wait_for_skb(pkt_dev);
/* Done with this */
pktgen_stop_device(pkt_dev);
while (!kthread_should_stop()) {
pkt_dev = next_to_run(t);
- if (!pkt_dev &&
- (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
- == 0) {
- prepare_to_wait(&(t->queue), &wait,
- TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
- finish_wait(&(t->queue), &wait);
+ if (unlikely(!pkt_dev && t->control == 0)) {
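+ /* Nothing to run and no pending control request: sleep briefly */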
+ wait_event_interruptible_timeout(t->queue,
+ t->control != 0,
+ HZ/10);
+ continue;
}
__set_current_state(TASK_RUNNING);
- if (pkt_dev)
+ if (likely(pkt_dev)) {
pktgen_xmit(pkt_dev);
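+ /* Yield the CPU between packets when a reschedule is pending */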
+ if (need_resched())
+ pktgen_resched(pkt_dev);
+ else
+ cpu_relax();
+ }
+
if (t->control & T_STOP) {
pktgen_stop(t);
t->control &= ~(T_STOP);
module_init(pg_init);
module_exit(pg_cleanup);
-MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se");
+MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
MODULE_DESCRIPTION("Packet Generator tool");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
module_param(pg_count_d, int, 0);
+MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject");
module_param(pg_delay_d, int, 0);
+MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)");
module_param(pg_clone_skb_d, int, 0);
+MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet");
module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");