/**************************************************************************/
/*                                                                        */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
/* Copyright (C) 2003 IBM Corp.                                           */
/*  Originally written by Dave Larson (larson1@us.ibm.com)                */
/*  Maintained by Santiago Leon (santil@us.ibm.com)                       */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program; if not, write to the Free Software           */
/*  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307   */
/*  USA                                                                   */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
/*
  TODO:
  - remove frag processing code - no longer needed
  - add support for sysfs
  - possibly remove procfs support
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"

#undef DEBUG
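/* Flip the #undef above to #define DEBUG to compile in the debug printks
 * and assertions below; without it they expand to nothing.  Note that the
 * error and debug macros expect a variable named `adapter` to be in scope
 * at the call site. */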
#define ibmveth_printk(fmt, args...) \
	printk(KERN_INFO "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if(!(expr)) { \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif
static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct net_device *dev, int *budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "net/ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
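/* Each receive-queue entry carries a toggle bit written by the hypervisor.
 * The driver keeps its own expected toggle value and flips it every time
 * the queue index wraps, so an entry whose toggle matches ours is one the
 * hypervisor has newly filled in. */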
/* simple methods of getting data from the current rxq entry */
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
}
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if(!pool->free_map) {
		return -ENOMEM;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if(!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -ENOMEM;
	}

	pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
	if(!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -ENOMEM;
	}

	memset(pool->skbuff, 0, sizeof(void*) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for(i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
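/* Free buffer slots are tracked in the free_map ring: consumer_index
 * advances as slots are handed to the hypervisor, producer_index as they
 * come back.  Each posted buffer is tagged with a correlator of
 * ((u64)pool_index << 32 | slot_index) stored in its first 8 bytes, e.g.
 * pool 2, slot 37 -> 0x0000000200000025, so the receive path can find the
 * matching skb again. */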
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	mb();

	for(i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;
		dma_addr_t dma_addr;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if(!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index++ % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.desc = 0;
		desc.fields.valid = 1;
		desc.fields.length = pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if(lpar_rc != H_Success) {
			pool->free_map[free_index] = index;
			pool->skbuff[index] = NULL;
			pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					pool->dma_addr[index], pool->buff_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}
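/* The last 8 bytes of the DMA-mapped buffer-list page hold a count,
 * maintained by the hypervisor, of packets dropped because no receive
 * buffer was available; the replenish task snapshots it into
 * rx_no_buffer below (close does the same before cleanup). */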
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for(i = 0; i < IbmVethNumBufferPools; i++)
		if(adapter->rx_buff_pool[i].active)
			ibmveth_replenish_buffer_pool(adapter,
						      &adapter->rx_buff_pool[i]);

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if(pool->skbuff && pool->dma_addr) {
		for(i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if(skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if(pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if(pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
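/* Two things can happen to the buffer at the head of the rx queue: it is
 * "harvested" (pulled out of its pool and handed to the network stack) or
 * "recycled" (re-posted to the hypervisor unread, e.g. when marked
 * invalid).  Either way the queue index advances, flipping the expected
 * toggle when it wraps. */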
/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if(!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.desc = 0;
	desc.fields.valid = 1;
	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_Success) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;

	if(adapter->buffer_list_addr != NULL) {
		if(!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if(adapter->filter_list_addr != NULL) {
		if(!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if(adapter->rx_queue.queue_addr != NULL) {
		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for(i = 0; i < IbmVethNumBufferPools; i++)
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
}
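/* Open: allocate the buffer-list and filter-list pages plus the receive
 * queue, map all three for DMA, hand them to the hypervisor via
 * h_register_logical_lan, hook up the VIO interrupt, and prime the
 * buffer pools with an initial replenish cycle. */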
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;

	ibmveth_debug_printk("open starting\n");

	for(i = 0; i < IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if((dma_mapping_error(adapter->buffer_list_dma)) ||
	   (dma_mapping_error(adapter->filter_list_dma)) ||
	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	/* call change_mtu to init the buffer pools based on initial mtu */
	ibmveth_change_mtu(netdev, netdev->mtu);

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.desc = 0;
	rxq_desc.fields.valid = 1;
	rxq_desc.fields.length = adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
					 adapter->buffer_list_dma,
					 rxq_desc.desc,
					 adapter->filter_list_dma,
					 mac_address);

	if(lpar_rc != H_Success) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		return -ENONET;
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_isLongBusy(rc) || (rc == H_Busy));

		ibmveth_cleanup(adapter);
		return rc;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_replenish_task(adapter);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));

	if(lpar_rc != H_Success) {
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.get_link = netdev_get_link,
	.get_sg = ethtool_op_get_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
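/* Transmit: describe the frame to the hypervisor as up to
 * IbmVethMaxSendFrags buffer descriptors and hand them to
 * h_send_logical_lan.  There is no transmit-completion interrupt on this
 * interface; once the call returns the buffers are no longer needed, so
 * the mappings are torn down and the skb freed immediately afterwards. */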
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
	unsigned long lpar_rc;
	int nfrags = 0, curfrag;
	unsigned long correlator;
	unsigned int retry_count;

	if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
		adapter->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	memset(&desc, 0, sizeof(desc));

	/* nfrags = number of frags after the initial fragment */
	nfrags = skb_shinfo(skb)->nr_frags;

	if(nfrags)
		adapter->tx_multidesc_send++;

	/* map the initial fragment */
	desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
	desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
					desc[0].fields.length, DMA_TO_DEVICE);
	desc[0].fields.valid = 1;

	if(dma_mapping_error(desc[0].fields.address)) {
		ibmveth_error_printk("tx: unable to map initial fragment\n");
		adapter->tx_map_failed++;
		adapter->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	curfrag = nfrags;

	/* map fragments past the initial portion if there are any */
	while(curfrag--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
		desc[curfrag+1].fields.address
			= dma_map_single(&adapter->vdev->dev,
				page_address(frag->page) + frag->page_offset,
				frag->size, DMA_TO_DEVICE);
		desc[curfrag+1].fields.length = frag->size;
		desc[curfrag+1].fields.valid = 1;

		if(dma_mapping_error(desc[curfrag+1].fields.address)) {
			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
			adapter->tx_map_failed++;
			adapter->stats.tx_dropped++;
			/* Free all the mappings we just created */
			while(curfrag < nfrags) {
				dma_unmap_single(&adapter->vdev->dev,
						 desc[curfrag+1].fields.address,
						 desc[curfrag+1].fields.length,
						 DMA_TO_DEVICE);
				curfrag++;
			}
			dev_kfree_skb(skb);
			return 0;
		}
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc[0].desc,
					     desc[1].desc,
					     desc[2].desc,
					     desc[3].desc,
					     desc[4].desc,
					     desc[5].desc,
					     correlator);
	} while ((lpar_rc == H_Busy) && (retry_count--));

	if(lpar_rc != H_Success && lpar_rc != H_Dropped) {
		int i;
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		for(i = 0; i < 6; i++) {
			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%x\n", i,
					     desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
		}
		adapter->tx_send_failed++;
		adapter->stats.tx_dropped++;
	} else {
		adapter->stats.tx_packets++;
		adapter->stats.tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}

	do {
		dma_unmap_single(&adapter->vdev->dev,
				desc[nfrags].fields.address,
				desc[nfrags].fields.length, DMA_TO_DEVICE);
	} while(--nfrags >= 0);

	dev_kfree_skb(skb);
	return 0;
}
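/* NAPI poll: consume up to the device quota of rx queue entries, recycling
 * any entry the hypervisor marked invalid, then re-enable the VIO interrupt
 * and re-check the queue once more to close the race with a frame that
 * arrived while the interrupt was still masked. */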
static int ibmveth_poll(struct net_device *netdev, int *budget)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	int max_frames_to_process = netdev->quota;
	int frames_processed = 0;
	int more_work = 1;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct net_device *netdev = adapter->netdev;

		if(ibmveth_rxq_pending_buffer(adapter)) {
			struct sk_buff *skb;

			rmb();

			if(!ibmveth_rxq_buffer_valid(adapter)) {
				wmb(); /* suggested by larson1 */
				adapter->rx_invalid_buffer++;
				ibmveth_debug_printk("recycling invalid buffer\n");
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				int length = ibmveth_rxq_frame_length(adapter);
				int offset = ibmveth_rxq_frame_offset(adapter);
				skb = ibmveth_rxq_get_buffer(adapter);

				ibmveth_rxq_harvest_buffer(adapter);

				skb_reserve(skb, offset);
				skb_put(skb, length);

				skb->protocol = eth_type_trans(skb, netdev);

				netif_receive_skb(skb);	/* send it up */

				adapter->stats.rx_packets++;
				adapter->stats.rx_bytes += length;
				frames_processed++;
				netdev->last_rx = jiffies;
			}
		} else {
			more_work = 0;
		}
	} while(more_work && (frames_processed < max_frames_to_process));

	ibmveth_replenish_task(adapter);

	if(more_work) {
		/* more work to do - return that we are not done yet */
		netdev->quota -= frames_processed;
		*budget -= frames_processed;
		return 1;
	}

	/* we think we are done - reenable interrupts, then check once more to make sure we are done */
	lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);

	ibmveth_assert(lpar_rc == H_Success);

	netif_rx_complete(netdev);

	if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_Success);
		more_work = 1;
		goto restart_poll;
	}

	netdev->quota -= frames_processed;
	*budget -= frames_processed;

	/* we really are done */
	return 0;
}
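/* The VIO interrupt handler only masks further signals and schedules NAPI;
 * all real receive work happens in ibmveth_poll above. */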
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if(netif_rx_schedule_prep(netdev)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_Success);
		__netif_rx_schedule(netdev);
	}
	return IRQ_HANDLED;
}
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return &adapter->stats;
}
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if(lpar_rc != H_Success) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if(lpar_rc != H_Success) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}
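/* MTU changes re-select which rx buffer pools are active: every pool too
 * small for the new MTU stays active (it still serves smaller frames),
 * the first pool large enough for a full frame is activated, and any
 * larger pools are deactivated and released back to the hypervisor. */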
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int i;
	int prev_smaller = 1;

	if ((new_mtu < 68) ||
	    (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
		return -EINVAL;

	for(i = 0; i < IbmVethNumBufferPools; i++) {
		int activate = 0;
		if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
			activate = 1;
			prev_smaller = 1;
		} else {
			if (prev_smaller)
				activate = 1;
			prev_smaller = 0;
		}

		if (activate && !adapter->rx_buff_pool[i].active) {
			struct ibmveth_buff_pool *pool =
						&adapter->rx_buff_pool[i];
			if(ibmveth_alloc_buffer_pool(pool)) {
				ibmveth_error_printk("unable to alloc pool\n");
				return -ENOMEM;
			}
			adapter->rx_buff_pool[i].active = 1;
		} else if (!activate && adapter->rx_buff_pool[i].active) {
			adapter->rx_buff_pool[i].active = 0;
			h_free_logical_lan_buffer(adapter->vdev->unit_address,
						  (u64)pool_size[i]);
		}
	}

	/* kick the interrupt handler so that the new buffer pools get
	   replenished or deallocated */
	ibmveth_interrupt(dev->irq, dev, NULL);

	dev->mtu = new_mtu;
	return 0;
}
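/* Probe: pull the MAC address and multicast filter size from the device
 * tree attributes exported by firmware, then build and register the
 * net_device.  The DMA cookies start out as DMA_ERROR_CODE so that
 * ibmveth_cleanup() can tell which mappings were actually created. */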
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter = NULL;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
	if(!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
	if(!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if(!netdev)
		return -ENOMEM;

	SET_MODULE_OWNER(netdev);

	adapter = netdev->priv;
	memset(adapter, 0, sizeof(*adapter));
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;

	/*	Some older boxes running PHYP non-natively have an OF that
		returns a 8-byte local-mac-address field (and the first
		2 bytes have to be ignored) while newer boxes' OF return
		a 6-byte field.  Note that IEEE 1275 specifies that
		local-mac-address must be a 6-byte field.
		The RPA doc specifies that the first byte must be 10b, so
		we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	adapter->liobn = dev->iommu_table->it_index;

	netdev->irq = dev->irq;
	netdev->open               = ibmveth_open;
	netdev->poll               = ibmveth_poll;
	netdev->weight             = 16;
	netdev->stop               = ibmveth_close;
	netdev->hard_start_xmit    = ibmveth_start_xmit;
	netdev->get_stats          = ibmveth_get_stats;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl           = ibmveth_ioctl;
	netdev->ethtool_ops        = &netdev_ethtool_ops;
	netdev->change_mtu         = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for(i = 0; i < IbmVethNumBufferPools; i++)
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i]);

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	ibmveth_debug_printk("registering netdev...\n");

	rc = register_netdev(netdev);

	if(rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
	if (ibmveth_proc_dir) {
		SET_MODULE_OWNER(ibmveth_proc_dir);
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, NULL);
}
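/* The per-adapter proc file is a seq_file with exactly one record: start
 * returns a dummy token for position 0 and next always ends the walk, so
 * show runs once per read. */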
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		return (void *)1;
	} else {
		return NULL;
	}
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}
static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char*) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char*) &adapter->mac_addr) ;

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "LIOBN:           0x%lx\n", adapter->liobn);
	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  skbuffs linearized:          %ld\n", adapter->tx_linearized);
	seq_printf(seq, "       multi-descriptor sends:      %ld\n", adapter->tx_multidesc_send);
	seq_printf(seq, "       skb_linearize failures:      %ld\n", adapter->tx_linearize_failed);
	seq_printf(seq, "       vio_map_single failures:     %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:               %ld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:       %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:          %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:         %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:             %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:                  %ld\n", adapter->rx_no_buffer);

	return 0;
}
static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};
static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}
static struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
			SET_MODULE_OWNER(entry);
		}
	}
	return;
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
	}
}
#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */
static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "",""}
};

MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
static struct vio_driver ibmveth_driver = {
	.name     = (char *)ibmveth_driver_name,
	.id_table = ibmveth_device_table,
	.probe    = ibmveth_probe,
	.remove   = ibmveth_remove
};
static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);