diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 57772be..5862282 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -49,11 +50,12 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <net/net_namespace.h>
-#include <asm/semaphore.h>
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
 #include <asm/vio.h>
+#include <asm/iommu.h>
 #include <asm/uaccess.h>
+#include <asm/firmware.h>
 #include <linux/seq_file.h>
 
 #include "ibmveth.h"
@@ -95,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
 static struct kobj_type ktype_veth_pool;
 
+
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "ibmveth"
 static struct proc_dir_entry *ibmveth_proc_dir;
@@ -227,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	u32 i;
 	u32 count = pool->size - atomic_read(&pool->available);
 	u32 buffers_added = 0;
+	struct sk_buff *skb;
+	unsigned int free_index, index;
+	u64 correlator;
+	unsigned long lpar_rc;
+	dma_addr_t dma_addr;
 
 	mb();
 
 	for(i = 0; i < count; ++i) {
-		struct sk_buff *skb;
-		unsigned int free_index, index;
-		u64 correlator;
 		union ibmveth_buf_desc desc;
-		unsigned long lpar_rc;
-		dma_addr_t dma_addr;
 
 		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 
@@ -256,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 				pool->buff_size, DMA_FROM_DEVICE);
 
+		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
+			goto failure;
+
 		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 		pool->dma_addr[index] = dma_addr;
 		pool->skbuff[index] = skb;
@@ -268,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
-		if(lpar_rc != H_SUCCESS) {
-			pool->free_map[free_index] = index;
-			pool->skbuff[index] = NULL;
-			if (pool->consumer_index == 0)
-				pool->consumer_index = pool->size - 1;
-			else
-				pool->consumer_index--;
-			dma_unmap_single(&adapter->vdev->dev,
-					pool->dma_addr[index], pool->buff_size,
-					DMA_FROM_DEVICE);
-			dev_kfree_skb_any(skb);
-			adapter->replenish_add_buff_failure++;
-			break;
-		} else {
+		if (lpar_rc != H_SUCCESS)
+			goto failure;
+		else {
 			buffers_added++;
 			adapter->replenish_add_buff_success++;
 		}
@@ -289,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 
 	mb();
 	atomic_add(buffers_added, &(pool->available));
+	return;
+
+failure:
+	pool->free_map[free_index] = index;
+	pool->skbuff[index] = NULL;
+	if (pool->consumer_index == 0)
+		pool->consumer_index = pool->size - 1;
+	else
+		pool->consumer_index--;
+	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+		dma_unmap_single(&adapter->vdev->dev,
+				 pool->dma_addr[index], pool->buff_size,
+				 DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+	adapter->replenish_add_buff_failure++;
+
+	mb();
+	atomic_add(buffers_added, &(pool->available));
 }
 
 /* replenish routine */
@@ -298,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 
 	adapter->replenish_task_cycles++;
 
-	for(i = 0; i < IbmVethNumBufferPools; i++)
+	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--)
 		if(adapter->rx_buff_pool[i].active)
 			ibmveth_replenish_buffer_pool(adapter,
 						     &adapter->rx_buff_pool[i]);
 
@@ -434,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
 	int i;
+	struct device *dev = &adapter->vdev->dev;
 
 	if(adapter->buffer_list_addr != NULL) {
-		if(!dma_mapping_error(adapter->buffer_list_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					adapter->buffer_list_dma, 4096,
+		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
 			adapter->buffer_list_dma = DMA_ERROR_CODE;
 		}
@@ -447,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 
 	if(adapter->filter_list_addr != NULL) {
-		if(!dma_mapping_error(adapter->filter_list_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					adapter->filter_list_dma, 4096,
+		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
 			adapter->filter_list_dma = DMA_ERROR_CODE;
 		}
@@ -458,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 
 	if(adapter->rx_queue.queue_addr != NULL) {
-		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
+		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+			dma_unmap_single(dev,
 					adapter->rx_queue.queue_dma,
 					adapter->rx_queue.queue_len,
 					DMA_BIDIRECTIONAL);
@@ -473,6 +486,18 @@
 		if (adapter->rx_buff_pool[i].active)
 			ibmveth_free_buffer_pool(adapter,
 						 &adapter->rx_buff_pool[i]);
+
+	if (adapter->bounce_buffer != NULL) {
+		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
+			dma_unmap_single(&adapter->vdev->dev,
+					adapter->bounce_buffer_dma,
+					adapter->netdev->mtu + IBMVETH_BUFF_OH,
+					DMA_BIDIRECTIONAL);
+			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
+		}
+		kfree(adapter->bounce_buffer);
+		adapter->bounce_buffer = NULL;
+	}
 }
 
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
@@ -502,13 +527,14 @@ retry:
 
 static int ibmveth_open(struct net_device *netdev)
 {
-	struct ibmveth_adapter *adapter = netdev->priv;
+	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	u64 mac_address = 0;
 	int rxq_entries = 1;
 	unsigned long lpar_rc;
 	int rc;
 	union ibmveth_buf_desc rxq_desc;
 	int i;
+	struct device *dev;
 
 	ibmveth_debug_printk("open starting\n");
 
@@ -537,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
 		return -ENOMEM;
 	}
 
-	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+	dev = &adapter->vdev->dev;
+
+	adapter->buffer_list_dma = dma_map_single(dev,
 			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+	adapter->filter_list_dma = dma_map_single(dev,
 			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+	adapter->rx_queue.queue_dma = dma_map_single(dev,
 			adapter->rx_queue.queue_addr,
 			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
-	if((dma_mapping_error(adapter->buffer_list_dma) ) ||
-	   (dma_mapping_error(adapter->filter_list_dma)) ||
-	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
ibmveth_error_printk("unable to map filter or buffer list pages\n"); ibmveth_cleanup(adapter); napi_disable(&adapter->napi); @@ -574,7 +602,7 @@ static int ibmveth_open(struct net_device *netdev) if(lpar_rc != H_SUCCESS) { ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); - ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n", + ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n", adapter->buffer_list_dma, adapter->filter_list_dma, rxq_desc.desc, @@ -608,6 +636,24 @@ static int ibmveth_open(struct net_device *netdev) return rc; } + adapter->bounce_buffer = + kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); + if (!adapter->bounce_buffer) { + ibmveth_error_printk("unable to allocate bounce buffer\n"); + ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); + return -ENOMEM; + } + adapter->bounce_buffer_dma = + dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, + netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + ibmveth_error_printk("unable to map bounce buffer\n"); + ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); + return -ENOMEM; + } + ibmveth_debug_printk("initial replenish cycle\n"); ibmveth_interrupt(netdev->irq, netdev); @@ -620,7 +666,7 @@ static int ibmveth_open(struct net_device *netdev) static int ibmveth_close(struct net_device *netdev) { - struct ibmveth_adapter *adapter = netdev->priv; + struct ibmveth_adapter *adapter = netdev_priv(netdev); long lpar_rc; ibmveth_debug_printk("close starting\n"); @@ -676,7 +722,7 @@ static u32 netdev_get_link(struct net_device *dev) { static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) { - struct ibmveth_adapter *adapter = dev->priv; + struct ibmveth_adapter *adapter = netdev_priv(dev); if (data) adapter->rx_csum = 1; @@ -695,7 +741,7 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data) { - struct ibmveth_adapter *adapter = dev->priv; + struct ibmveth_adapter *adapter = netdev_priv(dev); if (data) { dev->features |= NETIF_F_IP_CSUM; @@ -707,8 +753,8 @@ static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data) static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, void (*done) (struct net_device *, u32)) { - struct ibmveth_adapter *adapter = dev->priv; - u64 set_attr, clr_attr, ret_attr; + struct ibmveth_adapter *adapter = netdev_priv(dev); + unsigned long set_attr, clr_attr, ret_attr; long ret; int rc1 = 0, rc2 = 0; int restart = 0; @@ -759,7 +805,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, static int ibmveth_set_rx_csum(struct net_device *dev, u32 data) { - struct ibmveth_adapter *adapter = dev->priv; + struct ibmveth_adapter *adapter = netdev_priv(dev); if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum)) return 0; @@ -769,7 +815,7 @@ static int ibmveth_set_rx_csum(struct net_device *dev, u32 data) static int ibmveth_set_tx_csum(struct net_device *dev, u32 data) { - struct ibmveth_adapter *adapter = dev->priv; + struct ibmveth_adapter *adapter = netdev_priv(dev); int rc = 0; if (data && (dev->features & NETIF_F_IP_CSUM)) @@ -787,7 +833,7 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data) static u32 ibmveth_get_rx_csum(struct net_device *dev) { - struct ibmveth_adapter *adapter = dev->priv; + struct ibmveth_adapter *adapter = netdev_priv(dev); return adapter->rx_csum; 
 }
 
@@ -816,7 +862,7 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
 				      struct ethtool_stats *stats, u64 *data)
 {
 	int i;
-	struct ibmveth_adapter *adapter = dev->priv;
+	struct ibmveth_adapter *adapter = netdev_priv(dev);
 
 	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
 		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
@@ -841,9 +887,10 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
 
-static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
+				      struct net_device *netdev)
 {
-	struct ibmveth_adapter *adapter = netdev->priv;
+	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	union ibmveth_buf_desc desc;
 	unsigned long lpar_rc;
 	unsigned long correlator;
@@ -854,10 +901,10 @@
 	unsigned int tx_packets = 0;
 	unsigned int tx_send_failed = 0;
 	unsigned int tx_map_failed = 0;
+	int used_bounce = 0;
+	unsigned long data_dma_addr;
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
-	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
-					     skb->len, DMA_TO_DEVICE);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -876,12 +923,19 @@
 		buf[1] = 0;
 	}
 
-	if (dma_mapping_error(desc.fields.address)) {
-		ibmveth_error_printk("tx: unable to map xmit buffer\n");
+	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				       skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
+		if (!firmware_has_feature(FW_FEATURE_CMO))
+			ibmveth_error_printk("tx: unable to map xmit buffer\n");
+		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+					  skb->len);
+		desc.fields.address = adapter->bounce_buffer_dma;
 		tx_map_failed++;
-		tx_dropped++;
-		goto out;
-	}
+		used_bounce = 1;
+		wmb();
+	} else
+		desc.fields.address = data_dma_addr;
 
 	/* send the frame. Arbitrarily set retrycount to 1024 */
 	correlator = 0;
@@ -905,8 +959,9 @@
 		netdev->trans_start = jiffies;
 	}
 
-	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
-			 skb->len, DMA_TO_DEVICE);
+	if (!used_bounce)
+		dma_unmap_single(&adapter->vdev->dev, data_dma_addr,
+				 skb->len, DMA_TO_DEVICE);
 
 out:	spin_lock_irqsave(&adapter->stats_lock, flags);
 	netdev->stats.tx_dropped += tx_dropped;
@@ -917,7 +972,7 @@ out:	spin_lock_irqsave(&adapter->stats_lock, flags);
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 
 	dev_kfree_skb(skb);
-	return 0;
+	return NETDEV_TX_OK;
 }
 
 static int ibmveth_poll(struct napi_struct *napi, int budget)
@@ -960,7 +1015,6 @@
 			netdev->stats.rx_packets++;
 			netdev->stats.rx_bytes += length;
 			frames_processed++;
-			netdev->last_rx = jiffies;
 		}
 	} while (frames_processed < budget);
 
@@ -975,10 +1029,10 @@
 
 		ibmveth_assert(lpar_rc == H_SUCCESS);
 
-		netif_rx_complete(netdev, napi);
+		napi_complete(napi);
 
 		if (ibmveth_rxq_pending_buffer(adapter) &&
-		    netif_rx_reschedule(netdev, napi)) {
+		    napi_reschedule(napi)) {
 			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 					       VIO_IRQ_DISABLE);
 			goto restart_poll;
@@ -991,21 +1045,21 @@
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *netdev = dev_instance;
-	struct ibmveth_adapter *adapter = netdev->priv;
+	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	unsigned long lpar_rc;
 
-	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
+	if (napi_schedule_prep(&adapter->napi)) {
 		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 				       VIO_IRQ_DISABLE);
 		ibmveth_assert(lpar_rc == H_SUCCESS);
-		__netif_rx_schedule(netdev, &adapter->napi);
+		__napi_schedule(&adapter->napi);
 	}
 	return IRQ_HANDLED;
 }
 
 static void ibmveth_set_multicast_list(struct net_device *netdev)
 {
-	struct ibmveth_adapter *adapter = netdev->priv;
+	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	unsigned long lpar_rc;
 
 	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
@@ -1053,10 +1107,10 @@
 
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
-	struct ibmveth_adapter *adapter = dev->priv;
+	struct ibmveth_adapter *adapter = netdev_priv(dev);
+	struct vio_dev *viodev = adapter->vdev;
 	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-	int reinit = 0;
-	int i, rc;
+	int i;
 
 	if (new_mtu < IBMVETH_MAX_MTU)
 		return -EINVAL;
@@ -1068,23 +1122,34 @@
 	if (i == IbmVethNumBufferPools)
 		return -EINVAL;
 
+	/* Deactivate all the buffer pools so that the next loop can activate
+	   only the buffer pools necessary to hold the new MTU */
+	for (i = 0; i < IbmVethNumBufferPools; i++)
+		if (adapter->rx_buff_pool[i].active) {
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+			adapter->rx_buff_pool[i].active = 0;
+		}
+
 	/* Look for an active buffer pool that can hold the new MTU */
 	for(i = 0; i<IbmVethNumBufferPools; i++) {
-		if (!adapter->rx_buff_pool[i].active) {
-			adapter->rx_buff_pool[i].active = 1;
-			reinit = 1;
-		}
+		adapter->rx_buff_pool[i].active = 1;
 
 		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
-			if (reinit && netif_running(adapter->netdev)) {
+			if (netif_running(adapter->netdev)) {
 				adapter->pool_config = 1;
 				ibmveth_close(adapter->netdev);
 				adapter->pool_config = 0;
 				dev->mtu = new_mtu;
-				if ((rc = ibmveth_open(adapter->netdev)))
-					return rc;
-			} else
-				dev->mtu = new_mtu;
+				vio_cmo_set_dev_desired(viodev,
+						ibmveth_get_desired_dma
+						(viodev));
+				return ibmveth_open(adapter->netdev);
+			}
+			dev->mtu = new_mtu;
+			vio_cmo_set_dev_desired(viodev,
+					ibmveth_get_desired_dma
+					(viodev));
 			return 0;
 		}
 	}
@@ -1094,18 +1159,72 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void ibmveth_poll_controller(struct net_device *dev)
 {
-	ibmveth_replenish_task(dev->priv);
+	ibmveth_replenish_task(netdev_priv(dev));
 	ibmveth_interrupt(dev->irq, dev);
 }
 #endif
 
+/**
+ * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
+ *
+ * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
+ *
+ * Return value:
+ *	Number of bytes of IO data the driver will need to perform well.
+ */
+static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
+{
+	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
+	struct ibmveth_adapter *adapter;
+	unsigned long ret;
+	int i;
+	int rxqentries = 1;
+
+	/* netdev inits at probe time along with the structures we need below*/
+	if (netdev == NULL)
+		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+
+	adapter = netdev_priv(netdev);
+
+	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
+	ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+
+	for (i = 0; i < IbmVethNumBufferPools; i++) {
+		/* add the size of the active receive buffers */
+		if (adapter->rx_buff_pool[i].active)
+			ret +=
+			    adapter->rx_buff_pool[i].size *
+			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+					     buff_size);
+		rxqentries += adapter->rx_buff_pool[i].size;
+	}
+	/* add the size of the receive queue entries */
+	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+
+	return ret;
+}
+
+static const struct net_device_ops ibmveth_netdev_ops = {
+	.ndo_open               = ibmveth_open,
+	.ndo_stop               = ibmveth_close,
+	.ndo_start_xmit         = ibmveth_start_xmit,
+	.ndo_set_multicast_list = ibmveth_set_multicast_list,
+	.ndo_do_ioctl           = ibmveth_ioctl,
+	.ndo_change_mtu         = ibmveth_change_mtu,
+	.ndo_validate_addr      = eth_validate_addr,
+	.ndo_set_mac_address    = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller    = ibmveth_poll_controller,
+#endif
+};
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
 	long ret;
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
-	u64 set_attr, ret_attr;
+	unsigned long set_attr, ret_attr;
 
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
@@ -1136,8 +1255,8 @@
 	if(!netdev)
 		return -ENOMEM;
 
-	adapter = netdev->priv;
-	dev->dev.driver_data = netdev;
+	adapter = netdev_priv(netdev);
+	dev_set_drvdata(&dev->dev, netdev);
 
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
@@ -1161,21 +1280,13 @@
 	memcpy(&adapter->mac_addr, mac_addr_p, 6);
 
 	netdev->irq = dev->irq;
-	netdev->open               = ibmveth_open;
-	netdev->stop               = ibmveth_close;
-	netdev->hard_start_xmit    = ibmveth_start_xmit;
-	netdev->set_multicast_list = ibmveth_set_multicast_list;
-	netdev->do_ioctl           = ibmveth_ioctl;
-	netdev->ethtool_ops        = &netdev_ethtool_ops;
-	netdev->change_mtu         = ibmveth_change_mtu;
+	netdev->netdev_ops = &ibmveth_netdev_ops;
+	netdev->ethtool_ops = &netdev_ethtool_ops;
 	SET_NETDEV_DEV(netdev, &dev->dev);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	netdev->poll_controller = ibmveth_poll_controller;
-#endif
 	netdev->features |= NETIF_F_LLTX;
 	spin_lock_init(&adapter->stats_lock);
 
-	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 
 	for(i = 0; i<IbmVethNumBufferPools; i++) {
 		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
@@ -1231,8 +1342,8 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
 static int __devexit ibmveth_remove(struct vio_dev *dev)
 {
-	struct net_device *netdev = dev->dev.driver_data;
-	struct ibmveth_adapter *adapter = netdev->priv;
+	struct net_device *netdev = dev_get_drvdata(&dev->dev);
+	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	int i;
 
 	for(i = 0; i<IbmVethNumBufferPools; i++)
@@ -1242,6 +1353,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 
 	ibmveth_proc_unregister_adapter(adapter);
 
 	free_netdev(netdev);
+	dev_set_drvdata(&dev->dev, NULL);
+
 	return 0;
 }
@@ -1259,70 +1372,33 @@ static void ibmveth_proc_unregister_driver(void)
 	remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
 }
 
-static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	if (*pos == 0) {
-		return (void *)1;
-	} else {
-		return NULL;
-	}
-}
-
-static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	++*pos;
-	return NULL;
-}
-
-static void ibmveth_seq_stop(struct seq_file *seq, void *v)
-{
-}
-
-static int ibmveth_seq_show(struct seq_file *seq, void *v)
+static int ibmveth_show(struct seq_file *seq, void *v)
 {
 	struct ibmveth_adapter *adapter = seq->private;
-	char *current_mac = ((char*) &adapter->netdev->dev_addr);
-	char *firmware_mac = ((char*) &adapter->mac_addr) ;
-	DECLARE_MAC_BUF(mac);
+	char *current_mac = (char *) adapter->netdev->dev_addr;
+	char *firmware_mac = (char *) &adapter->mac_addr;
 
 	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
 
 	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
-	seq_printf(seq, "Current MAC:     %s\n", print_mac(mac, current_mac));
-	seq_printf(seq, "Firmware MAC:    %s\n", print_mac(mac, firmware_mac));
+	seq_printf(seq, "Current MAC:     %pM\n", current_mac);
+	seq_printf(seq, "Firmware MAC:    %pM\n", firmware_mac);
 
 	seq_printf(seq, "\nAdapter Statistics:\n");
-	seq_printf(seq, "  TX:  vio_map_single failres:      %ld\n", adapter->tx_map_failed);
-	seq_printf(seq, "       send failures:               %ld\n", adapter->tx_send_failed);
-	seq_printf(seq, "  RX:  replenish task cycles:       %ld\n", adapter->replenish_task_cycles);
-	seq_printf(seq, "       alloc_skb_failures:          %ld\n", adapter->replenish_no_mem);
-	seq_printf(seq, "       add buffer failures:         %ld\n", adapter->replenish_add_buff_failure);
-	seq_printf(seq, "       invalid buffers:             %ld\n", adapter->rx_invalid_buffer);
-	seq_printf(seq, "       no buffers:                  %ld\n", adapter->rx_no_buffer);
+	seq_printf(seq, "  TX:  vio_map_single failres:      %lld\n", adapter->tx_map_failed);
+	seq_printf(seq, "       send failures:               %lld\n", adapter->tx_send_failed);
+	seq_printf(seq, "  RX:  replenish task cycles:       %lld\n", adapter->replenish_task_cycles);
+	seq_printf(seq, "       alloc_skb_failures:          %lld\n", adapter->replenish_no_mem);
+	seq_printf(seq, "       add buffer failures:         %lld\n", adapter->replenish_add_buff_failure);
+	seq_printf(seq, "       invalid buffers:             %lld\n", adapter->rx_invalid_buffer);
+	seq_printf(seq, "       no buffers:                  %lld\n", adapter->rx_no_buffer);
 
 	return 0;
 }
-static struct seq_operations ibmveth_seq_ops = {
-	.start = ibmveth_seq_start,
-	.next  = ibmveth_seq_next,
-	.stop  = ibmveth_seq_stop,
-	.show  = ibmveth_seq_show,
-};
 
 static int ibmveth_proc_open(struct inode *inode, struct file *file)
 {
-	struct seq_file *seq;
-	struct proc_dir_entry *proc;
-	int rc;
-
-	rc = seq_open(file, &ibmveth_seq_ops);
-	if (!rc) {
-		/* recover the pointer buried in proc_dir_entry data */
-		seq = file->private_data;
-		proc = PDE(inode);
-		seq->private = proc->data;
-	}
-	return rc;
+	return single_open(file, ibmveth_show, PDE(inode)->data);
 }
 
 static const struct file_operations ibmveth_proc_fops = {
@@ -1330,7 +1406,7 @@
 	.open    = ibmveth_proc_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
-	.release = seq_release,
+	.release = single_release,
 };
 
 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
@@ -1339,13 +1415,10 @@
 	if (ibmveth_proc_dir) {
 		char u_addr[10];
 		sprintf(u_addr, "%x", adapter->vdev->unit_address);
-		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
-		if (!entry) {
+		entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
+					 &ibmveth_proc_fops, adapter);
+		if (!entry)
 			ibmveth_error_printk("Cannot create adapter proc entry");
-		} else {
-			entry->data = (void *) adapter;
-			entry->proc_fops = &ibmveth_proc_fops;
-		}
 	}
 	return;
 }
@@ -1402,9 +1475,9 @@ const char * buf, size_t count)
 	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
-	struct net_device *netdev =
-	    container_of(kobj->parent, struct device, kobj)->driver_data;
-	struct ibmveth_adapter *adapter = netdev->priv;
+	struct net_device *netdev = dev_get_drvdata(
+	    container_of(kobj->parent, struct device, kobj));
+	struct ibmveth_adapter *adapter = netdev_priv(netdev);
 	long value = simple_strtol(buf, NULL, 10);
 	long rc;
 
@@ -1442,14 +1515,15 @@ const char * buf, size_t count)
 				return -EPERM;
 			}
-			pool->active = 0;
 			if (netif_running(netdev)) {
 				adapter->pool_config = 1;
 				ibmveth_close(netdev);
+				pool->active = 0;
 				adapter->pool_config = 0;
 				if ((rc = ibmveth_open(netdev)))
 					return rc;
 			}
+			pool->active = 0;
 		}
 	} else if (attr == &veth_num_attr) {
 		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
 			return -EINVAL;
@@ -1525,6 +1599,7 @@ static struct vio_driver ibmveth_driver = {
 	.id_table	= ibmveth_device_table,
 	.probe		= ibmveth_probe,
 	.remove		= ibmveth_remove,
+	.get_desired_dma = ibmveth_get_desired_dma,
 	.driver	= {
 		.name	= ibmveth_driver_name,
 		.owner	= THIS_MODULE,