X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fnet%2Fb44.c;h=b70b81ec34c386f0520437f8cd96199cebc9e611;hb=1aec5bdfed91b50aedbcad43393bcb05033c7ef3;hp=c3bda5ce67c488dba1fc7f51469f16522c5b1080;hpb=f225763a7d6c92c4932dbd528437997078496fcc;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c3bda5c..b70b81e 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -73,8 +73,8 @@
 	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
 #define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
 
-#define RX_PKT_OFFSET		30
-#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET + 64)
+#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
+#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
@@ -660,7 +660,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
 	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-		mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
+		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		/* Sigh... */
 		if (!ssb_dma_mapping_error(bp->sdev, mapping))
 			ssb_dma_unmap_single(bp->sdev, mapping,
@@ -673,16 +673,16 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 					 RX_PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
 		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-			mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
+			mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 			if (!ssb_dma_mapping_error(bp->sdev, mapping))
 				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
+		bp->force_copybreak = 1;
 	}
 
 	rh = (struct rx_header *) skb->data;
-	skb_reserve(skb, RX_PKT_OFFSET);
 
 	rh->len = 0;
 	rh->flags = 0;
@@ -693,17 +693,17 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 		if (src_map != NULL)
 			src_map->skb = NULL;
 
-	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
+	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
 	if (dest_idx == (B44_RX_RING_SIZE - 1))
 		ctrl |= DESC_CTRL_EOT;
 
 	dp = &bp->rx_ring[dest_idx];
 	dp->ctrl = cpu_to_le32(ctrl);
-	dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
+	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
-					     dest_idx * sizeof(dp),
+					     dest_idx * sizeof(*dp),
 					     DMA_BIDIRECTIONAL);
 
 	return RX_PKT_BUF_SZ;
@@ -731,7 +731,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
-					  src_idx * sizeof(src_desc),
+					  src_idx * sizeof(*src_desc),
 					  DMA_BIDIRECTIONAL);
 
 	ctrl = src_desc->ctrl;
@@ -747,10 +747,10 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
-					     dest_idx * sizeof(dest_desc),
+					     dest_idx * sizeof(*dest_desc),
 					     DMA_BIDIRECTIONAL);
 
-	ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
+	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
 				       RX_PKT_BUF_SZ,
 				       DMA_FROM_DEVICE);
 }
@@ -801,7 +801,7 @@ static int b44_rx(struct b44 *bp, int budget)
 		/* Omit CRC. */
 		len -= 4;
 
-		if (len > RX_COPY_THRESHOLD) {
+		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
 			int skb_size;
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
@@ -809,8 +809,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			ssb_dma_unmap_single(bp->sdev, map,
 					     skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
-			skb_put(skb, len + RX_PKT_OFFSET);
-			skb_pull(skb, RX_PKT_OFFSET);
+			skb_put(skb, len + RX_PKT_OFFSET);
+			skb_pull(skb, RX_PKT_OFFSET);
 		} else {
 			struct sk_buff *copy_skb;
@@ -829,7 +829,6 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb->ip_summed = CHECKSUM_NONE;
 			skb->protocol = eth_type_trans(skb, bp->dev);
 			netif_receive_skb(skb);
-			bp->dev->last_rx = jiffies;
 			received++;
 			budget--;
 		next_pkt:
@@ -847,7 +846,6 @@ static int b44_rx(struct b44 *bp, int budget)
 static int b44_poll(struct napi_struct *napi, int budget)
 {
 	struct b44 *bp = container_of(napi, struct b44, napi);
-	struct net_device *netdev = bp->dev;
 	int work_done;
 
 	spin_lock_irq(&bp->lock);
@@ -876,7 +874,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (work_done < budget) {
-		netif_rx_complete(netdev, napi);
+		napi_complete(napi);
 		b44_enable_ints(bp);
 	}
@@ -908,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
 			goto irq_ack;
 		}
 
-		if (netif_rx_schedule_prep(dev, &bp->napi)) {
+		if (napi_schedule_prep(&bp->napi)) {
 			/* NOTE: These writes are posted by the readback of
 			 *       the ISTAT register below.
 			 */
 			bp->istat = istat;
 			__b44_disable_ints(bp);
-			__netif_rx_schedule(dev, &bp->napi);
+			__napi_schedule(&bp->napi);
 		} else {
 			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
 			       dev->name);
@@ -967,7 +965,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
+	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
@@ -975,13 +973,13 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			ssb_dma_unmap_single(bp->sdev, mapping, len,
 					     DMA_TO_DEVICE);
 
-		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
 		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
 					     len, DMA_TO_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
+		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 			if (!ssb_dma_mapping_error(bp->sdev, mapping))
 				ssb_dma_unmap_single(bp->sdev, mapping,
 						     len, DMA_TO_DEVICE);
@@ -1206,7 +1204,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 						 DMA_BIDIRECTIONAL);
 
 		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
-			rx_ring_dma + size > DMA_30BIT_MASK) {
+			rx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(rx_ring);
 			goto out_err;
 		}
@@ -1233,7 +1231,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 					    DMA_TO_DEVICE);
 
 		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
-			tx_ring_dma + size > DMA_30BIT_MASK) {
+			tx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(tx_ring);
 			goto out_err;
 		}
@@ -1266,8 +1264,14 @@ static void b44_clear_stats(struct b44 *bp)
 static void b44_chip_reset(struct b44 *bp, int reset_kind)
 {
 	struct ssb_device *sdev = bp->sdev;
+	bool was_enabled;
 
-	if (ssb_device_is_enabled(bp->sdev)) {
+	was_enabled = ssb_device_is_enabled(bp->sdev);
+
+	ssb_device_enable(bp->sdev, 0);
+	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
+
+	if (was_enabled) {
 		bw32(bp, B44_RCV_LAZY, 0);
 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
 		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
@@ -1279,10 +1283,8 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)
 		}
 		bw32(bp, B44_DMARX_CTRL, 0);
 		bp->rx_prod = bp->rx_cons = 0;
-	} else
-		ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
+	}
 
-	ssb_device_enable(bp->sdev, 0);
 	b44_clear_stats(bp);
 
 	/*
@@ -2110,6 +2112,22 @@ static int __devinit b44_get_invariants(struct b44 *bp)
 	return err;
 }
 
+static const struct net_device_ops b44_netdev_ops = {
+	.ndo_open		= b44_open,
+	.ndo_stop		= b44_close,
+	.ndo_start_xmit		= b44_start_xmit,
+	.ndo_get_stats		= b44_get_stats,
+	.ndo_set_multicast_list	= b44_set_rx_mode,
+	.ndo_set_mac_address	= b44_set_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= b44_ioctl,
+	.ndo_tx_timeout		= b44_tx_timeout,
+	.ndo_change_mtu		= b44_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= b44_poll_controller,
+#endif
+};
+
 static int __devinit b44_init_one(struct ssb_device *sdev,
 				  const struct ssb_device_id *ent)
 {
@@ -2117,7 +2135,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 	struct net_device *dev;
 	struct b44 *bp;
 	int err;
-	DECLARE_MAC_BUF(mac);
 
 	instance++;
@@ -2140,6 +2157,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 	bp = netdev_priv(dev);
 	bp->sdev = sdev;
 	bp->dev = dev;
+	bp->force_copybreak = 0;
 
 	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
@@ -2148,20 +2166,9 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 	bp->rx_pending = B44_DEF_RX_RING_PENDING;
 	bp->tx_pending = B44_DEF_TX_RING_PENDING;
 
-	dev->open = b44_open;
-	dev->stop = b44_close;
-	dev->hard_start_xmit = b44_start_xmit;
-	dev->get_stats = b44_get_stats;
-	dev->set_multicast_list = b44_set_rx_mode;
-	dev->set_mac_address = b44_set_mac_addr;
-	dev->do_ioctl = b44_ioctl;
-	dev->tx_timeout = b44_tx_timeout;
+	dev->netdev_ops = &b44_netdev_ops;
 	netif_napi_add(dev, &bp->napi, b44_poll, 64);
 	dev->watchdog_timeo = B44_TX_TIMEOUT;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = b44_poll_controller;
-#endif
-	dev->change_mtu = b44_change_mtu;
 	dev->irq = sdev->irq;
 	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
@@ -2173,7 +2180,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 			"Failed to powerup the bus\n");
 		goto err_out_free_dev;
 	}
-	err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK);
+	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
 	if (err) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system.\n");
@@ -2213,8 +2220,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 	 */
 	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
 
-	printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %s\n",
-	       dev->name, print_mac(mac, dev->dev_addr));
+	printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
+	       dev->name, dev->dev_addr);
 
 	return 0;
@@ -2233,6 +2240,7 @@ static void __devexit b44_remove_one(struct ssb_device *sdev)
 	struct net_device *dev = ssb_get_drvdata(sdev);
 
 	unregister_netdev(dev);
+	ssb_device_disable(sdev, 0);
 	ssb_bus_may_powerdown(sdev->bus);
 	free_netdev(dev);
 	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
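
Note on the DMA-mask conversion above: DMA_BIT_MASK(n), from
<linux/dma-mapping.h>, expands to a mask with the low n bits set, so
DMA_BIT_MASK(30) is (1ULL << 30) - 1, matching the chip's 1 GB DMA limit.
The sketch below restates the test the patch open-codes at each mapping
site; the helper name b44_mapping_usable() is hypothetical (the driver
writes the comparison inline, through ssb wrappers such as
ssb_dma_mapping_error()), while dma_mapping_error() and DMA_BIT_MASK()
are the generic kernel APIs.

	#include <linux/types.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical helper: a mapping is usable only if it succeeded and
	 * the whole buffer ends at or below the 30-bit boundary the chip can
	 * address. On failure the driver falls back to a bounce buffer
	 * allocated with GFP_DMA, as in the b44_start_xmit() hunks above. */
	static bool b44_mapping_usable(struct device *dev, dma_addr_t mapping,
				       size_t len)
	{
		return !dma_mapping_error(dev, mapping) &&
		       mapping + len <= DMA_BIT_MASK(30);
	}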