diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 89eaf3b..0f32db3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -53,7 +53,9 @@
 #include
 #include
 #include
+#include
 #include
+#include
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
 static char mv643xx_eth_driver_version[] = "1.4";
@@ -87,7 +89,24 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define MAC_ADDR_LOW			0x0014
 #define MAC_ADDR_HIGH			0x0018
 #define SDMA_CONFIG			0x001c
+#define  TX_BURST_SIZE_16_64BIT		0x01000000
+#define  TX_BURST_SIZE_4_64BIT		0x00800000
+#define  BLM_TX_NO_SWAP			0x00000020
+#define  BLM_RX_NO_SWAP			0x00000010
+#define  RX_BURST_SIZE_16_64BIT		0x00000008
+#define  RX_BURST_SIZE_4_64BIT		0x00000004
 #define PORT_SERIAL_CONTROL		0x003c
+#define  SET_MII_SPEED_TO_100		0x01000000
+#define  SET_GMII_SPEED_TO_1000		0x00800000
+#define  SET_FULL_DUPLEX_MODE		0x00200000
+#define  MAX_RX_PACKET_9700BYTE		0x000a0000
+#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
+#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
+#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
+#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
+#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
+#define  FORCE_LINK_PASS		0x00000002
+#define  SERIAL_PORT_ENABLE		0x00000001
 #define PORT_STATUS			0x0044
 #define  TX_FIFO_EMPTY			0x00000400
 #define  TX_IN_PROGRESS			0x00000080
@@ -105,7 +124,9 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define TX_BW_BURST			0x005c
 #define INT_CAUSE			0x0060
 #define  INT_TX_END			0x07f80000
+#define  INT_TX_END_0			0x00080000
 #define  INT_RX				0x000003fc
+#define  INT_RX_0			0x00000004
 #define  INT_EXT			0x00000002
 #define INT_CAUSE_EXT			0x0064
 #define  INT_EXT_LINK_PHY		0x00110000
@@ -134,15 +155,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
 
 
 /*
- * SDMA configuration register.
+ * SDMA configuration register default value.
  */
-#define RX_BURST_SIZE_4_64BIT		(2 << 1)
-#define RX_BURST_SIZE_16_64BIT		(4 << 1)
-#define BLM_RX_NO_SWAP			(1 << 4)
-#define BLM_TX_NO_SWAP			(1 << 5)
-#define TX_BURST_SIZE_4_64BIT		(2 << 22)
-#define TX_BURST_SIZE_16_64BIT		(4 << 22)
-
 #if defined(__BIG_ENDIAN)
 #define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
 		(RX_BURST_SIZE_4_64BIT	|	\
@@ -159,22 +173,11 @@
 
 
 /*
- * Port serial control register.
+ * Misc definitions.
  */
-#define SET_MII_SPEED_TO_100		(1 << 24)
-#define SET_GMII_SPEED_TO_1000		(1 << 23)
-#define SET_FULL_DUPLEX_MODE		(1 << 21)
-#define MAX_RX_PACKET_9700BYTE		(5 << 17)
-#define DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
-#define DO_NOT_FORCE_LINK_FAIL		(1 << 10)
-#define SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
-#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
-#define DISABLE_AUTO_NEG_FOR_DUPLEX	(1 << 2)
-#define FORCE_LINK_PASS			(1 << 1)
-#define SERIAL_PORT_ENABLE		(1 << 0)
-
-#define DEFAULT_RX_QUEUE_SIZE	128
-#define DEFAULT_TX_QUEUE_SIZE	256
+#define DEFAULT_RX_QUEUE_SIZE	128
+#define DEFAULT_TX_QUEUE_SIZE	256
+#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 
 
 /*
@@ -227,6 +230,12 @@ struct tx_desc {
 #define RX_ENABLE_INTERRUPT		0x20000000
 #define RX_FIRST_DESC			0x08000000
 #define RX_LAST_DESC			0x04000000
+#define RX_IP_HDR_OK			0x02000000
+#define RX_PKT_IS_IPV4			0x01000000
+#define RX_PKT_IS_ETHERNETV2		0x00800000
+#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
+#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
+#define RX_PKT_IS_VLAN_TAGGED		0x00080000
 
 /* TX descriptor command */
 #define TX_ENABLE_INTERRUPT		0x00800000
@@ -324,6 +333,12 @@ struct mib_counters {
 	u32 late_collision;
 };
 
+struct lro_counters {
+	u32 lro_aggregated;
+	u32 lro_flushed;
+	u32 lro_no_desc;
+};
+
 struct rx_queue {
 	int index;
 
@@ -337,6 +352,9 @@
 	dma_addr_t rx_desc_dma;
 	int rx_desc_area_size;
 	struct sk_buff **rx_skb;
+
+	struct net_lro_mgr lro_mgr;
+	struct net_lro_desc lro_arr[8];
 };
 
 struct tx_queue {
@@ -372,15 +390,18 @@
 	spinlock_t mib_counters_lock;
 	struct mib_counters mib_counters;
 
+	struct lro_counters lro_counters;
+
 	struct work_struct tx_timeout_task;
 
 	struct napi_struct napi;
+	u32 int_mask;
+	u8 oom;
 	u8 work_link;
 	u8 work_tx;
 	u8 work_tx_end;
 	u8 work_rx;
 	u8 work_rx_refill;
-	u8 work_rx_oom;
 
 	int skb_size;
 	struct sk_buff_head rx_recycle;
@@ -496,12 +517,40 @@ static void txq_maybe_wake(struct tx_queue *txq)
 
 
 /* rx napi ******************************************************************/
+static int
+mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
+		       u64 *hdr_flags, void *priv)
+{
+	unsigned long cmd_sts = (unsigned long)priv;
+
+	/*
+	 * Make sure that this packet is Ethernet II, is not VLAN
+	 * tagged, is IPv4, has a valid IP header, and is TCP.
+	 */
+	if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
+		       RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
+		       RX_PKT_IS_VLAN_TAGGED)) !=
+	    (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
+	     RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
+		return -1;
+
+	skb_reset_network_header(skb);
+	skb_set_transport_header(skb, ip_hdrlen(skb));
+	*iphdr = ip_hdr(skb);
+	*tcph = tcp_hdr(skb);
+	*hdr_flags = LRO_IPV4 | LRO_TCP;
+
+	return 0;
+}
+
 static int rxq_process(struct rx_queue *rxq, int budget)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
 	struct net_device_stats *stats = &mp->dev->stats;
+	int lro_flush_needed;
 	int rx;
 
+	lro_flush_needed = 0;
 	rx = 0;
 	while (rx < budget && rxq->rx_desc_count) {
 		struct rx_desc *rx_desc;
@@ -523,7 +572,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		if (rxq->rx_curr_desc == rxq->rx_ring_size)
 			rxq->rx_curr_desc = 0;
 
-		dma_unmap_single(NULL, rx_desc->buf_ptr,
+		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
 				 rx_desc->buf_size, DMA_FROM_DEVICE);
 		rxq->rx_desc_count--;
 		rx++;
@@ -561,7 +610,13 @@
 		if (cmd_sts & LAYER_4_CHECKSUM_OK)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		skb->protocol = eth_type_trans(skb, mp->dev);
-		netif_receive_skb(skb);
+
+		if (skb->dev->features & NETIF_F_LRO &&
+		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
+			lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
+			lro_flush_needed = 1;
+		} else
+			netif_receive_skb(skb);
 
 		continue;
 
@@ -582,6 +637,9 @@ err:
 			dev_kfree_skb(skb);
 	}
 
+	if (lro_flush_needed)
+		lro_flush_all(&rxq->lro_mgr);
+
 	if (rx < budget)
 		mp->work_rx &= ~(1 << rxq->index);
 
@@ -596,23 +654,20 @@
 	refilled = 0;
 	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
 		struct sk_buff *skb;
-		int unaligned;
 		int rx;
 		struct rx_desc *rx_desc;
 
 		skb = __skb_dequeue(&mp->rx_recycle);
 		if (skb == NULL)
-			skb = dev_alloc_skb(mp->skb_size +
-					    dma_get_cache_alignment() - 1);
+			skb = dev_alloc_skb(mp->skb_size);
 
 		if (skb == NULL) {
-			mp->work_rx_oom |= 1 << rxq->index;
+			mp->oom = 1;
 			goto oom;
 		}
 
-		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
-		if (unaligned)
-			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
+		if (SKB_DMA_REALIGN)
+			skb_reserve(skb, SKB_DMA_REALIGN);
 
 		refilled++;
 		rxq->rx_desc_count++;
@@ -623,8 +678,9 @@
 
 		rx_desc = rxq->rx_desc_area + rx;
 
-		rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
-						  mp->skb_size, DMA_FROM_DEVICE);
+		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+						  skb->data, mp->skb_size,
+						  DMA_FROM_DEVICE);
 		rx_desc->buf_size = mp->skb_size;
 		rxq->rx_skb[rx] = skb;
 		wmb();
@@ -663,6 +719,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 
 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int frag;
 
@@ -691,10 +748,10 @@
 
 		desc->l4i_chk = 0;
 		desc->byte_cnt = this_frag->size;
-		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
-					     this_frag->page_offset,
-					     this_frag->size,
-					     DMA_TO_DEVICE);
+		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
+					     this_frag->page,
+					     this_frag->page_offset,
+					     this_frag->size, DMA_TO_DEVICE);
 	}
 }
 
@@ -771,7 +828,8 @@ no_csum:
 
 	desc->l4i_chk = l4i_chk;
 	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+				       length, DMA_TO_DEVICE);
 
 	__skb_queue_tail(&txq->tx_skb, skb);
 
@@ -901,18 +959,17 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		}
 
 		if (cmd_sts & TX_FIRST_DESC) {
-			dma_unmap_single(NULL, desc->buf_ptr,
+			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
 					 desc->byte_cnt, DMA_TO_DEVICE);
 		} else {
-			dma_unmap_page(NULL, desc->buf_ptr,
+			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
 				       desc->byte_cnt, DMA_TO_DEVICE);
 		}
 
 		if (skb != NULL) {
 			if (skb_queue_len(&mp->rx_recycle) <
 				    mp->rx_ring_size &&
-			    skb_recycle_check(skb, mp->skb_size +
-					      dma_get_cache_alignment() - 1))
+			    skb_recycle_check(skb, mp->skb_size))
 				__skb_queue_head(&mp->rx_recycle, skb);
 			else
 				dev_kfree_skb(skb);
@@ -1161,6 +1218,26 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
 	return stats;
 }
 
+static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
+{
+	u32 lro_aggregated = 0;
+	u32 lro_flushed = 0;
+	u32 lro_no_desc = 0;
+	int i;
+
+	for (i = 0; i < mp->rxq_count; i++) {
+		struct rx_queue *rxq = mp->rxq + i;
+
+		lro_aggregated += rxq->lro_mgr.stats.aggregated;
+		lro_flushed += rxq->lro_mgr.stats.flushed;
+		lro_no_desc += rxq->lro_mgr.stats.no_desc;
+	}
+
+	mp->lro_counters.lro_aggregated = lro_aggregated;
+	mp->lro_counters.lro_flushed = lro_flushed;
+	mp->lro_counters.lro_no_desc = lro_no_desc;
+}
+
 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
 {
 	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1178,9 +1255,8 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 {
 	struct mib_counters *p = &mp->mib_counters;
 
-	spin_lock(&mp->mib_counters_lock);
+	spin_lock_bh(&mp->mib_counters_lock);
 	p->good_octets_received += mib_read(mp, 0x00);
-	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
 	p->bad_octets_received += mib_read(mp, 0x08);
 	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
 	p->good_frames_received += mib_read(mp, 0x10);
@@ -1194,7 +1270,6 @@
 	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
 	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
 	p->good_octets_sent += mib_read(mp, 0x38);
-	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
 	p->good_frames_sent += mib_read(mp, 0x40);
 	p->excessive_collision += mib_read(mp, 0x44);
 	p->multicast_frames_sent += mib_read(mp, 0x48);
@@ -1211,7 +1286,7 @@
 	p->bad_crc_event += mib_read(mp, 0x74);
 	p->collision += mib_read(mp, 0x78);
 	p->late_collision += mib_read(mp, 0x7c);
-	spin_unlock(&mp->mib_counters_lock);
+	spin_unlock_bh(&mp->mib_counters_lock);
 
 	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
@@ -1319,6 +1394,10 @@ struct mv643xx_eth_stats {
 	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
 	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
 
+#define LROSTAT(m)						\
+	{ #m, FIELD_SIZEOF(struct lro_counters, m),		\
+	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }
+
 static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
 	SSTAT(rx_packets),
 	SSTAT(tx_packets),
@@ -1358,6 +1437,9 @@
 	MIBSTAT(bad_crc_event),
 	MIBSTAT(collision),
 	MIBSTAT(late_collision),
+	LROSTAT(lro_aggregated),
+	LROSTAT(lro_flushed),
+	LROSTAT(lro_no_desc),
 };
 
 static int
@@ -1528,6 +1610,24 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
 	return 0;
 }
 
+static u32
+mv643xx_eth_get_rx_csum(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	return !!(rdlp(mp, PORT_CONFIG) & 0x02000000);
+}
+
+static int
+mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
+
+	return 0;
+}
+
 static void mv643xx_eth_get_strings(struct net_device *dev,
 				    uint32_t stringset, uint8_t *data)
 {
@@ -1551,6 +1651,7 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
 
 	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
+	mv643xx_eth_grab_lro_stats(mp);
 
 	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
 		const struct mv643xx_eth_stats *stat;
@@ -1586,9 +1687,14 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
 	.set_coalesce		= mv643xx_eth_set_coalesce,
 	.get_ringparam		= mv643xx_eth_get_ringparam,
 	.set_ringparam		= mv643xx_eth_set_ringparam,
+	.get_rx_csum		= mv643xx_eth_get_rx_csum,
+	.set_rx_csum		= mv643xx_eth_set_rx_csum,
+	.set_tx_csum		= ethtool_op_set_tx_csum,
 	.set_sg			= ethtool_op_set_sg,
 	.get_strings		= mv643xx_eth_get_strings,
 	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
+	.get_flags		= ethtool_op_get_flags,
+	.set_flags		= ethtool_op_set_flags,
 	.get_sset_count		= mv643xx_eth_get_sset_count,
 };
 
@@ -1616,20 +1722,20 @@ static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
 
 static u32 uc_addr_filter_mask(struct net_device *dev)
 {
-	struct dev_addr_list *uc_ptr;
+	struct netdev_hw_addr *ha;
 	u32 nibbles;
 
 	if (dev->flags & IFF_PROMISC)
 		return 0;
 
 	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
-	for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
-		if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
+	list_for_each_entry(ha, &dev->uc.list, list) {
+		if (memcmp(dev->dev_addr, ha->addr, 5))
 			return 0;
-		if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
+		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
 			return 0;
 
-		nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
+		nibbles |= 1 << (ha->addr[5] & 0x0f);
 	}
 
 	return nibbles;
@@ -1644,12 +1750,12 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
 
 	uc_addr_set(mp, dev->dev_addr);
 
-	port_config = rdlp(mp, PORT_CONFIG);
+	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
 	nibbles = uc_addr_filter_mask(dev);
 	if (!nibbles) {
 		port_config |= UNICAST_PROMISCUOUS_MODE;
-		wrlp(mp, PORT_CONFIG, port_config);
-		return;
+		nibbles = 0xffff;
 	}
 
 	for (i = 0; i < 16; i += 4) {
@@ -1670,7 +1776,6 @@
 		wrl(mp, off, v);
 	}
 
-	port_config &= ~UNICAST_PROMISCUOUS_MODE;
 	wrlp(mp, PORT_CONFIG, port_config);
 }
 
@@ -1703,7 +1808,6 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
 	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
 		int port_num;
 		u32 accept;
-		int i;
 
 oom:
 		port_num = mp->port_num;
@@ -1715,7 +1819,7 @@ oom:
 		return;
 	}
 
-	mc_spec = kmalloc(0x200, GFP_KERNEL);
+	mc_spec = kmalloc(0x200, GFP_ATOMIC);
 	if (mc_spec == NULL)
 		goto oom;
 	mc_other = mc_spec + (0x100 >> 2);
@@ -1790,9 +1894,9 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 						mp->rx_desc_sram_size);
 		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
 	} else {
-		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
-						       &rxq->rx_desc_dma,
-						       GFP_KERNEL);
+		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+						       size, &rxq->rx_desc_dma,
+						       GFP_KERNEL);
 	}
 
 	if (rxq->rx_desc_area == NULL) {
@@ -1823,6 +1927,19 @@
 					nexti * sizeof(struct rx_desc);
 	}
 
+	rxq->lro_mgr.dev = mp->dev;
+	memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats));
+	rxq->lro_mgr.features = LRO_F_NAPI;
+	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+	rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr);
+	rxq->lro_mgr.max_aggr = 32;
+	rxq->lro_mgr.frag_align_pad = 0;
+	rxq->lro_mgr.lro_arr = rxq->lro_arr;
+	rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header;
+
+	memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr));
+
 	return 0;
 
 
@@ -1830,7 +1947,7 @@ out_free:
 	if (index == 0 && size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, size,
+		dma_free_coherent(mp->dev->dev.parent, size,
 				  rxq->rx_desc_area,
 				  rxq->rx_desc_dma);
 
@@ -1862,7 +1979,7 @@ static void rxq_deinit(struct rx_queue *rxq)
 	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
-		dma_free_coherent(NULL, rxq->rx_desc_area_size,
+		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
 				  rxq->rx_desc_area, rxq->rx_desc_dma);
 
 	kfree(rxq->rx_skb);
@@ -1890,9 +2007,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 						mp->tx_desc_sram_size);
 		txq->tx_desc_dma = mp->tx_desc_sram_addr;
 	} else {
-		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
-						       &txq->tx_desc_dma,
-						       GFP_KERNEL);
+		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+						       size, &txq->tx_desc_dma,
+						       GFP_KERNEL);
 	}
 
 	if (txq->tx_desc_area == NULL) {
@@ -1936,7 +2053,7 @@ static void txq_deinit(struct tx_queue *txq)
 	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
-		dma_free_coherent(NULL, txq->tx_desc_area_size,
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
 				  txq->tx_desc_area, txq->tx_desc_dma);
 }
 
@@ -1947,15 +2064,16 @@
 	u32 int_cause;
 	u32 int_cause_ext;
 
-	int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
+	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
 	if (int_cause == 0)
 		return 0;
 
 	int_cause_ext = 0;
-	if (int_cause & INT_EXT)
+	if (int_cause & INT_EXT) {
+		int_cause &= ~INT_EXT;
 		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
+	}
 
-	int_cause &= INT_TX_END | INT_RX;
 	if (int_cause) {
 		wrlp(mp, INT_CAUSE, ~int_cause);
 		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
@@ -2048,8 +2166,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
-	mp->work_rx_refill |= mp->work_rx_oom;
-	mp->work_rx_oom = 0;
+	if (unlikely(mp->oom)) {
+		mp->oom = 0;
+		del_timer(&mp->rx_oom);
+	}
 
 	work_done = 0;
 	while (work_done < budget) {
@@ -2060,11 +2180,14 @@
 		if (mp->work_link) {
 			mp->work_link = 0;
 			handle_link_event(mp);
+			work_done++;
 			continue;
 		}
 
-		queue_mask = mp->work_tx | mp->work_tx_end |
-				mp->work_rx | mp->work_rx_refill;
+		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
+		if (likely(!mp->oom))
+			queue_mask |= mp->work_rx_refill;
+
 		if (!queue_mask) {
 			if (mv643xx_eth_collect_events(mp))
 				continue;
@@ -2085,7 +2208,7 @@
 			txq_maybe_wake(mp->txq + queue);
 		} else if (mp->work_rx & queue_mask) {
 			work_done += rxq_process(mp->rxq + queue, work_tbd);
-		} else if (mp->work_rx_refill & queue_mask) {
+		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
 			work_done += rxq_refill(mp->rxq + queue, work_tbd);
 		} else {
 			BUG();
@@ -2093,10 +2216,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (work_done < budget) {
-		if (mp->work_rx_oom)
+		if (mp->oom)
 			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
 		napi_complete(napi);
-		wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+		wrlp(mp, INT_MASK, mp->int_mask);
 	}
 
 	return work_done;
@@ -2155,8 +2278,6 @@ static void port_start(struct mv643xx_eth_private *mp)
 		pscr |= FORCE_LINK_PASS;
 	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
 
-	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
-
 	/*
	 * Configure TX path and queues.
	 */
@@ -2170,11 +2291,6 @@ static void port_start(struct mv643xx_eth_private *mp)
 	}
 
 	/*
-	 * Add configured unicast address to address filter table.
-	 */
-	mv643xx_eth_program_unicast_filter(mp->dev);
-
-	/*
 	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
 	 * frames to RX queue #0, and include the pseudo-header when
 	 * calculating receive checksums.
@@ -2187,6 +2303,11 @@ static void port_start(struct mv643xx_eth_private *mp)
 	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
 
 	/*
+	 * Add configured unicast addresses to address filter table.
+	 */
+	mv643xx_eth_program_unicast_filter(mp->dev);
+
+	/*
 	 * Enable the receive queues.
 	 */
 	for (i = 0; i < mp->rxq_count; i++) {
@@ -2219,6 +2340,14 @@ static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
 	 * size field are ignored by the hardware.
 	 */
 	mp->skb_size = (skb_size + 7) & ~7;
+
+	/*
+	 * If NET_SKB_PAD is smaller than a cache line,
+	 * netdev_alloc_skb() will cause skb->data to be misaligned
+	 * to a cache line boundary. If this is the case, include
+	 * some extra space to allow re-aligning the data area.
+	 */
+	mp->skb_size += SKB_DMA_REALIGN;
 }
 
 static int mv643xx_eth_open(struct net_device *dev)
@@ -2244,6 +2373,8 @@
 
 	skb_queue_head_init(&mp->rx_recycle);
 
+	mp->int_mask = INT_EXT;
+
 	for (i = 0; i < mp->rxq_count; i++) {
 		err = rxq_init(mp, i);
 		if (err) {
@@ -2253,9 +2384,10 @@
 		}
 
 		rxq_refill(mp->rxq + i, INT_MAX);
+		mp->int_mask |= INT_RX_0 << i;
 	}
 
-	if (mp->work_rx_oom) {
+	if (mp->oom) {
 		mp->rx_oom.expires = jiffies + (HZ / 10);
 		add_timer(&mp->rx_oom);
 	}
@@ -2267,17 +2399,13 @@
 			txq_deinit(mp->txq + i);
 			goto out_free;
 		}
+		mp->int_mask |= INT_TX_END_0 << i;
 	}
 
-	netif_carrier_off(dev);
-
 	port_start(mp);
 
-	set_rx_coal(mp, 0);
-	set_tx_coal(mp, 0);
-
 	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
-	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+	wrlp(mp, INT_MASK, mp->int_mask);
 
 	return 0;
 
@@ -2326,8 +2454,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	wrlp(mp, INT_MASK, 0x00000000);
 	rdlp(mp, INT_MASK);
 
-	del_timer_sync(&mp->mib_counters_timer);
-
 	napi_disable(&mp->napi);
 
 	del_timer_sync(&mp->rx_oom);
@@ -2339,6 +2465,7 @@
 	port_reset(mp);
 	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
+	del_timer_sync(&mp->mib_counters_timer);
 
 	skb_queue_purge(&mp->rx_recycle);
 
@@ -2422,7 +2549,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
 
 	mv643xx_eth_irq(dev->irq, dev);
 
-	wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+	wrlp(mp, INT_MASK, mp->int_mask);
 }
 #endif
 
@@ -2743,6 +2870,21 @@ static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
 	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
 }
 
+static const struct net_device_ops mv643xx_eth_netdev_ops = {
+	.ndo_open		= mv643xx_eth_open,
+	.ndo_stop		= mv643xx_eth_stop,
+	.ndo_start_xmit		= mv643xx_eth_xmit,
+	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
+	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
+	.ndo_do_ioctl		= mv643xx_eth_ioctl,
+	.ndo_change_mtu		= mv643xx_eth_change_mtu,
+	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
+	.ndo_get_stats		= mv643xx_eth_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= mv643xx_eth_netpoll,
+#endif
+};
+
 static int mv643xx_eth_probe(struct platform_device *pdev)
 {
 	struct mv643xx_eth_platform_data *pd;
@@ -2814,18 +2956,8 @@
 	BUG_ON(!res);
 	dev->irq = res->start;
 
-	dev->get_stats = mv643xx_eth_get_stats;
-	dev->hard_start_xmit = mv643xx_eth_xmit;
-	dev->open = mv643xx_eth_open;
-	dev->stop = mv643xx_eth_stop;
-	dev->set_rx_mode = mv643xx_eth_set_rx_mode;
-	dev->set_mac_address = mv643xx_eth_set_mac_address;
-	dev->do_ioctl = mv643xx_eth_ioctl;
-	dev->change_mtu = mv643xx_eth_change_mtu;
-	dev->tx_timeout = mv643xx_eth_tx_timeout;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = mv643xx_eth_netpoll;
-#endif
+	dev->netdev_ops = &mv643xx_eth_netdev_ops;
+
 	dev->watchdog_timeo = 2 * HZ;
 	dev->base_addr = 0;
 
@@ -2837,6 +2969,13 @@
 	if (mp->shared->win_protect)
 		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
 
+	netif_carrier_off(dev);
+
+	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
+	set_rx_coal(mp, 250);
+	set_tx_coal(mp, 0);
+
 	err = register_netdev(dev);
 	if (err)
 		goto out;
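
Editor's note: the LRO plumbing in this patch is spread across several hunks; consolidated, it is the standard inet_lro recipe of that era: each RX queue owns a struct net_lro_mgr plus a small net_lro_desc array, eligible frames are handed to lro_receive_skb() with the hardware descriptor's command/status word as the opaque cookie, and open sessions are flushed once per NAPI poll. Below is a minimal sketch of that pattern against the 2.6.30-era <linux/inet_lro.h> API; the example_* names are illustrative and not from the driver, and the header check is a software-only approximation of what the patch does with the RX_PKT_* descriptor bits.

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/inet_lro.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>

#define EXAMPLE_LRO_MAX_DESC	8	/* concurrent aggregation sessions */

struct example_rxq {			/* hypothetical per-queue state */
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_arr[EXAMPLE_LRO_MAX_DESC];
};

/*
 * inet_lro calls this to locate the IP and TCP headers of a candidate
 * frame; returning nonzero rejects the frame, which then travels the
 * normal netif_receive_skb() path.  The driver above makes this
 * decision from the RX descriptor status bits passed in via 'priv';
 * here the skb fields are inspected instead.
 */
static int example_get_skb_header(struct sk_buff *skb, void **iphdr,
				  void **tcph, u64 *hdr_flags, void *priv)
{
	if (skb->protocol != htons(ETH_P_IP))
		return -1;

	skb_reset_network_header(skb);
	if (ip_hdr(skb)->protocol != IPPROTO_TCP)
		return -1;

	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	return 0;
}

static void example_lro_init(struct example_rxq *rxq, struct net_device *dev)
{
	rxq->lro_mgr.dev = dev;
	rxq->lro_mgr.features = LRO_F_NAPI;	/* flushed from NAPI context */
	rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	rxq->lro_mgr.max_desc = EXAMPLE_LRO_MAX_DESC;
	rxq->lro_mgr.max_aggr = 32;		/* frames merged per session */
	rxq->lro_mgr.frag_align_pad = 0;
	rxq->lro_mgr.lro_arr = rxq->lro_arr;
	rxq->lro_mgr.get_skb_header = example_get_skb_header;
}

In the poll loop the pattern is then: for each hardware-checksummed TCP frame call lro_receive_skb(&rxq->lro_mgr, skb, cookie) instead of netif_receive_skb(skb), and call lro_flush_all(&rxq->lro_mgr) once at the end of the batch so partially aggregated sessions are not left pending across polls; that is exactly what rxq_process() does via its lro_flush_needed flag.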