#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
+#include <linux/slab.h>
#include <asm/system.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
#define MAC_ADDR_LOW 0x0014
#define MAC_ADDR_HIGH 0x0018
#define SDMA_CONFIG 0x001c
+#define TX_BURST_SIZE_16_64BIT 0x01000000
+#define TX_BURST_SIZE_4_64BIT 0x00800000
+#define BLM_TX_NO_SWAP 0x00000020
+#define BLM_RX_NO_SWAP 0x00000010
+#define RX_BURST_SIZE_16_64BIT 0x00000008
+#define RX_BURST_SIZE_4_64BIT 0x00000004
#define PORT_SERIAL_CONTROL 0x003c
+#define SET_MII_SPEED_TO_100 0x01000000
+#define SET_GMII_SPEED_TO_1000 0x00800000
+#define SET_FULL_DUPLEX_MODE 0x00200000
+#define MAX_RX_PACKET_9700BYTE 0x000a0000
+#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000
+#define DO_NOT_FORCE_LINK_FAIL 0x00000400
+#define SERIAL_PORT_CONTROL_RESERVED 0x00000200
+#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008
+#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004
+#define FORCE_LINK_PASS 0x00000002
+#define SERIAL_PORT_ENABLE 0x00000001
#define PORT_STATUS 0x0044
#define TX_FIFO_EMPTY 0x00000400
#define TX_IN_PROGRESS 0x00000080
#define TX_BW_BURST 0x005c
#define INT_CAUSE 0x0060
#define INT_TX_END 0x07f80000
+#define INT_TX_END_0 0x00080000
#define INT_RX 0x000003fc
+#define INT_RX_0 0x00000004
#define INT_EXT 0x00000002
#define INT_CAUSE_EXT 0x0064
#define INT_EXT_LINK_PHY 0x00110000
/*
- * SDMA configuration register.
+ * SDMA configuration register default value.
*/
-#define RX_BURST_SIZE_4_64BIT (2 << 1)
-#define RX_BURST_SIZE_16_64BIT (4 << 1)
-#define BLM_RX_NO_SWAP (1 << 4)
-#define BLM_TX_NO_SWAP (1 << 5)
-#define TX_BURST_SIZE_4_64BIT (2 << 22)
-#define TX_BURST_SIZE_16_64BIT (4 << 22)
-
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
(RX_BURST_SIZE_4_64BIT | \
/*
- * Port serial control register.
+ * Misc definitions.
*/
-#define SET_MII_SPEED_TO_100 (1 << 24)
-#define SET_GMII_SPEED_TO_1000 (1 << 23)
-#define SET_FULL_DUPLEX_MODE (1 << 21)
-#define MAX_RX_PACKET_9700BYTE (5 << 17)
-#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
-#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
-#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
-#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
-#define DISABLE_AUTO_NEG_FOR_DUPLEX (1 << 2)
-#define FORCE_LINK_PASS (1 << 1)
-#define SERIAL_PORT_ENABLE (1 << 0)
-
-#define DEFAULT_RX_QUEUE_SIZE 128
-#define DEFAULT_TX_QUEUE_SIZE 256
+#define DEFAULT_RX_QUEUE_SIZE 128
+#define DEFAULT_TX_QUEUE_SIZE 256
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
/*
struct work_struct tx_timeout_task;
struct napi_struct napi;
+ u32 int_mask;
+ u8 oom;
u8 work_link;
u8 work_tx;
u8 work_tx_end;
u8 work_rx;
u8 work_rx_refill;
- u8 work_rx_oom;
int skb_size;
struct sk_buff_head rx_recycle;
if (rxq->rx_curr_desc == rxq->rx_ring_size)
rxq->rx_curr_desc = 0;
- dma_unmap_single(NULL, rx_desc->buf_ptr,
+ dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
rx_desc->buf_size, DMA_FROM_DEVICE);
rxq->rx_desc_count--;
rx++;
refilled = 0;
while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
struct sk_buff *skb;
- int unaligned;
int rx;
struct rx_desc *rx_desc;
+ int size;
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
- skb = dev_alloc_skb(mp->skb_size +
- dma_get_cache_alignment() - 1);
+ skb = dev_alloc_skb(mp->skb_size);
if (skb == NULL) {
- mp->work_rx_oom |= 1 << rxq->index;
+ mp->oom = 1;
goto oom;
}
- unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
- if (unaligned)
- skb_reserve(skb, dma_get_cache_alignment() - unaligned);
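+ /*
+ * Re-align skb->data to a cache line boundary; the headroom
+ * needed for this was added to mp->skb_size up front (see the
+ * SKB_DMA_REALIGN definition).
+ */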
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
refilled++;
rxq->rx_desc_count++;
rx_desc = rxq->rx_desc_area + rx;
- rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
- mp->skb_size, DMA_FROM_DEVICE);
- rx_desc->buf_size = mp->skb_size;
+ size = skb_end_pointer(skb) - skb->data;
+ rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+ skb->data, size,
+ DMA_FROM_DEVICE);
+ rx_desc->buf_size = size;
rxq->rx_skb[rx] = skb;
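+ /*
+ * Make sure the buffer pointer and size written above are
+ * visible to the controller before ownership of the
+ * descriptor is handed to DMA below.
+ */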
wmb();
rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
int frag;
desc->l4i_chk = 0;
desc->byte_cnt = this_frag->size;
- desc->buf_ptr = dma_map_page(NULL, this_frag->page,
- this_frag->page_offset,
- this_frag->size,
- DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
+ this_frag->page,
+ this_frag->page_offset,
+ this_frag->size, DMA_TO_DEVICE);
}
}
desc->l4i_chk = l4i_chk;
desc->byte_cnt = length;
- desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+ length, DMA_TO_DEVICE);
__skb_queue_tail(&txq->tx_skb, skb);
return 0;
}
-static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
int queue;
txq->tx_bytes += skb->len;
txq->tx_packets++;
- dev->trans_start = jiffies;
entries_left = txq->tx_ring_size - txq->tx_desc_count;
if (entries_left < MAX_SKB_FRAGS + 1)
}
if (cmd_sts & TX_FIRST_DESC) {
- dma_unmap_single(NULL, desc->buf_ptr,
+ dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
} else {
- dma_unmap_page(NULL, desc->buf_ptr,
+ dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
}
if (skb != NULL) {
if (skb_queue_len(&mp->rx_recycle) <
mp->rx_ring_size &&
- skb_recycle_check(skb, mp->skb_size +
- dma_get_cache_alignment() - 1))
+ skb_recycle_check(skb, mp->skb_size))
__skb_queue_head(&mp->rx_recycle, skb);
else
dev_kfree_skb(skb);
}
}
-static void txq_set_wrr(struct tx_queue *txq, int weight)
-{
- struct mv643xx_eth_private *mp = txq_to_mp(txq);
- int off;
- u32 val;
-
- /*
- * Turn off fixed priority mode.
- */
- off = 0;
- switch (mp->shared->tx_bw_control) {
- case TX_BW_CONTROL_OLD_LAYOUT:
- off = TXQ_FIX_PRIO_CONF;
- break;
- case TX_BW_CONTROL_NEW_LAYOUT:
- off = TXQ_FIX_PRIO_CONF_MOVED;
- break;
- }
-
- if (off) {
- val = rdlp(mp, off);
- val &= ~(1 << txq->index);
- wrlp(mp, off, val);
-
- /*
- * Configure WRR weight for this queue.
- */
-
- val = rdlp(mp, off);
- val = (val & ~0xff) | (weight & 0xff);
- wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val);
- }
-}
-
/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
spin_lock_bh(&mp->mib_counters_lock);
p->good_octets_received += mib_read(mp, 0x00);
- p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
p->bad_octets_received += mib_read(mp, 0x08);
p->internal_mac_transmit_err += mib_read(mp, 0x0c);
p->good_frames_received += mib_read(mp, 0x10);
p->frames_512_to_1023_octets += mib_read(mp, 0x30);
p->frames_1024_to_max_octets += mib_read(mp, 0x34);
p->good_octets_sent += mib_read(mp, 0x38);
- p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
p->good_frames_sent += mib_read(mp, 0x40);
p->excessive_collision += mib_read(mp, 0x44);
p->multicast_frames_sent += mib_read(mp, 0x48);
static u32 uc_addr_filter_mask(struct net_device *dev)
{
- struct dev_addr_list *uc_ptr;
+ struct netdev_hw_addr *ha;
u32 nibbles;
if (dev->flags & IFF_PROMISC)
return 0;
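+ /*
+ * Illustrative example (addresses made up): the hardware can
+ * match up to 16 unicast addresses that differ from dev_addr
+ * only in the low nibble of the last byte.  With dev_addr
+ * ending in 0x5a and one secondary address ending in 0x53, the
+ * returned mask is (1 << 0xa) | (1 << 0x3); any other
+ * difference returns 0, which makes the caller fall back to
+ * unicast promiscuous mode.
+ */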
nibbles = 1 << (dev->dev_addr[5] & 0x0f);
- for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
- if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
+ netdev_for_each_uc_addr(ha, dev) {
+ if (memcmp(dev->dev_addr, ha->addr, 5))
return 0;
- if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
+ if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
return 0;
- nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
+ nibbles |= 1 << (ha->addr[5] & 0x0f);
}
return nibbles;
uc_addr_set(mp, dev->dev_addr);
- port_config = rdlp(mp, PORT_CONFIG);
+ port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
nibbles = uc_addr_filter_mask(dev);
if (!nibbles) {
port_config |= UNICAST_PROMISCUOUS_MODE;
- wrlp(mp, PORT_CONFIG, port_config);
- return;
+ nibbles = 0xffff;
}
for (i = 0; i < 16; i += 4) {
wrl(mp, off, v);
}
- port_config &= ~UNICAST_PROMISCUOUS_MODE;
wrlp(mp, PORT_CONFIG, port_config);
}
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 *mc_spec;
u32 *mc_other;
- struct dev_addr_list *addr;
+ struct netdev_hw_addr *ha;
int i;
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
int port_num;
u32 accept;
- int i;
oom:
port_num = mp->port_num;
memset(mc_spec, 0, 0x100);
memset(mc_other, 0, 0x100);
- for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
- u8 *a = addr->da_addr;
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *a = ha->addr;
u32 *table;
int entry;
{
struct sockaddr *sa = addr;
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
netif_addr_lock_bh(dev);
mp->rx_desc_sram_size);
rxq->rx_desc_dma = mp->rx_desc_sram_addr;
} else {
- rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
- &rxq->rx_desc_dma,
- GFP_KERNEL);
+ rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &rxq->rx_desc_dma,
+ GFP_KERNEL);
}
if (rxq->rx_desc_area == NULL) {
if (index == 0 && size <= mp->rx_desc_sram_size)
iounmap(rxq->rx_desc_area);
else
- dma_free_coherent(NULL, size,
+ dma_free_coherent(mp->dev->dev.parent, size,
rxq->rx_desc_area,
rxq->rx_desc_dma);
rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
iounmap(rxq->rx_desc_area);
else
- dma_free_coherent(NULL, rxq->rx_desc_area_size,
+ dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
rxq->rx_desc_area, rxq->rx_desc_dma);
kfree(rxq->rx_skb);
mp->tx_desc_sram_size);
txq->tx_desc_dma = mp->tx_desc_sram_addr;
} else {
- txq->tx_desc_area = dma_alloc_coherent(NULL, size,
- &txq->tx_desc_dma,
- GFP_KERNEL);
+ txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &txq->tx_desc_dma,
+ GFP_KERNEL);
}
if (txq->tx_desc_area == NULL) {
txq->tx_desc_area_size <= mp->tx_desc_sram_size)
iounmap(txq->tx_desc_area);
else
- dma_free_coherent(NULL, txq->tx_desc_area_size,
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
}
u32 int_cause;
u32 int_cause_ext;
- int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
+ int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
if (int_cause == 0)
return 0;
int_cause_ext = 0;
- if (int_cause & INT_EXT)
+ if (int_cause & INT_EXT) {
+ int_cause &= ~INT_EXT;
int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
+ }
- int_cause &= INT_TX_END | INT_RX;
if (int_cause) {
wrlp(mp, INT_CAUSE, ~int_cause);
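+ /*
+ * INT_TX_END_0 is bit 19, so shifting the TX_END cause bits
+ * right by 19 yields a plain per-queue bitmask (bit N set
+ * means TX queue N hit end-of-queue).
+ */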
mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
mp = container_of(napi, struct mv643xx_eth_private, napi);
- mp->work_rx_refill |= mp->work_rx_oom;
- mp->work_rx_oom = 0;
+ if (unlikely(mp->oom)) {
+ mp->oom = 0;
+ del_timer(&mp->rx_oom);
+ }
work_done = 0;
while (work_done < budget) {
if (mp->work_link) {
mp->work_link = 0;
handle_link_event(mp);
+ work_done++;
continue;
}
- queue_mask = mp->work_tx | mp->work_tx_end |
- mp->work_rx | mp->work_rx_refill;
+ queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
+ if (likely(!mp->oom))
+ queue_mask |= mp->work_rx_refill;
+
if (!queue_mask) {
if (mv643xx_eth_collect_events(mp))
continue;
txq_maybe_wake(mp->txq + queue);
} else if (mp->work_rx & queue_mask) {
work_done += rxq_process(mp->rxq + queue, work_tbd);
- } else if (mp->work_rx_refill & queue_mask) {
+ } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
work_done += rxq_refill(mp->rxq + queue, work_tbd);
} else {
BUG();
}
if (work_done < budget) {
- if (mp->work_rx_oom)
+ if (mp->oom)
mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
napi_complete(napi);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
}
return work_done;
pscr |= FORCE_LINK_PASS;
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
- wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
-
/*
* Configure TX path and queues.
*/
}
/*
- * Add configured unicast address to address filter table.
- */
- mv643xx_eth_program_unicast_filter(mp->dev);
-
- /*
* Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
* frames to RX queue #0, and include the pseudo-header when
* calculating receive checksums.
wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
/*
+ * Add configured unicast addresses to address filter table.
+ */
+ mv643xx_eth_program_unicast_filter(mp->dev);
+
+ /*
* Enable the receive queues.
*/
for (i = 0; i < mp->rxq_count; i++) {
* size field are ignored by the hardware.
*/
mp->skb_size = (skb_size + 7) & ~7;
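+ /*
+ * (E.g. a 1522 byte frame size is rounded up to 1528, the
+ * next multiple of 8.)
+ */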
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * dev_alloc_skb() will leave skb->data misaligned with
+ * respect to a cache line boundary. If this is the case,
+ * include some extra space to allow re-aligning the data
+ * area.
+ */
+ mp->skb_size += SKB_DMA_REALIGN;
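+
+ /*
+ * Worked example (illustrative values, nothing here mandates
+ * them): with PAGE_SIZE 4096, NET_SKB_PAD 32 and
+ * SMP_CACHE_BYTES 64, SKB_DMA_REALIGN is (4096 - 32) % 64 == 32,
+ * so 32 extra bytes are allocated here and skb_reserve()d in
+ * rxq_refill() to pull skb->data back onto a cache line
+ * boundary.
+ */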
}
static int mv643xx_eth_open(struct net_device *dev)
skb_queue_head_init(&mp->rx_recycle);
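+ /*
+ * Build the interrupt mask incrementally: INT_EXT now, plus
+ * one RX bit and one TX-end bit per queue as each queue is
+ * brought up below, so that unused queues stay masked.
+ */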
+ mp->int_mask = INT_EXT;
+
for (i = 0; i < mp->rxq_count; i++) {
err = rxq_init(mp, i);
if (err) {
}
rxq_refill(mp->rxq + i, INT_MAX);
+ mp->int_mask |= INT_RX_0 << i;
}
- if (mp->work_rx_oom) {
+ if (mp->oom) {
mp->rx_oom.expires = jiffies + (HZ / 10);
add_timer(&mp->rx_oom);
}
txq_deinit(mp->txq + i);
goto out_free;
}
+ mp->int_mask |= INT_TX_END_0 << i;
}
port_start(mp);
wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
return 0;
mv643xx_eth_irq(dev->irq, dev);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
}
#endif
.ndo_start_xmit = mv643xx_eth_xmit,
.ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
.ndo_set_mac_address = mv643xx_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mv643xx_eth_ioctl,
.ndo_change_mtu = mv643xx_eth_change_mtu,
.ndo_tx_timeout = mv643xx_eth_tx_timeout,
netif_carrier_off(dev);
- set_rx_coal(mp, 0);
+ wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
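+ /* Enable receive interrupt coalescing (~250 usec) by default. */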
+ set_rx_coal(mp, 250);
set_tx_coal(mp, 0);
err = register_netdev(dev);