#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
* - Multiqueue RX/TX
*/
-
-/* Must be a power of two */
-#define RX_RING_SIZE 2048
-#define TX_RING_SIZE 4096
-
#define LRO_MAX_AGGR 64
+#define PE_MIN_MTU 64
+#define PE_MAX_MTU 9000
+#define PE_DEF_MTU ETH_DATA_LEN
+
#define DEFAULT_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
-#define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
-#define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
-#define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
-#define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
-#define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)])
-
-#define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \
- & ((ring)->size - 1))
-#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring))
-
-#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
+extern const struct ethtool_ops pasemi_mac_ethtool_ops;
+
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
return 1;
}
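+/* The PCFG_PE bit gates the MAC packet engine; these helpers are used to
+ * quiesce and re-enable the interface around address and MTU changes and
+ * link transitions.
+ */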
+static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags &= ~PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
+static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags |= PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
struct pci_dev *pdev = mac->pdev;
return 0;
}
+static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ unsigned int adr0, adr1;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
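+ /* The MAC keeps bytes 2..5 of the station address in ADR0 and bytes
+ * 0..1 in the low 16 bits of ADR1; briefly disable the interface
+ * while both halves are rewritten.
+ */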
+ adr0 = dev->dev_addr[2] << 24 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[5];
+ adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
+ adr1 &= ~0xffff;
+ adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];
+
+ pasemi_mac_intf_disable(mac);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
+ pasemi_mac_intf_enable(mac);
+
+ return 0;
+}
+
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
void **tcph, u64 *hdr_flags, void *data)
{
*tcph = tcp_hdr(skb);
/* check if ip header and tcp header are complete */
- if (iph->tot_len < ip_len + tcp_hdrlen(skb))
+ if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
return -1;
*hdr_flags = LRO_IPV4 | LRO_TCP;
return (nfrags + 3) & ~1;
}
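+/* Allocate and start a TX function channel used to compute TCP/UDP
+ * checksums for frames too large for the MAC's inline checksum offload
+ * (see pasemi_mac_queue_csdesc()).
+ */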
+static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac)
+{
+ struct pasemi_mac_csring *ring;
+ u32 val;
+ unsigned int cfg;
+ int chno;
+
+ ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
+ offsetof(struct pasemi_mac_csring, chan));
+
+ if (!ring) {
+ dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n");
+ goto out_chan;
+ }
+
+ chno = ring->chan.chno;
+
+ ring->size = CS_RING_SIZE;
+ ring->next_to_fill = 0;
+
+ /* Allocate descriptors */
+ if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
+ goto out_ring_desc;
+
+ write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
+ PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
+ val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
+ val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3);
+
+ write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);
+
+ ring->events[0] = pasemi_dma_alloc_flag();
+ ring->events[1] = pasemi_dma_alloc_flag();
+ if (ring->events[0] < 0 || ring->events[1] < 0)
+ goto out_flags;
+
+ pasemi_dma_clear_flag(ring->events[0]);
+ pasemi_dma_clear_flag(ring->events[1]);
+
+ ring->fun = pasemi_dma_alloc_fun();
+ if (ring->fun < 0)
+ goto out_fun;
+
+ cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP |
+ PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
+ PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ;
+
+ if (translation_enabled())
+ cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;
+
+ write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);
+
+ /* enable channel */
+ pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
+ PAS_DMA_TXCHAN_TCMDSTA_DB |
+ PAS_DMA_TXCHAN_TCMDSTA_DE |
+ PAS_DMA_TXCHAN_TCMDSTA_DA);
+
+ return ring;
+
+out_fun:
+out_flags:
+ if (ring->events[0] >= 0)
+ pasemi_dma_free_flag(ring->events[0]);
+ if (ring->events[1] >= 0)
+ pasemi_dma_free_flag(ring->events[1]);
+ pasemi_dma_free_ring(&ring->chan);
+out_ring_desc:
+ pasemi_dma_free_chan(&ring->chan);
+out_chan:
+
+ return NULL;
+}
+
+static void pasemi_mac_setup_csrings(struct pasemi_mac *mac)
+{
+ int i;
+
+ mac->cs[0] = pasemi_mac_setup_csring(mac);
+ if (mac->type == MAC_TYPE_XAUI)
+ mac->cs[1] = pasemi_mac_setup_csring(mac);
+ else
+ mac->cs[1] = NULL;
+
+ for (i = 0; i < MAX_CS; i++)
+ if (mac->cs[i])
+ mac->num_cs++;
+}
+
+static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring)
+{
+ pasemi_dma_stop_chan(&csring->chan);
+ pasemi_dma_free_flag(csring->events[0]);
+ pasemi_dma_free_flag(csring->events[1]);
+ pasemi_dma_free_ring(&csring->chan);
+ pasemi_dma_free_chan(&csring->chan);
+ pasemi_dma_free_fun(csring->fun);
+}
+
static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
{
struct pasemi_mac_rxring *ring;
cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
PAS_DMA_TXCHAN_CFG_UP |
- PAS_DMA_TXCHAN_CFG_WT(2);
+ PAS_DMA_TXCHAN_CFG_WT(4);
if (translation_enabled())
cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;
}
-static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
{
struct pasemi_mac_rxring *rx = rx_ring(mac);
unsigned int i;
}
for (i = 0; i < RX_RING_SIZE; i++)
- RX_DESC(rx, i) = 0;
+ RX_BUFF(rx, i) = 0;
+}
+
+static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+{
+ pasemi_mac_free_rx_buffers(mac);
dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);
/* Entry in use? */
WARN_ON(*buff);
- skb = dev_alloc_skb(BUF_SIZE);
- skb_reserve(skb, LOCAL_SKB_ALIGN);
+ skb = dev_alloc_skb(mac->bufsz);
if (unlikely(!skb))
break;
+
+ skb_reserve(skb, LOCAL_SKB_ALIGN);
dma = pci_map_single(mac->dma_pdev, skb->data,
- BUF_SIZE - LOCAL_SKB_ALIGN,
+ mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(dma))) {
+ if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
dev_kfree_skb_irq(info->skb);
break;
}
info->skb = skb;
info->dma = dma;
- *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+ *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);
fill++;
}
rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));
- printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
+ printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n",
macrx, *chan->status);
printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));
- printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\
- "tx status 0x%016lx\n", mactx, *chan->status);
+ printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\
+ "tx status 0x%016llx\n", mactx, *chan->status);
printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}
len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
- pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN,
+ pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
if (macrx & XCT_MACRX_CRC) {
u64 mactx = TX_DESC(txring, i);
struct sk_buff *skb;
- skb = TX_DESC_INFO(txring, i+1).skb;
- nr_frags = TX_DESC_INFO(txring, i).dma;
-
if ((mactx & XCT_MACTX_E) ||
(*chan->status & PAS_STATUS_ERROR))
pasemi_mac_tx_error(mac, mactx);
+ /* Skip over control descriptors */
+ if (!(mactx & XCT_MACTX_LLEN_M)) {
+ TX_DESC(txring, i) = 0;
+ TX_DESC(txring, i+1) = 0;
+ buf_count = 2;
+ continue;
+ }
+
+ skb = TX_DESC_INFO(txring, i+1).skb;
+ nr_frags = TX_DESC_INFO(txring, i).dma;
+
if (unlikely(mactx & XCT_MACTX_O))
/* Not yet transmitted */
break;
{
const struct pasemi_mac_rxring *rxring = data;
struct pasemi_mac *mac = rxring->mac;
- struct net_device *dev = mac->netdev;
const struct pasemi_dmachan *chan = &rxring->chan;
unsigned int reg;
if (*chan->status & PAS_STATUS_ERROR)
reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
- netif_rx_schedule(dev, &mac->napi);
+ napi_schedule(&mac->napi);
write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);
mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);
- netif_rx_schedule(mac->netdev, &mac->napi);
+ napi_schedule(&mac->napi);
if (reg)
write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
printk(KERN_INFO "%s: Link is down.\n", dev->name);
netif_carrier_off(dev);
+ pasemi_mac_intf_disable(mac);
mac->link = 0;
return;
- } else
+ } else {
+ pasemi_mac_intf_enable(mac);
netif_carrier_on(dev);
+ }
flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
struct pasemi_mac *mac = netdev_priv(dev);
struct device_node *dn, *phy_dn;
struct phy_device *phydev;
- unsigned int phy_id;
- const phandle *ph;
- const unsigned int *prop;
- struct resource r;
- int ret;
dn = pci_device_to_OF_node(mac->pdev);
- ph = of_get_property(dn, "phy-handle", NULL);
- if (!ph)
- return -ENODEV;
- phy_dn = of_find_node_by_phandle(*ph);
-
- prop = of_get_property(phy_dn, "reg", NULL);
- ret = of_address_to_resource(phy_dn->parent, 0, &r);
- if (ret)
- goto err;
-
- phy_id = *prop;
- snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);
-
+ phy_dn = of_parse_phandle(dn, "phy-handle", 0);
- of_node_put(phy_dn);
mac->link = 0;
mac->speed = 0;
mac->duplex = -1;
- phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII);
+ phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
+ PHY_INTERFACE_MODE_SGMII);
+ of_node_put(phy_dn);
- if (IS_ERR(phydev)) {
+ if (!phydev) {
printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
mac->phydev = phydev;
return 0;
-
-err:
- of_node_put(phy_dn);
- return -ENODEV;
}
{
struct pasemi_mac *mac = netdev_priv(dev);
unsigned int flags;
- int ret;
-
- /* enable rx section */
- write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);
-
- /* enable tx section */
- write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
+ int i, ret;
flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
if (!mac->tx)
goto out_tx_ring;
+ /* We might already have allocated rings in case mtu was changed
+ * before interface was brought up.
+ */
+ if (dev->mtu > 1500 && !mac->num_cs) {
+ pasemi_mac_setup_csrings(mac);
+ if (!mac->num_cs)
+ goto out_tx_ring;
+ }
+
+ /* Zero out rmon counters */
+ for (i = 0; i < 32; i++)
+ write_mac_reg(mac, PAS_MAC_RMON(i), 0);
+
/* 0x3ff with 33MHz clock is about 31us */
write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));
pasemi_mac_restart_rx_intr(mac);
pasemi_mac_restart_tx_intr(mac);
- flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
- PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
+ flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
if (mac->type == MAC_TYPE_GMAC)
flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
ret = pasemi_mac_phy_init(dev);
- /* Warn for missing PHY on SGMII (1Gig) ports.
- */
- if (ret && mac->type == MAC_TYPE_GMAC) {
- dev_warn(&mac->pdev->dev, "PHY init failed: %d.\n", ret);
- dev_warn(&mac->pdev->dev, "Defaulting to 1Gbit full duplex\n");
+ if (ret) {
+ /* Since we won't get link notification, just enable RX */
+ pasemi_mac_intf_enable(mac);
+ if (mac->type == MAC_TYPE_GMAC) {
+ /* Warn for missing PHY on SGMII (1Gig) ports */
+ dev_warn(&mac->pdev->dev,
+ "PHY init failed: %d.\n", ret);
+ dev_warn(&mac->pdev->dev,
+ "Defaulting to 1Gbit full duplex\n");
+ }
}
netif_start_queue(dev);
snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
dev->name);
- ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
+ ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
mac->tx_irq_name, mac->tx);
if (ret) {
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
dev->name);
- ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
+ ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
mac->rx_irq_name, mac->rx);
if (ret) {
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
#define MAX_RETRIES 5000
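+/* Stop the TX channel, RX channel and RX interface respectively, polling
+ * the command/status register until the hardware reports it has gone
+ * inactive (or MAX_RETRIES is exceeded).
+ */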
+static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int txch = tx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
+ PAS_DMA_TXCHAN_TCMDSTA_ST);
+
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
+ if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop tx channel, tcmdsta %08x\n", sta);
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
+}
+
+static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int rxch = rx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
+ PAS_DMA_RXCHAN_CCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
+ if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx channel, ccmdsta 08%x\n", sta);
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
+}
+
+static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ PAS_DMA_RXINT_RCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx interface, rcmdsta %08x\n", sta);
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+}
+
static int pasemi_mac_close(struct net_device *dev)
{
struct pasemi_mac *mac = netdev_priv(dev);
unsigned int sta;
- int retries;
- int rxch, txch;
+ int rxch, txch, i;
rxch = rx_ring(mac)->chan.chno;
txch = tx_ring(mac)->chan.chno;
pasemi_mac_clean_tx(tx_ring(mac));
pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
- /* Disable interface */
- write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
- PAS_DMA_TXCHAN_TCMDSTA_ST);
- write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
- PAS_DMA_RXINT_RCMDSTA_ST);
- write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
- PAS_DMA_RXCHAN_CCMDSTA_ST);
+ pasemi_mac_pause_txchan(mac);
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_pause_rxchan(mac);
+ pasemi_mac_intf_disable(mac);
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch));
- if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
- break;
- cond_resched();
+ free_irq(mac->tx->chan.irq, mac->tx);
+ free_irq(mac->rx->chan.irq, mac->rx);
+
+ for (i = 0; i < mac->num_cs; i++) {
+ pasemi_mac_free_csring(mac->cs[i]);
+ mac->cs[i] = NULL;
}
- if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
+ mac->num_cs = 0;
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
- if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
- break;
- cond_resched();
- }
+ /* Free resources */
+ pasemi_mac_free_rx_resources(mac);
+ pasemi_mac_free_tx_resources(mac);
- if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
+ return 0;
+}
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
- if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
- break;
- cond_resched();
+static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
+ const dma_addr_t *map,
+ const unsigned int *map_size,
+ struct pasemi_mac_txring *txring,
+ struct pasemi_mac_csring *csring)
+{
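+ /* Build a checksum-function descriptor chain on the checksum ring,
+ * plus event descriptors on the TX ring, so the MAC transmit only
+ * proceeds once the computed checksum has been copied back into the
+ * outgoing packet.
+ */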
+ u64 fund;
+ dma_addr_t cs_dest;
+ const int nh_off = skb_network_offset(skb);
+ const int nh_len = skb_network_header_len(skb);
+ const int nfrags = skb_shinfo(skb)->nr_frags;
+ int cs_size, i, fill, hdr, cpyhdr, evt;
+ dma_addr_t csdma;
+
+ fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
+ XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
+ XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) |
+ XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE;
+
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ fund |= XCT_FUN_SIG_TCP4;
+ /* TCP checksum is 16 bytes into the header */
+ cs_dest = map[0] + skb_transport_offset(skb) + 16;
+ break;
+ case IPPROTO_UDP:
+ fund |= XCT_FUN_SIG_UDP4;
+ /* UDP checksum is 6 bytes into the header */
+ cs_dest = map[0] + skb_transport_offset(skb) + 6;
+ break;
+ default:
+ BUG();
}
- if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
+ /* Queue up the offloaded checksum computation */
+ fill = csring->next_to_fill;
+ hdr = fill;
- /* Then, disable the channel. This must be done separately from
- * stopping, since you can't disable when active.
- */
+ CS_DESC(csring, fill++) = fund;
+ /* Room for 8BRES. Checksum result is really 2 bytes into it */
+ csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2;
+ CS_DESC(csring, fill++) = 0;
- write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
- write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
- write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+ CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off);
+ for (i = 1; i <= nfrags; i++)
+ CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
- free_irq(mac->tx->chan.irq, mac->tx);
- free_irq(mac->rx->chan.irq, mac->rx);
+ fill += i;
+ if (fill & 1)
+ fill++;
- /* Free resources */
- pasemi_mac_free_rx_resources(mac);
- pasemi_mac_free_tx_resources(mac);
+ /* Copy the result into the TCP packet */
+ cpyhdr = fill;
+ CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
+ XCT_FUN_LLEN(2) | XCT_FUN_SE;
+ CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
+ CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma);
+ fill++;
- return 0;
+ evt = !csring->last_event;
+ csring->last_event = evt;
+
+ /* Event handshaking with MAC TX */
+ CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+ CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]);
+ CS_DESC(csring, fill++) = 0;
+ CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+ CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]);
+ CS_DESC(csring, fill++) = 0;
+ csring->next_to_fill = fill & (CS_RING_SIZE-1);
+
+ cs_size = fill - hdr;
+ write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1);
+
+ /* TX-side event handshaking */
+ fill = txring->next_to_fill;
+ TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+ CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]);
+ TX_DESC(txring, fill++) = 0;
+ TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O |
+ CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]);
+ TX_DESC(txring, fill++) = 0;
+ txring->next_to_fill = fill;
+
+ write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
+
+ return;
}
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct pasemi_mac *mac = netdev_priv(dev);
- struct pasemi_mac_txring *txring;
- u64 dflags, mactx;
+ struct pasemi_mac * const mac = netdev_priv(dev);
+ struct pasemi_mac_txring * const txring = tx_ring(mac);
+ struct pasemi_mac_csring *csring;
+ u64 dflags = 0;
+ u64 mactx;
dma_addr_t map[MAX_SKB_FRAGS+1];
unsigned int map_size[MAX_SKB_FRAGS+1];
unsigned long flags;
int i, nfrags;
int fill;
+ const int nh_off = skb_network_offset(skb);
+ const int nh_len = skb_network_header_len(skb);
- dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const unsigned char *nh = skb_network_header(skb);
+ prefetch(&txring->ring_info);
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- dflags |= XCT_MACTX_CSUM_TCP;
- dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
- dflags |= XCT_MACTX_IPO(nh - skb->data);
- break;
- case IPPROTO_UDP:
- dflags |= XCT_MACTX_CSUM_UDP;
- dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
- dflags |= XCT_MACTX_IPO(nh - skb->data);
- break;
- }
- }
+ dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;
nfrags = skb_shinfo(skb)->nr_frags;
map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
PCI_DMA_TODEVICE);
map_size[0] = skb_headlen(skb);
- if (dma_mapping_error(map[0]))
+ if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
goto out_err_nolock;
for (i = 0; i < nfrags; i++) {
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
map_size[i+1] = frag->size;
- if (dma_mapping_error(map[i+1])) {
+ if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
nfrags = i;
goto out_err_nolock;
}
}
- mactx = dflags | XCT_MACTX_LLEN(skb->len);
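+ /* Frames up to 1540 bytes can use the MAC's inline checksum offload;
+ * larger checksum-offload packets are routed through a checksum
+ * function channel further below.
+ */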
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) {
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ dflags |= XCT_MACTX_CSUM_TCP;
+ dflags |= XCT_MACTX_IPH(nh_len >> 2);
+ dflags |= XCT_MACTX_IPO(nh_off);
+ break;
+ case IPPROTO_UDP:
+ dflags |= XCT_MACTX_CSUM_UDP;
+ dflags |= XCT_MACTX_IPH(nh_len >> 2);
+ dflags |= XCT_MACTX_IPO(nh_off);
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
- txring = tx_ring(mac);
+ mactx = dflags | XCT_MACTX_LLEN(skb->len);
spin_lock_irqsave(&txring->lock, flags);
- fill = txring->next_to_fill;
-
/* Avoid stepping on the same cache line that the DMA controller
* is currently about to send, so leave at least 8 words available.
* Total free space needed is mactx + fragments + 8
*/
- if (RING_AVAIL(txring) < nfrags + 10) {
+ if (RING_AVAIL(txring) < nfrags + 14) {
/* no room -- stop the queue and wait for tx intr */
netif_stop_queue(dev);
goto out_err;
}
+ /* Queue up checksum + event descriptors, if needed */
+ if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) {
+ csring = mac->cs[mac->last_cs];
+ mac->last_cs = (mac->last_cs + 1) % mac->num_cs;
+
+ pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring);
+ }
+
+ fill = txring->next_to_fill;
TX_DESC(txring, fill) = mactx;
TX_DESC_INFO(txring, fill).dma = nfrags;
fill++;
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
- struct net_device *dev = mac->netdev;
int pkts;
pasemi_mac_clean_tx(tx_ring(mac));
pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
if (pkts < budget) {
/* all done, no more packets present */
- netif_rx_complete(dev, napi);
+ napi_complete(napi);
pasemi_mac_restart_rx_intr(mac);
pasemi_mac_restart_tx_intr(mac);
return pkts;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void pasemi_mac_netpoll(struct net_device *dev)
+{
+ const struct pasemi_mac *mac = netdev_priv(dev);
+
+ disable_irq(mac->tx->chan.irq);
+ pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx);
+ enable_irq(mac->tx->chan.irq);
+
+ disable_irq(mac->rx->chan.irq);
+ pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx);
+ enable_irq(mac->rx->chan.irq);
+}
+#endif
+
+static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ unsigned int reg;
+ unsigned int rcmdsta = 0;
+ int running;
+ int ret = 0;
+
+ if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
+ return -EINVAL;
+
+ running = netif_running(dev);
+
+ if (running) {
+ /* Need to stop the interface, clean out all already
+ * received buffers, free all unused buffers on the RX
+ * interface ring, then finally re-fill the rx ring with
+ * the new-size buffers and restart.
+ */
+
+ napi_disable(&mac->napi);
+ netif_tx_disable(dev);
+ pasemi_mac_intf_disable(mac);
+
+ rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
+ pasemi_mac_free_rx_buffers(mac);
+
+ }
+
+ /* Setup checksum channels if large MTU and none already allocated */
+ if (new_mtu > 1500 && !mac->num_cs) {
+ pasemi_mac_setup_csrings(mac);
+ if (!mac->num_cs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* Change maxf, i.e. what size frames are accepted.
+ * Need room for ethernet header and CRC word
+ */
+ reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
+ reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
+ reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
+ write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
+
+ dev->mtu = new_mtu;
+ /* MTU + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 2 64B cachelines */
+ mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+out:
+ if (running) {
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);
+
+ rx_ring(mac)->next_to_fill = 0;
+ pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);
+
+ napi_enable(&mac->napi);
+ netif_start_queue(dev);
+ pasemi_mac_intf_enable(mac);
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops pasemi_netdev_ops = {
+ .ndo_open = pasemi_mac_open,
+ .ndo_stop = pasemi_mac_close,
+ .ndo_start_xmit = pasemi_mac_start_tx,
+ .ndo_set_multicast_list = pasemi_mac_set_rx_mode,
+ .ndo_set_mac_address = pasemi_mac_set_mac_addr,
+ .ndo_change_mtu = pasemi_mac_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pasemi_mac_netpoll,
+#endif
+};
+
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct pasemi_mac *mac;
- int err;
- DECLARE_MAC_BUF(mac_buf);
+ int err, ret;
err = pci_enable_device(pdev);
if (err)
netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HIGHDMA;
+ NETIF_F_HIGHDMA | NETIF_F_GSO;
mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
}
memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
- mac->dma_if = mac_to_intf(mac);
- if (mac->dma_if < 0) {
+ ret = mac_to_intf(mac);
+ if (ret < 0) {
dev_err(&mac->pdev->dev, "Can't map DMA interface\n");
err = -ENODEV;
goto out;
}
+ mac->dma_if = ret;
switch (pdev->device) {
case 0xa005:
goto out;
}
- dev->open = pasemi_mac_open;
- dev->stop = pasemi_mac_close;
- dev->hard_start_xmit = pasemi_mac_start_tx;
- dev->set_multicast_list = pasemi_mac_set_rx_mode;
+ dev->netdev_ops = &pasemi_netdev_ops;
+ dev->mtu = PE_DEF_MTU;
+ /* default MTU + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 2 64B cachelines */
+ mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+ dev->ethtool_ops = &pasemi_mac_ethtool_ops;
if (err)
goto out;
err);
goto out;
} else if (netif_msg_probe(mac))
- printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n",
+ printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n",
dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
- mac->dma_if, print_mac(mac_buf, dev->dev_addr));
+ mac->dma_if, dev->dev_addr);
return err;
free_netdev(netdev);
}
-static struct pci_device_id pasemi_mac_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
{ },