/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

#include <asm/irq.h>
#include <asm/firmware.h>

#include "pasemi_mac.h"
/* We have our own align, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN	2
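/* The 2-byte offset plus the 14-byte Ethernet header adds up to 16 bytes,
 * so the IP header that follows lands back on the buffer's natural
 * alignment.
 */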
/* Must be a power of two */
#define RX_RING_SIZE 4096
#define TX_RING_SIZE 4096
#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
#define TX_DESC(tx, num)	((tx)->ring[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(tx, num)	((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(rx, num)	((rx)->ring[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(rx, num)	((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(rx, num)	((rx)->buffers[(num) & (RX_RING_SIZE-1)])
#define RING_USED(ring)		(((ring)->next_to_fill - (ring)->next_to_clean) \
				 & ((ring)->size - 1))
#define RING_AVAIL(ring)	((ring)->size - RING_USED(ring))
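/* The mask makes the subtraction wrap correctly: e.g. with a 4096-entry
 * ring, next_to_fill = 5 and next_to_clean = 4090 gives
 * (5 - 4090) & 4095 = 11 entries in use.
 */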
#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
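/* i.e. 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 2*64 (cachelines) = 1646 */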
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
static struct pasdma_status *dma_status;
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return 1;
#else
	return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}
static void write_iob_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->iob_regs+reg, val);
}

static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg)
{
	return in_le32(mac->regs+reg);
}

static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->regs+reg, val);
}

static unsigned int read_dma_reg(struct pasemi_mac *mac, unsigned int reg)
{
	return in_le32(mac->dma_regs+reg);
}

static void write_dma_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->dma_regs+reg, val);
}
static struct pasemi_mac_rxring *rx_ring(struct pasemi_mac *mac)
{
	return mac->rx;
}

static struct pasemi_mac_txring *tx_ring(struct pasemi_mac *mac)
{
	return mac->tx;
}
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	int len;
	const u8 *maddr;
	u8 addr[6];

	if (!dn) {
		dev_dbg(&pdev->dev, "No device node for mac, not configuring\n");
		return -ENOENT;
	}

	maddr = of_get_property(dn, "local-mac-address", &len);
	if (maddr && len == 6) {
		memcpy(mac->mac_addr, maddr, 6);
		return 0;
	}

	/* Some old versions of firmware mistakenly use mac-address
	 * (and as a string) instead of a byte array in local-mac-address.
	 */
	maddr = of_get_property(dn, "mac-address", NULL);
	if (!maddr) {
		dev_warn(&pdev->dev, "no mac address in device tree, not configuring\n");
		return -ENOENT;
	}

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&pdev->dev, "can't parse mac address, not configuring\n");
		return -EINVAL;
	}

	memcpy(mac->mac_addr, addr, 6);
	return 0;
}
static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
				   struct sk_buff *skb,
				   const dma_addr_t *dmas)
{
	int f;
	int nfrags = skb_shinfo(skb)->nr_frags;

	pci_unmap_single(mac->dma_pdev, dmas[0], skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	for (f = 0; f < nfrags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		pci_unmap_page(mac->dma_pdev, dmas[f+1], frag->size,
			       PCI_DMA_TODEVICE);
	}
	dev_kfree_skb_irq(skb);

	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
	 * rounded up to an even number since the ring is always filled
	 * with an even number of entries.
	 */
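	/* e.g. nfrags = 0 frees 2 slots, nfrags = 1 or 2 frees 4 slots,
	 * matching the even padding applied when the ring was filled.
	 */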
	return (nfrags + 3) & ~1;
}
static int pasemi_mac_setup_rx_resources(struct net_device *dev)
{
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);
	int chan_id = mac->dma_rxch;
	unsigned int cfg;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->size = RX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE, GFP_KERNEL);
	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
					RX_RING_SIZE * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->ring)
		goto out_ring_desc;

	memset(ring->ring, 0, RX_RING_SIZE * sizeof(u64));

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);
	if (!ring->buffers)
		goto out_buffers;

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

	write_dma_reg(mac, PAS_DMA_RXCHAN_BASEL(chan_id),
		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));

	write_dma_reg(mac, PAS_DMA_RXCHAN_BASEU(chan_id),
		      PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
		      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

	if (translation_enabled())
		cfg |= PAS_DMA_RXCHAN_CFG_CTR;

	write_dma_reg(mac, PAS_DMA_RXCHAN_CFG(chan_id), cfg);

	write_dma_reg(mac, PAS_DMA_RXINT_BASEL(mac->dma_if),
		      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

	write_dma_reg(mac, PAS_DMA_RXINT_BASEU(mac->dma_if),
		      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
		      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXINT_CFG_DHL(3) | PAS_DMA_RXINT_CFG_L2 |
	      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
	      PAS_DMA_RXINT_CFG_HEN;

	if (translation_enabled())
		cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

	write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	ring->status = &dma_status->rx_sta[mac->dma_rxch];
	ring->mac = mac;
	mac->rx = ring;

	return 0;

out_buffers:
	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(u64),
			  ring->ring, ring->dma);
out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}
static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(struct net_device *dev, int txch)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	u64 val;
	struct pasemi_mac_txring *ring;
	unsigned int cfg;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->size = TX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
					TX_RING_SIZE * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->ring)
		goto out_ring_desc;

	memset(ring->ring, 0, TX_RING_SIZE * sizeof(u64));

	write_dma_reg(mac, PAS_DMA_TXCHAN_BASEL(txch),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

	write_dma_reg(mac, PAS_DMA_TXCHAN_BASEU(txch), val);

	cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
	      PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
	      PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_WT(2);

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(mac, PAS_DMA_TXCHAN_CFG(txch), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;
	ring->status = &dma_status->tx_sta[txch];
	ring->chan = txch;
	ring->mac = mac;

	return ring;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	kfree(ring);
out_ring:
	return NULL;
}
static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
{
	struct pasemi_mac_txring *txring = tx_ring(mac);
	unsigned int i, j;
	struct pasemi_mac_buffer *info;
	dma_addr_t dmas[MAX_SKB_FRAGS+1];
	int freed;
	int start, limit;

	start = txring->next_to_clean;
	limit = txring->next_to_fill;

	/* Compensate for when fill has wrapped and clean has not */
	if (start > limit)
		limit += TX_RING_SIZE;

	for (i = start; i < limit; i += freed) {
		info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
		if (info->dma && info->skb) {
			for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
				dmas[j] = txring->ring_info[(i+1+j) &
						(TX_RING_SIZE-1)].dma;
			freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas);
		} else
			freed = 2;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		TX_DESC(txring, i) = 0;

	dma_free_coherent(&mac->dma_pdev->dev,
			  TX_RING_SIZE * sizeof(u64),
			  txring->ring, txring->dma);

	kfree(txring->ring_info);
	kfree(txring);
}
static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
{
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	unsigned int i;
	struct pasemi_mac_buffer *info;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_DESC_INFO(rx, i);
		if (info->skb && info->dma) {
			pci_unmap_single(mac->dma_pdev,
					 info->dma,
					 info->skb->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(info->skb);
		}
	}

	for (i = 0; i < RX_RING_SIZE; i++)
		RX_DESC(rx, i) = 0;

	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(u64),
			  rx_ring(mac)->ring, rx_ring(mac)->dma);

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

	kfree(rx_ring(mac)->ring_info);
	kfree(rx_ring(mac));
}
static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	int fill, count;

	fill = rx_ring(mac)->next_to_fill;
	for (count = 0; count < limit; count++) {
		struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
		u64 *buff = &RX_BUFF(rx, fill);
		struct sk_buff *skb;
		dma_addr_t dma;

		/* skb might still be in there for recycle on short receives */
		skb = info->skb;
		if (!skb) {
			skb = dev_alloc_skb(BUF_SIZE);
			skb_reserve(skb, LOCAL_SKB_ALIGN);
		}
		if (unlikely(!skb))
			break;

		dma = pci_map_single(mac->dma_pdev, skb->data,
				     BUF_SIZE - LOCAL_SKB_ALIGN,
				     PCI_DMA_FROMDEVICE);

		if (unlikely(dma_mapping_error(dma))) {
			dev_kfree_skb_irq(info->skb);
			break;
		}

		info->skb = skb;
		info->dma = dma;
		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
		fill++;
	}

	write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count);

	rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
				     (RX_RING_SIZE - 1);
}
static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts: finally
	 * ack the packet count interrupt we got in rx_intr.
	 */
	pcnt = *rx_ring(mac)->status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
}
static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts */
	pcnt = *tx_ring(mac)->status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan), reg);
}
static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx)
{
	unsigned int rcmdsta, ccmdsta;

	if (!netif_msg_rx_err(mac))
		return;

	rcmdsta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	ccmdsta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));

	printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
	       macrx, *rx_ring(mac)->status);

	printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
	       rcmdsta, ccmdsta);
}
static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx)
{
	unsigned int cmdsta;

	if (!netif_msg_tx_err(mac))
		return;

	cmdsta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));

	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "
	       "tx status 0x%016lx\n", mactx, *tx_ring(mac)->status);

	printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}
static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, int limit)
{
	struct pasemi_mac *mac = rx->mac;
	unsigned int n;
	int count;
	struct pasemi_mac_buffer *info;
	struct sk_buff *skb;
	unsigned int len;
	u64 macrx;
	dma_addr_t dma;
	int buf_index;
	u64 eval;

	spin_lock(&rx->lock);

	n = rx->next_to_clean;

	prefetch(&RX_DESC(rx, n));

	for (count = 0; count < limit; count++) {
		macrx = RX_DESC(rx, n);

		if ((macrx & XCT_MACRX_E) ||
		    (*rx_ring(mac)->status & PAS_STATUS_ERROR))
			pasemi_mac_rx_error(mac, macrx);

		if (!(macrx & XCT_MACRX_O))
			break;

		BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

		eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
			XCT_RXRES_8B_EVAL_S;
		buf_index = eval - 1;

		dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
		info = &RX_DESC_INFO(rx, buf_index);

		skb = info->skb;

		prefetch(skb);
		prefetch(&skb->data_len);

		len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		pci_unmap_single(mac->dma_pdev, dma, len, PCI_DMA_FROMDEVICE);

		if (macrx & XCT_MACRX_CRC) {
			/* CRC error flagged */
			mac->netdev->stats.rx_errors++;
			mac->netdev->stats.rx_crc_errors++;
			/* No need to free skb, it'll be reused */
			goto next;
		}

		if (len < 256) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(mac->netdev,
						   len + LOCAL_SKB_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, LOCAL_SKB_ALIGN);
				memcpy(new_skb->data, skb->data, len);
				/* save the skb in buffer_info as good */
				skb = new_skb;
			}
			/* else just continue with the old one */
		} else
			info->skb = NULL;

		info->dma = 0;

		/* Don't include CRC */
		skb_put(skb, len-4);

		if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
				XCT_MACRX_CSUM_S;
		} else
			skb->ip_summed = CHECKSUM_NONE;

		mac->netdev->stats.rx_bytes += len;
		mac->netdev->stats.rx_packets++;

		skb->protocol = eth_type_trans(skb, mac->netdev);
		netif_receive_skb(skb);

next:
		RX_DESC(rx, n) = 0;
		RX_DESC(rx, n+1) = 0;

		/* Need to zero it out since hardware doesn't, since the
		 * replenish loop uses it to tell when it's done.
		 */
		RX_BUFF(rx, buf_index) = 0;

		n += 4;
	}

	if (n > RX_RING_SIZE) {
		/* Errata 5971 workaround: L2 target of headers */
		write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0);
		n &= (RX_RING_SIZE-1);
	}

	rx_ring(mac)->next_to_clean = n;

	/* Increase is in number of 16-byte entries, and since each descriptor
	 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
	 * count*2.
	 */
	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count << 1);

	pasemi_mac_replenish_rx_ring(mac->netdev, count);

	spin_unlock(&rx_ring(mac)->lock);

	return count;
}
/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
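/* Each batched packet needs an sk_buff pointer plus up to MAX_SKB_FRAGS+1
 * DMA addresses in the on-stack skbs[] and dmas[] arrays used by
 * pasemi_mac_clean_tx() below, hence the scaling by MAX_SKB_FRAGS.
 */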
static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
{
	struct pasemi_mac *mac = txring->mac;
	int i, j;
	unsigned int start, descr_count, buf_count, batch_limit;
	unsigned int ring_limit;
	unsigned int total_count;
	unsigned long flags;
	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];

	total_count = 0;
	batch_limit = TX_CLEAN_BATCHSIZE;
restart:
	spin_lock_irqsave(&txring->lock, flags);

	start = txring->next_to_clean;
	ring_limit = txring->next_to_fill;

	/* Compensate for when fill has wrapped but clean has not */
	if (start > ring_limit)
		ring_limit += TX_RING_SIZE;

	buf_count = 0;
	descr_count = 0;

	for (i = start;
	     descr_count < batch_limit && i < ring_limit;
	     i += buf_count) {
		u64 mactx = TX_DESC(txring, i);
		struct sk_buff *skb;

		if ((mactx & XCT_MACTX_E) ||
		    (*tx_ring(mac)->status & PAS_STATUS_ERROR))
			pasemi_mac_tx_error(mac, mactx);

		if (unlikely(mactx & XCT_MACTX_O))
			/* Not yet transmitted */
			break;

		skb = TX_DESC_INFO(txring, i+1).skb;
		skbs[descr_count] = skb;

		buf_count = 2 + skb_shinfo(skb)->nr_frags;
		for (j = 0; j <= skb_shinfo(skb)->nr_frags; j++)
			dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

		TX_DESC(txring, i) = 0;
		TX_DESC(txring, i+1) = 0;

		/* Since we always fill with an even number of entries, make
		 * sure we skip any unused one at the end as well.
		 */
		if (buf_count & 1)
			buf_count++;

		descr_count++;
	}
	txring->next_to_clean = i & (TX_RING_SIZE-1);

	spin_unlock_irqrestore(&txring->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);

	total_count += descr_count;

	/* If the batch was full, try to clean more */
	if (descr_count == batch_limit)
		goto restart;

	return total_count;
}
static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;

	if (!(*rx_ring(mac)->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	/* Don't reset packet count so it won't fire again but clear
	 * all interrupt sources.
	 */
	reg = 0;

	if (*rx_ring(mac)->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
	if (*rx_ring(mac)->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
	if (*rx_ring(mac)->status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	netif_rx_schedule(dev, &mac->napi);

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);

	return IRQ_HANDLED;
}
static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct pasemi_mac_txring *txring = data;
	struct pasemi_mac *mac = txring->mac;
	unsigned int reg, pcnt;

	if (!(*txring->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	pasemi_mac_clean_tx(txring);

	pcnt = *txring->status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	if (*txring->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*txring->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(txring->chan), reg);

	return IRQ_HANDLED;
}
static void pasemi_adjust_link(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int msg;
	unsigned int flags;
	unsigned int new_flags;

	if (!mac->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		mac->link = 0;

		return;
	} else
		netif_carrier_on(dev);

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!mac->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (mac->phydev->speed) {
	case 1000:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		break;
	case 100:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		break;
	case 10:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		break;
	default:
		printk(KERN_ERR "pasemi_mac: Unsupported speed %d\n",
		       mac->phydev->speed);
	}

	/* Print on link or speed/duplex change */
	msg = mac->link != mac->phydev->link || flags != new_flags;

	mac->duplex = mac->phydev->duplex;
	mac->speed = mac->phydev->speed;
	mac->link = mac->phydev->link;

	if (new_flags != flags)
		write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");
}
static int pasemi_mac_phy_init(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;
	unsigned int phy_id;
	const phandle *ph;
	const unsigned int *prop;
	struct resource r;
	int ret;

	dn = pci_device_to_OF_node(mac->pdev);
	ph = of_get_property(dn, "phy-handle", NULL);
	if (!ph)
		return -ENODEV;
	phy_dn = of_find_node_by_phandle(*ph);

	prop = of_get_property(phy_dn, "reg", NULL);
	ret = of_address_to_resource(phy_dn->parent, 0, &r);
	if (ret)
		return -ENODEV;

	phy_id = *prop;
	snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

	of_node_put(phy_dn);

	phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0,
			     PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return PTR_ERR(phydev);
	}

	mac->phydev = phydev;

	return 0;
}
static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int base_irq;
	unsigned int flags;
	int ret;

	/* enable rx section */
	write_dma_reg(mac, PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	/* enable tx section */
	write_dma_reg(mac, PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
		      PAS_IOB_DMA_RXCH_CFG_CNTTH(0));

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch),
		      PAS_IOB_DMA_TXCH_CFG_CNTTH(128));

	/* 0xffffff is max value, about 16ms */
	write_iob_reg(mac, PAS_IOB_DMA_COM_TIMEOUTCFG,
		      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff));

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	mac->tx = pasemi_mac_setup_tx_resources(dev, mac->dma_txch);
	if (!mac->tx)
		goto out_tx_resources;

	write_mac_reg(mac, PAS_MAC_IPC_CHNL,
		      PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
		      PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));

	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_EN |
		      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
		      PAS_DMA_RXINT_RCMDSTA_BP |
		      PAS_DMA_RXINT_RCMDSTA_OO |
		      PAS_DMA_RXINT_RCMDSTA_BT);

	/* enable rx channel */
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_EN |
		      PAS_DMA_RXCHAN_CCMDSTA_DU |
		      PAS_DMA_RXCHAN_CCMDSTA_OD |
		      PAS_DMA_RXCHAN_CCMDSTA_FD |
		      PAS_DMA_RXCHAN_CCMDSTA_DT);

	/* enable tx channel */
	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
		      PAS_DMA_TXCHAN_TCMDSTA_EN |
		      PAS_DMA_TXCHAN_TCMDSTA_SZ |
		      PAS_DMA_TXCHAN_TCMDSTA_DB |
		      PAS_DMA_TXCHAN_TCMDSTA_DE |
		      PAS_DMA_TXCHAN_TCMDSTA_DA);

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), RX_RING_SIZE>>1);

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	if (mac->type == MAC_TYPE_GMAC)
		flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
	else
		flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

	/* Enable interface in MAC */
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_phy_init(dev);
	/* Some configs don't have PHYs (XAUI etc), so don't complain about
	 * failed init due to -ENODEV.
	 */
	if (ret && ret != -ENODEV)
		dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret);

	netif_start_queue(dev);
	napi_enable(&mac->napi);

	/* Interrupts are a bit different for our DMA controller: While
	 * it has a regular PCI device header, the interrupt there is
	 * really the base of the interrupt range it's using. Each tx
	 * and rx channel has its own interrupt source.
	 */

	base_irq = virq_to_hw(mac->dma_pdev->irq);

	mac->tx_irq = irq_create_mapping(NULL, base_irq + mac->dma_txch);

	snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
		 dev->name);

	ret = request_irq(mac->tx_irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx_irq_name, mac->tx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			base_irq + mac->dma_txch, ret);
		goto out_tx_int;
	}

	mac->rx_irq = irq_create_mapping(NULL, base_irq + 20 + mac->dma_rxch);

	snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
		 dev->name);

	ret = request_irq(mac->rx_irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx_irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			base_irq + 20 + mac->dma_rxch, ret);
		goto out_rx_int;
	}

	if (mac->phydev)
		phy_start(mac->phydev);

	return 0;

out_rx_int:
	free_irq(mac->tx_irq, mac->tx);
out_tx_int:
	napi_disable(&mac->napi);
	netif_stop_queue(dev);
out_tx_resources:
	pasemi_mac_free_tx_resources(mac);
out_rx_resources:
	pasemi_mac_free_rx_resources(mac);

	return ret;
}
#define MAX_RETRIES 5000
static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int sta;
	int retries;

	if (mac->phydev) {
		phy_stop(mac->phydev);
		phy_disconnect(mac->phydev);
	}

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
		   PAS_DMA_RXINT_RCMDSTA_OO |
		   PAS_DMA_RXINT_RCMDSTA_BT))
		printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));
	if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
		   PAS_DMA_RXCHAN_CCMDSTA_OD |
		   PAS_DMA_RXCHAN_CCMDSTA_FD |
		   PAS_DMA_RXCHAN_CCMDSTA_DT))
		printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));
	if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
		   PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
		printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(tx_ring(mac));
	pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

	/* Disable interface */
	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
		      PAS_DMA_TXCHAN_TCMDSTA_ST);
	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_ST);
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));
		if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel %d\n",
			mac->dma_txch);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));
		if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

	/* Then, disable the channel. This must be done separately from
	 * stopping, since you can't disable when active.
	 */
	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

	free_irq(mac->tx_irq, mac->tx);
	free_irq(mac->rx_irq, dev);

	/* Free resources */
	pasemi_mac_free_rx_resources(mac);
	pasemi_mac_free_tx_resources(mac);

	return 0;
}
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_txring *txring;
	u64 dflags, mactx;
	dma_addr_t map[MAX_SKB_FRAGS+1];
	unsigned int map_size[MAX_SKB_FRAGS+1];
	unsigned long flags;
	int i, nfrags;

	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const unsigned char *nh = skb_network_header(skb);

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			dflags |= XCT_MACTX_CSUM_TCP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		case IPPROTO_UDP:
			dflags |= XCT_MACTX_CSUM_UDP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		}
	}

	nfrags = skb_shinfo(skb)->nr_frags;

	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	map_size[0] = skb_headlen(skb);
	if (dma_mapping_error(map[0])) {
		nfrags = 0;
		goto out_err_nolock;
	}

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
					frag->page_offset, frag->size,
					PCI_DMA_TODEVICE);
		map_size[i+1] = frag->size;
		if (dma_mapping_error(map[i+1])) {
			nfrags = i + 1;
			goto out_err_nolock;
		}
	}

	mactx = dflags | XCT_MACTX_LLEN(skb->len);

	txring = tx_ring(mac);

	spin_lock_irqsave(&txring->lock, flags);

	/* Avoid stepping on the same cache line that the DMA controller
	 * is currently about to send, so leave at least 8 words available.
	 * Total free space needed is mactx + fragments + 8.
	 */
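	/* i.e. 1 mactx descriptor + (nfrags + 1) pointers + 8 slack
	 * = nfrags + 10, which is what the check below requires.
	 */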
	if (RING_AVAIL(txring) < nfrags + 10) {
		/* no room -- stop the queue and wait for tx intr */
		netif_stop_queue(dev);
		goto out_err;
	}

	TX_DESC(txring, txring->next_to_fill) = mactx;
	txring->next_to_fill++;
	TX_DESC_INFO(txring, txring->next_to_fill).skb = skb;
	for (i = 0; i <= nfrags; i++) {
		TX_DESC(txring, txring->next_to_fill+i) =
			XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
		TX_DESC_INFO(txring, txring->next_to_fill+i).dma = map[i];
	}

	/* We have to add an even number of 8-byte entries to the ring
	 * even if the last one is unused. That means always an odd number
	 * of pointers + one mactx descriptor.
	 */
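	/* e.g. a 2-fragment skb uses 1 mactx + 3 pointers = 4 slots as-is,
	 * while a 1-fragment skb uses 1 mactx + 2 pointers = 3 slots and
	 * gets one unused pad slot so the total stays even.
	 */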
	if (nfrags & 1)
		nfrags++;

	txring->next_to_fill = (txring->next_to_fill + nfrags + 1) &
				(TX_RING_SIZE-1);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&txring->lock, flags);

	write_dma_reg(mac, PAS_DMA_TXCHAN_INCR(txring->chan), (nfrags+2) >> 1);

	return NETDEV_TX_OK;

out_err:
	spin_unlock_irqrestore(&txring->lock, flags);
out_err_nolock:
	while (nfrags--)
		pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
				 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}
static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

	/* Set promiscuous */
	if (dev->flags & IFF_PROMISC)
		flags |= PAS_MAC_CFG_PCFG_PR;
	else
		flags &= ~PAS_MAC_CFG_PCFG_PR;

	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
	struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
	struct net_device *dev = mac->netdev;
	int pkts;

	pasemi_mac_clean_tx(tx_ring(mac));
	pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
	if (pkts < budget) {
		/* all done, no more packets present */
		netif_rx_complete(dev, napi);

		pasemi_mac_restart_rx_intr(mac);
	}
	return pkts;
}
static void __iomem * __devinit map_onedev(struct pci_dev *p, int index)
{
	struct device_node *dn;
	void __iomem *ret;

	dn = pci_device_to_OF_node(p);
	if (!dn)
		goto fallback;

	ret = of_iomap(dn, index);
	if (!ret)
		goto fallback;

	return ret;
fallback:
	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}
static int __devinit pasemi_mac_map_regs(struct pasemi_mac *mac)
{
	struct resource res;
	struct device_node *dn;
	int err = -ENODEV;

	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!mac->dma_pdev) {
		dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
		return -ENODEV;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!mac->iob_pdev) {
		dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
		return -ENODEV;
	}

	mac->regs = map_onedev(mac->pdev, 0);
	mac->dma_regs = map_onedev(mac->dma_pdev, 0);
	mac->iob_regs = map_onedev(mac->iob_pdev, 0);

	if (!mac->regs || !mac->dma_regs || !mac->iob_regs) {
		dev_err(&mac->pdev->dev, "Can't map registers\n");
		return -ENODEV;
	}

	/* The dma status structure is located in the I/O bridge, and
	 * is cache coherent.
	 */
	dn = pci_device_to_OF_node(mac->iob_pdev);
	if (dn)
		err = of_address_to_resource(dn, 1, &res);
	if (!dn || err) {
		/* Fallback for old firmware */
		res.start = 0xfd800000;
		res.end = res.start + 0x1000;
	}
	dma_status = __ioremap(res.start, res.end-res.start, 0);

	return 0;
}
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int index = 0;
	struct net_device *dev;
	struct pasemi_mac *mac;
	int err;
	DECLARE_MAC_BUF(mac_buf);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (dev == NULL) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	mac->pdev = pdev;
	mac->netdev = dev;

	netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

	dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG;

	/* These should come out of the device tree eventually */
	mac->dma_txch = index;
	mac->dma_rxch = index;

	/* We probe GMAC before XAUI, but the DMA interfaces are
	 * in XAUI, GMAC order.
	 */
	if (index < 4)
		mac->dma_if = index + 2;
	else
		mac->dma_if = index - 4;
	index++;

	switch (pdev->device) {
	case 0xa005:
		mac->type = MAC_TYPE_GMAC;
		break;
	case 0xa006:
		mac->type = MAC_TYPE_XAUI;
		break;
	default:
		err = -ENODEV;
		goto out;
	}

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
		err = -ENODEV;
		goto out;
	}
	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	dev->open = pasemi_mac_open;
	dev->stop = pasemi_mac_close;
	dev->hard_start_xmit = pasemi_mac_start_tx;
	dev->set_multicast_list = pasemi_mac_set_rx_mode;

	err = pasemi_mac_map_regs(mac);
	if (err)
		goto out;

	/* Enable most messages by default */
	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = register_netdev(dev);

	if (err) {
		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
			err);
		goto out;
	} else if (netif_msg_probe(mac))
		printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, print_mac(mac_buf, dev->dev_addr));

	return err;

out:
	if (mac->iob_pdev)
		pci_dev_put(mac->iob_pdev);
	if (mac->dma_pdev)
		pci_dev_put(mac->dma_pdev);
	if (mac->dma_regs)
		iounmap(mac->dma_regs);
	if (mac->iob_regs)
		iounmap(mac->iob_regs);
	if (mac->regs)
		iounmap(mac->regs);
	free_netdev(dev);
out_disable_device:
	pci_disable_device(pdev);
	return err;
}
static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	iounmap(mac->regs);
	iounmap(mac->dma_regs);
	iounmap(mac->iob_regs);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}
static struct pci_device_id pasemi_mac_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
	{ },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};
static void __exit pasemi_mac_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_mac_driver);
	__iounmap(dma_status);
}
static int __init pasemi_mac_init_module(void)
{
	return pci_register_driver(&pasemi_mac_driver);
}
module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);