/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus.  However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait.  However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Fix driver to work on more than just Virtex5.  Right now the driver
 *   assumes that the locallink DMA registers are accessed via DCR
 *   instructions.
 * - Factor out locallink DMA code into separate driver
 * - Fix multicast assignment.
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 *
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>

#include "ll_temac.h"

#define TX_BD_NUM   64
#define RX_BD_NUM   128

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

u32 temac_ior(struct temac_local *lp, int offset)
{
        return in_be32((u32 *)(lp->regs + offset));
}

void temac_iow(struct temac_local *lp, int offset, u32 value)
{
        out_be32((u32 *)(lp->regs + offset), value);
}

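/*
 * Poll the RDY0 "hard access ready" bit until the core can accept another
 * indirect register access.  Gives up (with a WARN) after roughly two
 * jiffies, sleeping 1 ms between polls.
 */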
int temac_indirect_busywait(struct temac_local *lp)
{
        long end = jiffies + 2;

        while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
                if (end - jiffies <= 0) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
                msleep(1);
        }
        return 0;
}

/**
 * temac_indirect_in32
 *
 * lp->indirect_mutex must be held when calling this function
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
        u32 val;

        if (temac_indirect_busywait(lp))
                return -ETIMEDOUT;
        temac_iow(lp, XTE_CTL0_OFFSET, reg);
        if (temac_indirect_busywait(lp))
                return -ETIMEDOUT;
        val = temac_ior(lp, XTE_LSW0_OFFSET);

        return val;
}

/**
 * temac_indirect_out32
 *
 * lp->indirect_mutex must be held when calling this function
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
        if (temac_indirect_busywait(lp))
                return;
        temac_iow(lp, XTE_LSW0_OFFSET, value);
        temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}

static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
        return dcr_read(lp->sdma_dcrs, reg);
}

static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
        dcr_write(lp->sdma_dcrs, reg, value);
}

/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct sk_buff *skb;
        int i;

        lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
        /* allocate the tx and rx ring buffer descriptors. */
        /* returns a virtual address and a physical address. */
        lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                         &lp->tx_bd_p, GFP_KERNEL);
        lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->rx_bd_v) * RX_BD_NUM,
                                         &lp->rx_bd_p, GFP_KERNEL);

        memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
        for (i = 0; i < TX_BD_NUM; i++) {
                lp->tx_bd_v[i].next = lp->tx_bd_p +
                                sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
        }

        memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
        for (i = 0; i < RX_BD_NUM; i++) {
                lp->rx_bd_v[i].next = lp->rx_bd_p +
                                sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

                skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
                                + XTE_ALIGN, GFP_ATOMIC);
                if (!skb) {
                        dev_err(&ndev->dev, "alloc_skb error %d\n", i);
                        return -1;
                }
                lp->rx_skb[i] = skb;
                skb_reserve(skb, BUFFER_ALIGN(skb->data));
                /* returns physical address of skb->data */
                lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
                                                     skb->data,
                                                     XTE_MAX_JUMBO_FRAME_SIZE,
                                                     DMA_FROM_DEVICE);
                lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
                lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
        }

        temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
                                          CHNL_CTRL_IRQ_EN |
                                          CHNL_CTRL_IRQ_DLY_EN |
                                          CHNL_CTRL_IRQ_COAL_EN);
        /* 0x10220483 */
        /* 0x00100483 */
        temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
                                          CHNL_CTRL_IRQ_EN |
                                          CHNL_CTRL_IRQ_DLY_EN |
                                          CHNL_CTRL_IRQ_COAL_EN |
                                          CHNL_CTRL_IRQ_IOE);
        /* 0xff010283 */

        temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p);
        temac_dma_out32(lp, RX_TAILDESC_PTR,
                       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
        temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);

        return 0;
}

/* ---------------------------------------------------------------------
 * net_device_ops
 */

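/*
 * Program the unicast address filter.  The low four bytes of the MAC
 * address go into UAW0 and the remaining two bytes into the low half of
 * UAW1; a random address is generated if the given one is not valid.
 */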
static int temac_set_mac_address(struct net_device *ndev, void *address)
{
        struct temac_local *lp = netdev_priv(ndev);

        if (address)
                memcpy(ndev->dev_addr, address, ETH_ALEN);

        if (!is_valid_ether_addr(ndev->dev_addr))
                random_ether_addr(ndev->dev_addr);

        /* set up unicast MAC address filter with the given MAC address */
        mutex_lock(&lp->indirect_mutex);
        temac_indirect_out32(lp, XTE_UAW0_OFFSET,
                             (ndev->dev_addr[0]) |
                             (ndev->dev_addr[1] << 8) |
                             (ndev->dev_addr[2] << 16) |
                             (ndev->dev_addr[3] << 24));
        /* There are reserved bits in EUAW1 so don't affect them;
         * set MAC bits [47:32] in EUAW1 */
        temac_indirect_out32(lp, XTE_UAW1_OFFSET,
                             (ndev->dev_addr[4] & 0x000000ff) |
                             (ndev->dev_addr[5] << 8));
        mutex_unlock(&lp->indirect_mutex);

        return 0;
}

static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        return temac_set_mac_address(ndev, addr->sa_data);
}

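/*
 * Rebuild the receive address filter.  With too many multicast addresses
 * (or an explicit allmulti/promisc request) the core is simply put into
 * promiscuous mode; otherwise each multicast address is written into one
 * of the MULTICAST_CAM_TABLE_NUM CAM entries via MAW0/MAW1.
 */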
static void temac_set_multicast_list(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        u32 multi_addr_msw, multi_addr_lsw, val;
        int i;

        mutex_lock(&lp->indirect_mutex);
        if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
            netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
                /*
                 *      We must make the kernel realise we had to move
                 *      into promisc mode or we start all out war on
                 *      the cable. If it was a promisc request the
                 *      flag is already set. If not we assert it.
                 */
                ndev->flags |= IFF_PROMISC;
                temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
                dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
        } else if (!netdev_mc_empty(ndev)) {
                struct dev_mc_list *mclist;

                i = 0;
                netdev_for_each_mc_addr(mclist, ndev) {
                        if (i >= MULTICAST_CAM_TABLE_NUM)
                                break;
                        multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
                                          (mclist->dmi_addr[2] << 16) |
                                          (mclist->dmi_addr[1] << 8) |
                                          (mclist->dmi_addr[0]));
                        temac_indirect_out32(lp, XTE_MAW0_OFFSET,
                                             multi_addr_msw);
                        multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
                                          (mclist->dmi_addr[4]) | (i << 16));
                        temac_indirect_out32(lp, XTE_MAW1_OFFSET,
                                             multi_addr_lsw);
                        i++;
                }
        } else {
                val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
                temac_indirect_out32(lp, XTE_AFM_OFFSET,
                                     val & ~XTE_AFM_EPPRM_MASK);
                temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
                temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
                dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
        }
        mutex_unlock(&lp->indirect_mutex);
}

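/*
 * Table of configurable TEMAC options.  Each entry names an option flag,
 * the indirect register it lives in, and the bits to OR in when the
 * option is requested; temac_setoptions() below walks this table.
 */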
struct temac_option {
        int flg;
        u32 opt;
        u32 reg;
        u32 m_or;
        u32 m_and;
} temac_options[] = {
        /* Turn on jumbo packet support for both Rx and Tx */
        {
                .opt = XTE_OPTION_JUMBO,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXJMBO_MASK,
        },
        {
                .opt = XTE_OPTION_JUMBO,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXJMBO_MASK,
        },
        /* Turn on VLAN packet support for both Rx and Tx */
        {
                .opt = XTE_OPTION_VLAN,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXVLAN_MASK,
        },
        {
                .opt = XTE_OPTION_VLAN,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXVLAN_MASK,
        },
        /* Turn on FCS stripping on receive packets */
        {
                .opt = XTE_OPTION_FCS_STRIP,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXFCS_MASK,
        },
        /* Turn on FCS insertion on transmit packets */
        {
                .opt = XTE_OPTION_FCS_INSERT,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXFCS_MASK,
        },
        /* Turn on length/type field checking on receive packets */
        {
                .opt = XTE_OPTION_LENTYPE_ERR,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXLT_MASK,
        },
        /* Turn on receive flow control */
        {
                .opt = XTE_OPTION_FLOW_CONTROL,
                .reg = XTE_FCC_OFFSET,
                .m_or = XTE_FCC_RXFLO_MASK,
        },
        /* Turn on transmit flow control */
        {
                .opt = XTE_OPTION_FLOW_CONTROL,
                .reg = XTE_FCC_OFFSET,
                .m_or = XTE_FCC_TXFLO_MASK,
        },
        /* Turn on promiscuous frame filtering (all frames are received) */
        {
                .opt = XTE_OPTION_PROMISC,
                .reg = XTE_AFM_OFFSET,
                .m_or = XTE_AFM_EPPRM_MASK,
        },
        /* Enable transmitter if not already enabled */
        {
                .opt = XTE_OPTION_TXEN,
                .reg = XTE_TXC_OFFSET,
                .m_or = XTE_TXC_TXEN_MASK,
        },
        /* Enable receiver if not already enabled */
        {
                .opt = XTE_OPTION_RXEN,
                .reg = XTE_RXC1_OFFSET,
                .m_or = XTE_RXC1_RXEN_MASK,
        },
        {}
};

/**
 * temac_setoptions
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct temac_option *tp = &temac_options[0];
        int reg;

        mutex_lock(&lp->indirect_mutex);
        while (tp->opt) {
                reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
                if (options & tp->opt)
                        reg |= tp->m_or;
                temac_indirect_out32(lp, tp->reg, reg);
                tp++;
        }
        lp->options |= options;
        mutex_unlock(&lp->indirect_mutex);

        return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        u32 timeout;
        u32 val;

        /* Perform a software reset */

        /* 0x300 host enable bit ? */
        /* reset PHY through control register ?:1 */

        dev_dbg(&ndev->dev, "%s()\n", __func__);

        mutex_lock(&lp->indirect_mutex);
        /* Reset the receiver and wait for it to finish reset */
        temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
        timeout = 1000;
        while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset RX reset timeout!!\n");
                        break;
                }
        }

        /* Reset the transmitter and wait for it to finish reset */
        temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
        timeout = 1000;
        while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset TX reset timeout!!\n");
                        break;
                }
        }

        /* Disable the receiver */
        val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
        temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);

        /* Reset Local Link (DMA) */
        temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
        timeout = 1000;
        while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(&ndev->dev,
                                "temac_device_reset DMA reset timeout!!\n");
                        break;
                }
        }
        temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

        temac_dma_bd_init(ndev);

        temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
        temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
        temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
        temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);

        mutex_unlock(&lp->indirect_mutex);

        /* Sync default options with HW
         * but leave receiver and transmitter disabled.  */
        temac_setoptions(ndev,
                         lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

        temac_set_mac_address(ndev, NULL);

        /* Set address filter table */
        temac_set_multicast_list(ndev);
        if (temac_setoptions(ndev, lp->options))
                dev_err(&ndev->dev, "Error setting TEMAC options\n");

        /* Init Driver variable */
        ndev->trans_start = 0;
}

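/*
 * phylib adjust_link callback: when the PHY reports a change in speed,
 * duplex or link state, reprogram the EMCFG link speed field to match
 * and log the new state.
 */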
void temac_adjust_link(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phy = lp->phy_dev;
        u32 mii_speed;
        int link_state;

        /* hash together the state values to decide if something has changed */
        link_state = phy->speed | (phy->duplex << 1) | phy->link;

        mutex_lock(&lp->indirect_mutex);
        if (lp->last_link != link_state) {
                mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
                mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

                switch (phy->speed) {
                case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
                case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
                case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
                }

                /* Write new speed setting out to TEMAC */
                temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
                lp->last_link = link_state;
                phy_print_status(phy);
        }
        mutex_unlock(&lp->indirect_mutex);
}

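/*
 * Reclaim TX descriptors that the DMA engine has marked complete: unmap
 * each buffer, free the skb stashed in app4, bump the TX statistics and
 * finally wake the queue so the stack can submit more packets.
 */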
static void temac_start_xmit_done(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct cdmac_bd *cur_p;
        unsigned int stat = 0;

        cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
        stat = cur_p->app0;

        while (stat & STS_CTRL_APP0_CMPLT) {
                dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
                                 DMA_TO_DEVICE);
                if (cur_p->app4)
                        dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
                cur_p->app0 = 0;

                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += cur_p->len;

                lp->tx_bd_ci++;
                if (lp->tx_bd_ci >= TX_BD_NUM)
                        lp->tx_bd_ci = 0;

                cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
                stat = cur_p->app0;
        }

        netif_wake_queue(ndev);
}

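/*
 * Queue a frame for transmission.  The head and each page fragment are
 * DMA-mapped into consecutive descriptors; app0 carries the SOP/EOP flags,
 * app1/app2 the checksum-offload offsets and pseudo-header seed, and app4
 * the skb pointer so it can be freed once transmission completes.
 */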
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct cdmac_bd *cur_p;
        dma_addr_t start_p, tail_p;
        int ii;
        unsigned long num_frag;
        skb_frag_t *frag;

        num_frag = skb_shinfo(skb)->nr_frags;
        frag = &skb_shinfo(skb)->frags[0];
        start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

        if (cur_p->app0 & STS_CTRL_APP0_CMPLT) {
                if (!netif_queue_stopped(ndev)) {
                        netif_stop_queue(ndev);
                        return NETDEV_TX_BUSY;
                }
                return NETDEV_TX_BUSY;
        }

        cur_p->app0 = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const struct iphdr *ip = ip_hdr(skb);
                int length = 0, start = 0, insert = 0;

                switch (ip->protocol) {
                case IPPROTO_TCP:
                        start = sizeof(struct iphdr) + ETH_HLEN;
                        insert = sizeof(struct iphdr) + ETH_HLEN + 16;
                        length = ip->tot_len - sizeof(struct iphdr);
                        break;
                case IPPROTO_UDP:
                        start = sizeof(struct iphdr) + ETH_HLEN;
                        insert = sizeof(struct iphdr) + ETH_HLEN + 6;
                        length = ip->tot_len - sizeof(struct iphdr);
                        break;
                default:
                        break;
                }
                cur_p->app1 = ((start << 16) | insert);
                cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
                                                length, ip->protocol, 0);
                skb->data[insert] = 0;
                skb->data[insert + 1] = 0;
        }
        cur_p->app0 |= STS_CTRL_APP0_SOP;
        cur_p->len = skb_headlen(skb);
        cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
                                     DMA_TO_DEVICE);
        cur_p->app4 = (unsigned long)skb;

        for (ii = 0; ii < num_frag; ii++) {
                lp->tx_bd_tail++;
                if (lp->tx_bd_tail >= TX_BD_NUM)
                        lp->tx_bd_tail = 0;

                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                cur_p->phys = dma_map_single(ndev->dev.parent,
                                             (void *)page_address(frag->page) +
                                                  frag->page_offset,
                                             frag->size, DMA_TO_DEVICE);
                cur_p->len = frag->size;
                cur_p->app0 = 0;
                frag++;
        }
        cur_p->app0 |= STS_CTRL_APP0_EOP;

        tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        lp->tx_bd_tail++;
        if (lp->tx_bd_tail >= TX_BD_NUM)
                lp->tx_bd_tail = 0;

        /* Kick off the transfer */
        temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

        return NETDEV_TX_OK;
}


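/*
 * Receive path: walk the RX descriptor ring, handing each completed
 * buffer up the stack with netif_rx(), then replenish the slot with a
 * freshly allocated, DMA-mapped jumbo-sized skb and advance the DMA
 * engine's tail pointer.  Called from the RX interrupt handler.
 */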
static void ll_temac_recv(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        struct sk_buff *skb, *new_skb;
        unsigned int bdstat;
        struct cdmac_bd *cur_p;
        dma_addr_t tail_p;
        int length;
        unsigned long flags;

        spin_lock_irqsave(&lp->rx_lock, flags);

        tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
        cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

        bdstat = cur_p->app0;
        while ((bdstat & STS_CTRL_APP0_CMPLT)) {

                skb = lp->rx_skb[lp->rx_bd_ci];
                length = cur_p->app4 & 0x3FFF;

                /* unmap the buffer with the handle and size used to map it */
                dma_unmap_single(ndev->dev.parent, cur_p->phys,
                                 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);

                skb_put(skb, length);
                skb->dev = ndev;
                skb->protocol = eth_type_trans(skb, ndev);
                skb->ip_summed = CHECKSUM_NONE;

                netif_rx(skb);

                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += length;

                new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
                                GFP_ATOMIC);
                if (!new_skb) {
                        dev_err(&ndev->dev, "no memory for new sk_buff\n");
                        spin_unlock_irqrestore(&lp->rx_lock, flags);
                        return;
                }

                skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));

                cur_p->app0 = STS_CTRL_APP0_IRQONEND;
                cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
                                             XTE_MAX_JUMBO_FRAME_SIZE,
                                             DMA_FROM_DEVICE);
                cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
                lp->rx_skb[lp->rx_bd_ci] = new_skb;

                lp->rx_bd_ci++;
                if (lp->rx_bd_ci >= RX_BD_NUM)
                        lp->rx_bd_ci = 0;

                cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
                bdstat = cur_p->app0;
        }
        temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);

        spin_unlock_irqrestore(&lp->rx_lock, flags);
}

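/*
 * DMA interrupt handlers: read the per-channel IRQ status register, write
 * it back to acknowledge the interrupt, then run the TX-completion or RX
 * harvesting routine when a coalesce/delay interrupt is pending.
 */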
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
        struct net_device *ndev = _ndev;
        struct temac_local *lp = netdev_priv(ndev);
        unsigned int status;

        status = temac_dma_in32(lp, TX_IRQ_REG);
        temac_dma_out32(lp, TX_IRQ_REG, status);

        if (status & (IRQ_COAL | IRQ_DLY))
                temac_start_xmit_done(lp->ndev);
        if (status & 0x080)
                dev_err(&ndev->dev, "DMA error 0x%x\n", status);

        return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
        struct net_device *ndev = _ndev;
        struct temac_local *lp = netdev_priv(ndev);
        unsigned int status;

        /* Read and clear the status registers */
        status = temac_dma_in32(lp, RX_IRQ_REG);
        temac_dma_out32(lp, RX_IRQ_REG, status);

        if (status & (IRQ_COAL | IRQ_DLY))
                ll_temac_recv(lp->ndev);

        return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        int rc;

        dev_dbg(&ndev->dev, "temac_open()\n");

        if (lp->phy_node) {
                lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
                                             temac_adjust_link, 0, 0);
                if (!lp->phy_dev) {
                        dev_err(lp->dev, "of_phy_connect() failed\n");
                        return -ENODEV;
                }

                phy_start(lp->phy_dev);
        }

        rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_tx_irq;
        rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_rx_irq;

        temac_device_reset(ndev);
        return 0;

 err_rx_irq:
        free_irq(lp->tx_irq, ndev);
 err_tx_irq:
        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;
        dev_err(lp->dev, "request_irq() failed\n");
        return rc;
}

static int temac_stop(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        dev_dbg(&ndev->dev, "temac_close()\n");

        free_irq(lp->tx_irq, ndev);
        free_irq(lp->rx_irq, ndev);

        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);

        /* The handlers expect the net_device, not the private struct */
        ll_temac_rx_irq(lp->rx_irq, ndev);
        ll_temac_tx_irq(lp->tx_irq, ndev);

        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops temac_netdev_ops = {
        .ndo_open = temac_open,
        .ndo_stop = temac_stop,
        .ndo_start_xmit = temac_start_xmit,
        .ndo_set_mac_address = netdev_set_mac_address,
        //.ndo_set_multicast_list = temac_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct temac_local *lp = netdev_priv(ndev);
        int i, len = 0;

        for (i = 0; i < 0x11; i++)
                len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
                               (i % 8) == 7 ? "\n" : " ");
        len += sprintf(buf + len, "\n");

        return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
        &dev_attr_llink_regs.attr,
        NULL,
};

static const struct attribute_group temac_attr_group = {
        .attrs = temac_device_attrs,
};

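/*
 * OF probe: map the TEMAC registers, follow the "llink-connected" phandle
 * to the LocalLink DMA node to map its DCRs and interrupts, read the
 * "local-mac-address" property, register the MDIO bus, look up the
 * optional "phy-handle", then create the sysfs group and register the
 * net_device.
 */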
static int __devinit
temac_of_probe(struct of_device *op, const struct of_device_id *match)
{
        struct device_node *np;
        struct temac_local *lp;
        struct net_device *ndev;
        const void *addr;
        int size, rc = 0;
        unsigned int dcrs;

        /* Init network device structure */
        ndev = alloc_etherdev(sizeof(*lp));
        if (!ndev) {
                dev_err(&op->dev, "could not allocate device.\n");
                return -ENOMEM;
        }
        ether_setup(ndev);
        dev_set_drvdata(&op->dev, ndev);
        SET_NETDEV_DEV(ndev, &op->dev);
        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
        ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
        ndev->netdev_ops = &temac_netdev_ops;
#if 0
        ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
        ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
        ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
        ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
        ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
        ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
        ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
        ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
        ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
        ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
        ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

        /* setup temac private info structure */
        lp = netdev_priv(ndev);
        lp->ndev = ndev;
        lp->dev = &op->dev;
        lp->options = XTE_OPTION_DEFAULTS;
        spin_lock_init(&lp->rx_lock);
        mutex_init(&lp->indirect_mutex);

        /* map device registers */
        lp->regs = of_iomap(op->dev.of_node, 0);
        if (!lp->regs) {
                dev_err(&op->dev, "could not map temac regs.\n");
                rc = -ENOMEM;
                goto nodev;
        }

        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
        np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
                rc = -ENODEV;
                goto nodev;
        }

        dcrs = dcr_resource_start(np, 0);
        if (dcrs == 0) {
                dev_err(&op->dev, "could not get DMA register address\n");
                rc = -ENODEV;
                goto nodev;
        }
        lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
        dev_dbg(&op->dev, "DCR base: %x\n", dcrs);

        lp->rx_irq = irq_of_parse_and_map(np, 0);
        lp->tx_irq = irq_of_parse_and_map(np, 1);
        if (!lp->rx_irq || !lp->tx_irq) {
                dev_err(&op->dev, "could not determine irqs\n");
                rc = -ENOMEM;
                goto nodev;
        }

        of_node_put(np); /* Finished with the DMA node; drop the reference */

        /* Retrieve the MAC address */
        addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
        if ((!addr) || (size != 6)) {
                dev_err(&op->dev, "could not find MAC address\n");
                rc = -ENODEV;
                goto nodev;
        }
        temac_set_mac_address(ndev, (void *)addr);

        rc = temac_mdio_setup(lp, op->dev.of_node);
        if (rc)
                dev_warn(&op->dev, "error registering MDIO bus\n");

        lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
        if (lp->phy_node)
                dev_dbg(lp->dev, "using PHY node %s (%p)\n",
                        lp->phy_node->full_name, lp->phy_node);

        /* Add the device attributes */
        rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
        if (rc) {
                dev_err(lp->dev, "Error creating sysfs files\n");
                goto nodev;
        }

        rc = register_netdev(lp->ndev);
        if (rc) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
                goto err_register_ndev;
        }

        return 0;

 err_register_ndev:
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
 nodev:
        free_netdev(ndev);
        ndev = NULL;
        return rc;
}

static int __devexit temac_of_remove(struct of_device *op)
{
        struct net_device *ndev = dev_get_drvdata(&op->dev);
        struct temac_local *lp = netdev_priv(ndev);

        temac_mdio_teardown(lp);
        unregister_netdev(ndev);
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
        dev_set_drvdata(&op->dev, NULL);
        free_netdev(ndev);
        return 0;
}

static struct of_device_id temac_of_match[] __devinitdata = {
        { .compatible = "xlnx,xps-ll-temac-1.01.b", },
        { .compatible = "xlnx,xps-ll-temac-2.00.a", },
        { .compatible = "xlnx,xps-ll-temac-2.02.a", },
        { .compatible = "xlnx,xps-ll-temac-2.03.a", },
        {},
};
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct of_platform_driver temac_of_driver = {
        .match_table = temac_of_match,
        .probe = temac_of_probe,
        .remove = __devexit_p(temac_of_remove),
        .driver = {
                .owner = THIS_MODULE,
                .name = "xilinx_temac",
        },
};

static int __init temac_init(void)
{
        return of_register_platform_driver(&temac_of_driver);
}
module_init(temac_init);

static void __exit temac_exit(void)
{
        of_unregister_platform_driver(&temac_of_driver);
}
module_exit(temac_exit);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");