2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
8 * QE UCC Gigabit Ethernet Driver
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/stddef.h>
20 #include <linux/interrupt.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/spinlock.h>
26 #include <linux/ethtool.h>
27 #include <linux/delay.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/fsl_devices.h>
31 #include <linux/mii.h>
32 #include <linux/phy.h>
33 #include <linux/workqueue.h>
35 #include <asm/of_platform.h>
36 #include <asm/uaccess.h>
39 #include <asm/immap_qe.h>
42 #include <asm/ucc_fast.h>
45 #include "ucc_geth_mii.h"
49 #define DRV_DESC "QE UCC Gigabit Ethernet Controller version: Sept 11, 2006"
50 #define DRV_NAME "ucc_geth"
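/* Thin wrappers around printk(): ugeth_printk() appends a newline, and the
 * ugeth_dbg/err/info/warn helpers below select the corresponding log level. */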
52 #define ugeth_printk(level, format, arg...) \
53 printk(level format "\n", ## arg)
55 #define ugeth_dbg(format, arg...) \
56 ugeth_printk(KERN_DEBUG , format , ## arg)
57 #define ugeth_err(format, arg...) \
58 ugeth_printk(KERN_ERR , format , ## arg)
59 #define ugeth_info(format, arg...) \
60 ugeth_printk(KERN_INFO , format , ## arg)
61 #define ugeth_warn(format, arg...) \
62 ugeth_printk(KERN_WARNING , format , ## arg)
64 #ifdef UGETH_VERBOSE_DEBUG
65 #define ugeth_vdbg ugeth_dbg
66 #else
67 #define ugeth_vdbg(fmt, args...) do { } while (0)
68 #endif /* UGETH_VERBOSE_DEBUG */
70 static DEFINE_SPINLOCK(ugeth_lock);
72 static struct ucc_geth_info ugeth_primary_info = {
74 .bd_mem_part = MEM_PART_SYSTEM,
75 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
76 .max_rx_buf_length = 1536,
77 /* adjusted at startup if max-speed 1000 */
78 .urfs = UCC_GETH_URFS_INIT,
79 .urfet = UCC_GETH_URFET_INIT,
80 .urfset = UCC_GETH_URFSET_INIT,
81 .utfs = UCC_GETH_UTFS_INIT,
82 .utfet = UCC_GETH_UTFET_INIT,
83 .utftt = UCC_GETH_UTFTT_INIT,
85 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
86 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
87 .tenc = UCC_FAST_TX_ENCODING_NRZ,
88 .renc = UCC_FAST_RX_ENCODING_NRZ,
89 .tcrc = UCC_FAST_16_BIT_CRC,
90 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
94 .extendedFilteringChainPointer = ((uint32_t) NULL),
95 .typeorlen = 3072 /*1536 */ ,
96 .nonBackToBackIfgPart1 = 0x40,
97 .nonBackToBackIfgPart2 = 0x60,
98 .miminumInterFrameGapEnforcement = 0x50,
99 .backToBackInterFrameGap = 0x60,
103 .strictpriorityq = 0xff,
104 .altBebTruncation = 0xa,
106 .maxRetransmission = 0xf,
107 .collisionWindow = 0x37,
108 .receiveFlowControl = 1,
109 .maxGroupAddrInHash = 4,
110 .maxIndAddrInHash = 4,
112 .maxFrameLength = 1518,
113 .minFrameLength = 64,
117 .ecamptr = ((uint32_t) NULL),
118 .eventRegMask = UCCE_OTHER,
119 .pausePeriod = 0xf000,
120 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
141 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
142 .largestexternallookupkeysize =
143 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
144 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
145 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
146 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
147 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
148 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
149 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
150 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
151 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
152 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
153 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
156 static struct ucc_geth_info ugeth_info[8];
159 static void mem_disp(u8 *addr, int size)
162 int size16Align = (size >> 4) << 4;
163 int size4Align = (size >> 2) << 2;
168 for (i = addr; (u32) i < (u32) addr + size16Align; i += 16)
169 printk("0x%08x: %08x %08x %08x %08x\r\n",
173 *((u32 *) (i + 8)), *((u32 *) (i + 12)));
175 printk("0x%08x: ", (u32) i);
176 for (; (u32) i < (u32) addr + size4Align; i += 4)
177 printk("%08x ", *((u32 *) (i)));
178 for (; (u32) i < (u32) addr + size; i++)
179 printk("%02x", *((u8 *) (i)));
185 #ifdef CONFIG_UGETH_FILTERING
186 static void enqueue(struct list_head *node, struct list_head *lh)
190 spin_lock_irqsave(&ugeth_lock, flags);
191 list_add_tail(node, lh);
192 spin_unlock_irqrestore(&ugeth_lock, flags);
194 #endif /* CONFIG_UGETH_FILTERING */
196 static struct list_head *dequeue(struct list_head *lh)
200 spin_lock_irqsave(&ugeth_lock, flags);
201 if (!list_empty(lh)) {
202 struct list_head *node = lh->next;
204 spin_unlock_irqrestore(&ugeth_lock, flags);
207 spin_unlock_irqrestore(&ugeth_lock, flags);
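/* Allocate a new receive skb for the given buffer descriptor: reserve enough
 * bytes to align skb->data to UCC_GETH_RX_DATA_BUF_ALIGNMENT, write the buffer
 * address into the BD and mark the BD empty (R_E) with interrupt-on-receive
 * (R_I), preserving only the wrap bit (R_W). */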
212 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
214 struct sk_buff *skb = NULL;
216 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
217 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
222 /* We need the data buffer to be aligned properly. We will reserve
223 * as many bytes as needed to align the data properly
226 UCC_GETH_RX_DATA_BUF_ALIGNMENT -
227 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
230 skb->dev = ugeth->dev;
232 out_be32(&((struct qe_bd *)bd)->buf,
235 ugeth->ug_info->uf_info.max_rx_buf_length +
236 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
239 out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W)));
244 static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
251 bd = ugeth->p_rx_bd_ring[rxQ];
255 bd_status = in_be32((u32*)bd);
256 skb = get_new_skb(ugeth, bd);
258 if (!skb) /* If we cannot allocate a data buffer,
259 abort; cleanup is done elsewhere */
262 ugeth->rx_skbuff[rxQ][i] = skb;
264 /* advance the BD pointer */
265 bd += sizeof(struct qe_bd);
267 } while (!(bd_status & R_W));
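/* Populate one array of init-enet entries (Tx or Rx) in the shadow parameter
 * RAM: each entry is given a free SNUM from the QE and, except for the first
 * Rx entry (which carries no page), a MURAM allocation of thread_size bytes;
 * SNUM, RISC allocation and MURAM offset are packed into the 32-bit entry.
 * On allocation failure the SNUM is returned to the pool. */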
272 static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
273 volatile u32 *p_start,
276 u32 thread_alignment,
277 enum qe_risc_allocation risc,
278 int skip_page_for_first_entry)
280 u32 init_enet_offset;
284 for (i = 0; i < num_entries; i++) {
285 if ((snum = qe_get_snum()) < 0) {
286 ugeth_err("fill_init_enet_entries: Can not get SNUM.");
289 if ((i == 0) && skip_page_for_first_entry)
290 /* First entry of Rx does not have page */
291 init_enet_offset = 0;
294 qe_muram_alloc(thread_size, thread_alignment);
295 if (IS_MURAM_ERR(init_enet_offset)) {
297 ("fill_init_enet_entries: Can not allocate DPRAM memory.");
298 qe_put_snum((u8) snum);
303 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
310 static int return_init_enet_entries(struct ucc_geth_private *ugeth,
311 volatile u32 *p_start,
313 enum qe_risc_allocation risc,
314 int skip_page_for_first_entry)
316 u32 init_enet_offset;
320 for (i = 0; i < num_entries; i++) {
321 /* Check that this entry was actually valid --
322 needed in case the allocations failed */
323 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
325 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
326 ENET_INIT_PARAM_SNUM_SHIFT;
327 qe_put_snum((u8) snum);
328 if (!((i == 0) && skip_page_for_first_entry)) {
329 /* First entry of Rx does not have page */
332 ENET_INIT_PARAM_PTR_MASK);
333 qe_muram_free(init_enet_offset);
335 *(p_start++) = 0; /* Just for cosmetics */
343 static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
344 volatile u32 *p_start,
347 enum qe_risc_allocation risc,
348 int skip_page_for_first_entry)
350 u32 init_enet_offset;
354 for (i = 0; i < num_entries; i++) {
355 /* Check that this entry was actually valid --
356 needed in case the allocations failed */
357 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
359 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
360 ENET_INIT_PARAM_SNUM_SHIFT;
361 qe_put_snum((u8) snum);
362 if (!((i == 0) && skip_page_for_first_entry)) {
363 /* First entry of Rx does not have page */
366 ENET_INIT_PARAM_PTR_MASK);
367 ugeth_info("Init enet entry %d:", i);
368 ugeth_info("Base address: 0x%08x",
370 qe_muram_addr(init_enet_offset));
371 mem_disp(qe_muram_addr(init_enet_offset),
382 #ifdef CONFIG_UGETH_FILTERING
383 static struct enet_addr_container *get_enet_addr_container(void)
385 struct enet_addr_container *enet_addr_cont;
387 /* allocate memory */
388 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
389 if (!enet_addr_cont) {
390 ugeth_err("%s: No memory for enet_addr_container object.",
395 return enet_addr_cont;
397 #endif /* CONFIG_UGETH_FILTERING */
399 static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
401 kfree(enet_addr_cont);
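/* Write a 6-byte MAC address into three consecutive big-endian 16-bit
 * registers with the byte order reversed, so byte 5 lands in the high
 * half of reg[0] and byte 0 in the low half of reg[2]. */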
404 static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
406 out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
407 out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
408 out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
411 #ifdef CONFIG_UGETH_FILTERING
412 static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
413 u8 *p_enet_addr, u8 paddr_num)
415 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
417 if (!(paddr_num < NUM_OF_PADDRS)) {
418 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
423 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
426 /* Ethernet frames are defined in Little Endian mode, */
427 /* therefore to insert the address we reverse the bytes. */
428 set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr);
431 #endif /* CONFIG_UGETH_FILTERING */
433 static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
435 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
437 if (!(paddr_num < NUM_OF_PADDRS)) {
438 ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__);
443 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
446 /* Writing address ff.ff.ff.ff.ff.ff disables address
447 recognition for this register */
448 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
449 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
450 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
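/* Load one (byte-reversed) address into the temporary address registers of the
 * 82xx-style address filtering PRAM and issue the SET_GROUP_ADDRESS host
 * command, which lets the QE microcode fold the address into the
 * group/individual hash filter. */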
455 static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
458 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
462 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
466 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
468 /* Ethernet frames are defined in Little Endian mode,
469 therefore to insert */
470 /* the address into the hash (Big Endian mode), we reverse the bytes.*/
472 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
474 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
475 QE_CR_PROTOCOL_ETHERNET, 0);
478 #ifdef CONFIG_UGETH_MAGIC_PACKET
479 static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
481 struct ucc_fast_private *uccf;
482 struct ucc_geth *ug_regs;
486 ug_regs = ugeth->ug_regs;
488 /* Enable interrupts for magic packet detection */
489 uccm = in_be32(uccf->p_uccm);
491 out_be32(uccf->p_uccm, uccm);
493 /* Enable magic packet detection */
494 maccfg2 = in_be32(&ug_regs->maccfg2);
495 maccfg2 |= MACCFG2_MPE;
496 out_be32(&ug_regs->maccfg2, maccfg2);
499 static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
501 struct ucc_fast_private *uccf;
502 struct ucc_geth *ug_regs;
506 ug_regs = ugeth->ug_regs;
508 /* Disable interrupts for magic packet detection */
509 uccm = in_be32(uccf->p_uccm);
511 out_be32(uccf->p_uccm, uccm);
513 /* Disable magic packet detection */
514 maccfg2 = in_be32(&ug_regs->maccfg2);
515 maccfg2 &= ~MACCFG2_MPE;
516 out_be32(&ug_regs->maccfg2, maccfg2);
518 #endif /* CONFIG_UGETH_MAGIC_PACKET */
520 static inline int compare_addr(u8 **addr1, u8 **addr2)
522 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
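/* Copy counters into whichever destination structures the caller supplied:
 * Tx/Rx firmware counters are read from parameter RAM only when firmware
 * statistics gathering is active, and the hardware MIB counters only when
 * UPSMR[HSE] is set. */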
526 static void get_statistics(struct ucc_geth_private *ugeth,
527 struct ucc_geth_tx_firmware_statistics *
528 tx_firmware_statistics,
529 struct ucc_geth_rx_firmware_statistics *
530 rx_firmware_statistics,
531 struct ucc_geth_hardware_statistics *hardware_statistics)
533 struct ucc_fast *uf_regs;
534 struct ucc_geth *ug_regs;
535 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
536 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
538 ug_regs = ugeth->ug_regs;
539 uf_regs = (struct ucc_fast *) ug_regs;
540 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
541 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
543 /* Tx firmware only if the user handed a pointer and the driver actually
544 gathers Tx firmware statistics */
545 if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
546 tx_firmware_statistics->sicoltx =
547 in_be32(&p_tx_fw_statistics_pram->sicoltx);
548 tx_firmware_statistics->mulcoltx =
549 in_be32(&p_tx_fw_statistics_pram->mulcoltx);
550 tx_firmware_statistics->latecoltxfr =
551 in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
552 tx_firmware_statistics->frabortduecol =
553 in_be32(&p_tx_fw_statistics_pram->frabortduecol);
554 tx_firmware_statistics->frlostinmactxer =
555 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
556 tx_firmware_statistics->carriersenseertx =
557 in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
558 tx_firmware_statistics->frtxok =
559 in_be32(&p_tx_fw_statistics_pram->frtxok);
560 tx_firmware_statistics->txfrexcessivedefer =
561 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
562 tx_firmware_statistics->txpkts256 =
563 in_be32(&p_tx_fw_statistics_pram->txpkts256);
564 tx_firmware_statistics->txpkts512 =
565 in_be32(&p_tx_fw_statistics_pram->txpkts512);
566 tx_firmware_statistics->txpkts1024 =
567 in_be32(&p_tx_fw_statistics_pram->txpkts1024);
568 tx_firmware_statistics->txpktsjumbo =
569 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
572 /* Rx firmware only if the user handed a pointer and the driver actually
573 * gathers Rx firmware statistics */
574 if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
576 rx_firmware_statistics->frrxfcser =
577 in_be32(&p_rx_fw_statistics_pram->frrxfcser);
578 rx_firmware_statistics->fraligner =
579 in_be32(&p_rx_fw_statistics_pram->fraligner);
580 rx_firmware_statistics->inrangelenrxer =
581 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
582 rx_firmware_statistics->outrangelenrxer =
583 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
584 rx_firmware_statistics->frtoolong =
585 in_be32(&p_rx_fw_statistics_pram->frtoolong);
586 rx_firmware_statistics->runt =
587 in_be32(&p_rx_fw_statistics_pram->runt);
588 rx_firmware_statistics->verylongevent =
589 in_be32(&p_rx_fw_statistics_pram->verylongevent);
590 rx_firmware_statistics->symbolerror =
591 in_be32(&p_rx_fw_statistics_pram->symbolerror);
592 rx_firmware_statistics->dropbsy =
593 in_be32(&p_rx_fw_statistics_pram->dropbsy);
594 for (i = 0; i < 0x8; i++)
595 rx_firmware_statistics->res0[i] =
596 p_rx_fw_statistics_pram->res0[i];
597 rx_firmware_statistics->mismatchdrop =
598 in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
599 rx_firmware_statistics->underpkts =
600 in_be32(&p_rx_fw_statistics_pram->underpkts);
601 rx_firmware_statistics->pkts256 =
602 in_be32(&p_rx_fw_statistics_pram->pkts256);
603 rx_firmware_statistics->pkts512 =
604 in_be32(&p_rx_fw_statistics_pram->pkts512);
605 rx_firmware_statistics->pkts1024 =
606 in_be32(&p_rx_fw_statistics_pram->pkts1024);
607 rx_firmware_statistics->pktsjumbo =
608 in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
609 rx_firmware_statistics->frlossinmacer =
610 in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
611 rx_firmware_statistics->pausefr =
612 in_be32(&p_rx_fw_statistics_pram->pausefr);
613 for (i = 0; i < 0x4; i++)
614 rx_firmware_statistics->res1[i] =
615 p_rx_fw_statistics_pram->res1[i];
616 rx_firmware_statistics->removevlan =
617 in_be32(&p_rx_fw_statistics_pram->removevlan);
618 rx_firmware_statistics->replacevlan =
619 in_be32(&p_rx_fw_statistics_pram->replacevlan);
620 rx_firmware_statistics->insertvlan =
621 in_be32(&p_rx_fw_statistics_pram->insertvlan);
624 /* Hardware only if the user handed a pointer and the driver actually
625 gathers hardware statistics */
626 if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
627 hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
628 hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
629 hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
630 hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
631 hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
632 hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
633 hardware_statistics->txok = in_be32(&ug_regs->txok);
634 hardware_statistics->txcf = in_be16(&ug_regs->txcf);
635 hardware_statistics->tmca = in_be32(&ug_regs->tmca);
636 hardware_statistics->tbca = in_be32(&ug_regs->tbca);
637 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
638 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
639 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
640 hardware_statistics->rmca = in_be32(&ug_regs->rmca);
641 hardware_statistics->rbca = in_be32(&ug_regs->rbca);
645 static void dump_bds(struct ucc_geth_private *ugeth)
650 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
651 if (ugeth->p_tx_bd_ring[i]) {
653 (ugeth->ug_info->bdRingLenTx[i] *
654 sizeof(struct qe_bd));
655 ugeth_info("TX BDs[%d]", i);
656 mem_disp(ugeth->p_tx_bd_ring[i], length);
659 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
660 if (ugeth->p_rx_bd_ring[i]) {
662 (ugeth->ug_info->bdRingLenRx[i] *
663 sizeof(struct qe_bd));
664 ugeth_info("RX BDs[%d]", i);
665 mem_disp(ugeth->p_rx_bd_ring[i], length);
670 static void dump_regs(struct ucc_geth_private *ugeth)
674 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
675 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
677 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
678 (u32) & ugeth->ug_regs->maccfg1,
679 in_be32(&ugeth->ug_regs->maccfg1));
680 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
681 (u32) & ugeth->ug_regs->maccfg2,
682 in_be32(&ugeth->ug_regs->maccfg2));
683 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
684 (u32) & ugeth->ug_regs->ipgifg,
685 in_be32(&ugeth->ug_regs->ipgifg));
686 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
687 (u32) & ugeth->ug_regs->hafdup,
688 in_be32(&ugeth->ug_regs->hafdup));
689 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
690 (u32) & ugeth->ug_regs->ifctl,
691 in_be32(&ugeth->ug_regs->ifctl));
692 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
693 (u32) & ugeth->ug_regs->ifstat,
694 in_be32(&ugeth->ug_regs->ifstat));
695 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
696 (u32) & ugeth->ug_regs->macstnaddr1,
697 in_be32(&ugeth->ug_regs->macstnaddr1));
698 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
699 (u32) & ugeth->ug_regs->macstnaddr2,
700 in_be32(&ugeth->ug_regs->macstnaddr2));
701 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
702 (u32) & ugeth->ug_regs->uempr,
703 in_be32(&ugeth->ug_regs->uempr));
704 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
705 (u32) & ugeth->ug_regs->utbipar,
706 in_be32(&ugeth->ug_regs->utbipar));
707 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
708 (u32) & ugeth->ug_regs->uescr,
709 in_be16(&ugeth->ug_regs->uescr));
710 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
711 (u32) & ugeth->ug_regs->tx64,
712 in_be32(&ugeth->ug_regs->tx64));
713 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
714 (u32) & ugeth->ug_regs->tx127,
715 in_be32(&ugeth->ug_regs->tx127));
716 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
717 (u32) & ugeth->ug_regs->tx255,
718 in_be32(&ugeth->ug_regs->tx255));
719 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
720 (u32) & ugeth->ug_regs->rx64,
721 in_be32(&ugeth->ug_regs->rx64));
722 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
723 (u32) & ugeth->ug_regs->rx127,
724 in_be32(&ugeth->ug_regs->rx127));
725 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
726 (u32) & ugeth->ug_regs->rx255,
727 in_be32(&ugeth->ug_regs->rx255));
728 ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
729 (u32) & ugeth->ug_regs->txok,
730 in_be32(&ugeth->ug_regs->txok));
731 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
732 (u32) & ugeth->ug_regs->txcf,
733 in_be16(&ugeth->ug_regs->txcf));
734 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
735 (u32) & ugeth->ug_regs->tmca,
736 in_be32(&ugeth->ug_regs->tmca));
737 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
738 (u32) & ugeth->ug_regs->tbca,
739 in_be32(&ugeth->ug_regs->tbca));
740 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
741 (u32) & ugeth->ug_regs->rxfok,
742 in_be32(&ugeth->ug_regs->rxfok));
743 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
744 (u32) & ugeth->ug_regs->rxbok,
745 in_be32(&ugeth->ug_regs->rxbok));
746 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
747 (u32) & ugeth->ug_regs->rbyt,
748 in_be32(&ugeth->ug_regs->rbyt));
749 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
750 (u32) & ugeth->ug_regs->rmca,
751 in_be32(&ugeth->ug_regs->rmca));
752 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
753 (u32) & ugeth->ug_regs->rbca,
754 in_be32(&ugeth->ug_regs->rbca));
755 ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
756 (u32) & ugeth->ug_regs->scar,
757 in_be32(&ugeth->ug_regs->scar));
758 ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
759 (u32) & ugeth->ug_regs->scam,
760 in_be32(&ugeth->ug_regs->scam));
762 if (ugeth->p_thread_data_tx) {
763 int numThreadsTxNumerical;
764 switch (ugeth->ug_info->numThreadsTx) {
765 case UCC_GETH_NUM_OF_THREADS_1:
766 numThreadsTxNumerical = 1;
768 case UCC_GETH_NUM_OF_THREADS_2:
769 numThreadsTxNumerical = 2;
771 case UCC_GETH_NUM_OF_THREADS_4:
772 numThreadsTxNumerical = 4;
774 case UCC_GETH_NUM_OF_THREADS_6:
775 numThreadsTxNumerical = 6;
777 case UCC_GETH_NUM_OF_THREADS_8:
778 numThreadsTxNumerical = 8;
781 numThreadsTxNumerical = 0;
785 ugeth_info("Thread data TXs:");
786 ugeth_info("Base address: 0x%08x",
787 (u32) ugeth->p_thread_data_tx);
788 for (i = 0; i < numThreadsTxNumerical; i++) {
789 ugeth_info("Thread data TX[%d]:", i);
790 ugeth_info("Base address: 0x%08x",
791 (u32) & ugeth->p_thread_data_tx[i]);
792 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
793 sizeof(struct ucc_geth_thread_data_tx));
796 if (ugeth->p_thread_data_rx) {
797 int numThreadsRxNumerical;
798 switch (ugeth->ug_info->numThreadsRx) {
799 case UCC_GETH_NUM_OF_THREADS_1:
800 numThreadsRxNumerical = 1;
802 case UCC_GETH_NUM_OF_THREADS_2:
803 numThreadsRxNumerical = 2;
805 case UCC_GETH_NUM_OF_THREADS_4:
806 numThreadsRxNumerical = 4;
808 case UCC_GETH_NUM_OF_THREADS_6:
809 numThreadsRxNumerical = 6;
811 case UCC_GETH_NUM_OF_THREADS_8:
812 numThreadsRxNumerical = 8;
815 numThreadsRxNumerical = 0;
819 ugeth_info("Thread data RX:");
820 ugeth_info("Base address: 0x%08x",
821 (u32) ugeth->p_thread_data_rx);
822 for (i = 0; i < numThreadsRxNumerical; i++) {
823 ugeth_info("Thread data RX[%d]:", i);
824 ugeth_info("Base address: 0x%08x",
825 (u32) & ugeth->p_thread_data_rx[i]);
826 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
827 sizeof(struct ucc_geth_thread_data_rx));
830 if (ugeth->p_exf_glbl_param) {
831 ugeth_info("EXF global param:");
832 ugeth_info("Base address: 0x%08x",
833 (u32) ugeth->p_exf_glbl_param);
834 mem_disp((u8 *) ugeth->p_exf_glbl_param,
835 sizeof(*ugeth->p_exf_glbl_param));
837 if (ugeth->p_tx_glbl_pram) {
838 ugeth_info("TX global param:");
839 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
840 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
841 (u32) & ugeth->p_tx_glbl_pram->temoder,
842 in_be16(&ugeth->p_tx_glbl_pram->temoder));
843 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
844 (u32) & ugeth->p_tx_glbl_pram->sqptr,
845 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
846 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
847 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
848 in_be32(&ugeth->p_tx_glbl_pram->
849 schedulerbasepointer));
850 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
851 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
852 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
853 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
854 (u32) & ugeth->p_tx_glbl_pram->tstate,
855 in_be32(&ugeth->p_tx_glbl_pram->tstate));
856 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
857 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
858 ugeth->p_tx_glbl_pram->iphoffset[0]);
859 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
860 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
861 ugeth->p_tx_glbl_pram->iphoffset[1]);
862 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
863 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
864 ugeth->p_tx_glbl_pram->iphoffset[2]);
865 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
866 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
867 ugeth->p_tx_glbl_pram->iphoffset[3]);
868 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
869 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
870 ugeth->p_tx_glbl_pram->iphoffset[4]);
871 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
872 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
873 ugeth->p_tx_glbl_pram->iphoffset[5]);
874 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
875 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
876 ugeth->p_tx_glbl_pram->iphoffset[6]);
877 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
878 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
879 ugeth->p_tx_glbl_pram->iphoffset[7]);
880 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
881 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
882 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
883 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
884 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
885 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
886 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
887 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
888 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
889 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
890 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
891 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
892 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
893 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
894 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
895 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
896 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
897 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
898 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
899 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
900 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
901 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
902 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
903 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
904 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
905 (u32) & ugeth->p_tx_glbl_pram->tqptr,
906 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
908 if (ugeth->p_rx_glbl_pram) {
909 ugeth_info("RX global param:");
910 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
911 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
912 (u32) & ugeth->p_rx_glbl_pram->remoder,
913 in_be32(&ugeth->p_rx_glbl_pram->remoder));
914 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
915 (u32) & ugeth->p_rx_glbl_pram->rqptr,
916 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
917 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
918 (u32) & ugeth->p_rx_glbl_pram->typeorlen,
919 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
920 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
921 (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
922 ugeth->p_rx_glbl_pram->rxgstpack);
923 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
924 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
925 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
926 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
927 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
928 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
929 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
930 (u32) & ugeth->p_rx_glbl_pram->rstate,
931 ugeth->p_rx_glbl_pram->rstate);
932 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
933 (u32) & ugeth->p_rx_glbl_pram->mrblr,
934 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
935 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
936 (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
937 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
938 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
939 (u32) & ugeth->p_rx_glbl_pram->mflr,
940 in_be16(&ugeth->p_rx_glbl_pram->mflr));
941 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
942 (u32) & ugeth->p_rx_glbl_pram->minflr,
943 in_be16(&ugeth->p_rx_glbl_pram->minflr));
944 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
945 (u32) & ugeth->p_rx_glbl_pram->maxd1,
946 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
947 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
948 (u32) & ugeth->p_rx_glbl_pram->maxd2,
949 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
950 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
951 (u32) & ugeth->p_rx_glbl_pram->ecamptr,
952 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
953 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
954 (u32) & ugeth->p_rx_glbl_pram->l2qt,
955 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
956 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
957 (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
958 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
959 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
960 (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
961 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
962 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
963 (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
964 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
965 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
966 (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
967 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
968 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
969 (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
970 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
971 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
972 (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
973 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
974 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
975 (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
976 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
977 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
978 (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
979 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
980 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
981 (u32) & ugeth->p_rx_glbl_pram->vlantype,
982 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
983 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
984 (u32) & ugeth->p_rx_glbl_pram->vlantci,
985 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
986 for (i = 0; i < 64; i++)
988 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
990 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
991 ugeth->p_rx_glbl_pram->addressfiltering[i]);
992 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
993 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
994 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
996 if (ugeth->p_send_q_mem_reg) {
997 ugeth_info("Send Q memory registers:");
998 ugeth_info("Base address: 0x%08x",
999 (u32) ugeth->p_send_q_mem_reg);
1000 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1001 ugeth_info("SQQD[%d]:", i);
1002 ugeth_info("Base address: 0x%08x",
1003 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
1004 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
1005 sizeof(struct ucc_geth_send_queue_qd));
1008 if (ugeth->p_scheduler) {
1009 ugeth_info("Scheduler:");
1010 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
1011 mem_disp((u8 *) ugeth->p_scheduler,
1012 sizeof(*ugeth->p_scheduler));
1014 if (ugeth->p_tx_fw_statistics_pram) {
1015 ugeth_info("TX FW statistics pram:");
1016 ugeth_info("Base address: 0x%08x",
1017 (u32) ugeth->p_tx_fw_statistics_pram);
1018 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
1019 sizeof(*ugeth->p_tx_fw_statistics_pram));
1021 if (ugeth->p_rx_fw_statistics_pram) {
1022 ugeth_info("RX FW statistics pram:");
1023 ugeth_info("Base address: 0x%08x",
1024 (u32) ugeth->p_rx_fw_statistics_pram);
1025 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
1026 sizeof(*ugeth->p_rx_fw_statistics_pram));
1028 if (ugeth->p_rx_irq_coalescing_tbl) {
1029 ugeth_info("RX IRQ coalescing tables:");
1030 ugeth_info("Base address: 0x%08x",
1031 (u32) ugeth->p_rx_irq_coalescing_tbl);
1032 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1033 ugeth_info("RX IRQ coalescing table entry[%d]:", i);
1034 ugeth_info("Base address: 0x%08x",
1035 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1036 coalescingentry[i]);
1038 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
1039 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1040 coalescingentry[i].interruptcoalescingmaxvalue,
1041 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1043 interruptcoalescingmaxvalue));
1045 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
1046 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1047 coalescingentry[i].interruptcoalescingcounter,
1048 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1050 interruptcoalescingcounter));
1053 if (ugeth->p_rx_bd_qs_tbl) {
1054 ugeth_info("RX BD QS tables:");
1055 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
1056 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1057 ugeth_info("RX BD QS table[%d]:", i);
1058 ugeth_info("Base address: 0x%08x",
1059 (u32) & ugeth->p_rx_bd_qs_tbl[i]);
1061 ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
1062 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
1063 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
1065 ("bdptr : addr - 0x%08x, val - 0x%08x",
1066 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
1067 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
1069 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
1070 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
1071 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
1072 externalbdbaseptr));
1074 ("externalbdptr : addr - 0x%08x, val - 0x%08x",
1075 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
1076 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
1077 ugeth_info("ucode RX Prefetched BDs:");
1078 ugeth_info("Base address: 0x%08x",
1080 qe_muram_addr(in_be32
1081 (&ugeth->p_rx_bd_qs_tbl[i].
1084 qe_muram_addr(in_be32
1085 (&ugeth->p_rx_bd_qs_tbl[i].
1087 sizeof(struct ucc_geth_rx_prefetched_bds));
1090 if (ugeth->p_init_enet_param_shadow) {
1092 ugeth_info("Init enet param shadow:");
1093 ugeth_info("Base address: 0x%08x",
1094 (u32) ugeth->p_init_enet_param_shadow);
1095 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1096 sizeof(*ugeth->p_init_enet_param_shadow));
1098 size = sizeof(struct ucc_geth_thread_rx_pram);
1099 if (ugeth->ug_info->rxExtendedFiltering) {
1101 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1102 if (ugeth->ug_info->largestexternallookupkeysize ==
1103 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1105 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1106 if (ugeth->ug_info->largestexternallookupkeysize ==
1107 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1109 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1112 dump_init_enet_entries(ugeth,
1113 &(ugeth->p_init_enet_param_shadow->
1115 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1116 sizeof(struct ucc_geth_thread_tx_pram),
1117 ugeth->ug_info->riscTx, 0);
1118 dump_init_enet_entries(ugeth,
1119 &(ugeth->p_init_enet_param_shadow->
1121 ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1122 ugeth->ug_info->riscRx, 1);
1127 static void init_default_reg_vals(volatile u32 *upsmr_register,
1128 volatile u32 *maccfg1_register,
1129 volatile u32 *maccfg2_register)
1131 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1132 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1133 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
1136 static int init_half_duplex_params(int alt_beb,
1137 int back_pressure_no_backoff,
1140 u8 alt_beb_truncation,
1141 u8 max_retransmissions,
1142 u8 collision_window,
1143 volatile u32 *hafdup_register)
1147 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1148 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1149 (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1152 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1155 value |= HALFDUP_ALT_BEB;
1156 if (back_pressure_no_backoff)
1157 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1159 value |= HALFDUP_NO_BACKOFF;
1161 value |= HALFDUP_EXCESSIVE_DEFER;
1163 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1165 value |= collision_window;
1167 out_be32(hafdup_register, value);
1171 static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1175 volatile u32 *ipgifg_register)
1179 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
1180 IPG part 2 */
1181 if (non_btb_cs_ipg > non_btb_ipg)
1184 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1185 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1186 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1187 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1191 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1192 IPGIFG_NBTB_CS_IPG_MASK);
1194 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1195 IPGIFG_NBTB_IPG_MASK);
1197 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1198 IPGIFG_MIN_IFG_MASK);
1199 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
1201 out_be32(ipgifg_register, value);
1205 static int init_flow_control_params(u32 automatic_flow_control_mode,
1206 int rx_flow_control_enable,
1207 int tx_flow_control_enable,
1209 u16 extension_field,
1210 volatile u32 *upsmr_register,
1211 volatile u32 *uempr_register,
1212 volatile u32 *maccfg1_register)
1216 /* Set UEMPR register */
1217 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1218 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1219 out_be32(uempr_register, value);
1221 /* Set UPSMR register */
1222 value = in_be32(upsmr_register);
1223 value |= automatic_flow_control_mode;
1224 out_be32(upsmr_register, value);
1226 value = in_be32(maccfg1_register);
1227 if (rx_flow_control_enable)
1228 value |= MACCFG1_FLOW_RX;
1229 if (tx_flow_control_enable)
1230 value |= MACCFG1_FLOW_TX;
1231 out_be32(maccfg1_register, value);
1236 static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1237 int auto_zero_hardware_statistics,
1238 volatile u32 *upsmr_register,
1239 volatile u16 *uescr_register)
1241 u32 upsmr_value = 0;
1242 u16 uescr_value = 0;
1243 /* Enable hardware statistics gathering if requested */
1244 if (enable_hardware_statistics) {
1245 upsmr_value = in_be32(upsmr_register);
1246 upsmr_value |= UPSMR_HSE;
1247 out_be32(upsmr_register, upsmr_value);
1250 /* Clear hardware statistics counters */
1251 uescr_value = in_be16(uescr_register);
1252 uescr_value |= UESCR_CLRCNT;
1253 /* Automatically zero hardware statistics counters on read,
1254 if requested */
1255 if (auto_zero_hardware_statistics)
1256 uescr_value |= UESCR_AUTOZ;
1257 out_be16(uescr_register, uescr_value);
1262 static int init_firmware_statistics_gathering_mode(int
1263 enable_tx_firmware_statistics,
1264 int enable_rx_firmware_statistics,
1265 volatile u32 *tx_rmon_base_ptr,
1266 u32 tx_firmware_statistics_structure_address,
1267 volatile u32 *rx_rmon_base_ptr,
1268 u32 rx_firmware_statistics_structure_address,
1269 volatile u16 *temoder_register,
1270 volatile u32 *remoder_register)
1272 /* Note: this function does not check if */
1273 /* the parameters it receives are NULL */
1277 if (enable_tx_firmware_statistics) {
1278 out_be32(tx_rmon_base_ptr,
1279 tx_firmware_statistics_structure_address);
1280 temoder_value = in_be16(temoder_register);
1281 temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
1282 out_be16(temoder_register, temoder_value);
1285 if (enable_rx_firmware_statistics) {
1286 out_be32(rx_rmon_base_ptr,
1287 rx_firmware_statistics_structure_address);
1288 remoder_value = in_be32(remoder_register);
1289 remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
1290 out_be32(remoder_register, remoder_value);
1296 static int init_mac_station_addr_regs(u8 address_byte_0,
1302 volatile u32 *macstnaddr1_register,
1303 volatile u32 *macstnaddr2_register)
1307 /* Example: for a station address of 0x12345678ABCD, */
1308 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
1310 /* MACSTNADDR1 Register: */
1313 /* station address byte 5 station address byte 4 */
1315 /* station address byte 3 station address byte 2 */
1316 value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1317 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1318 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1319 value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1321 out_be32(macstnaddr1_register, value);
1323 /* MACSTNADDR2 Register: */
1326 /* station address byte 1 station address byte 0 */
1328 /* reserved reserved */
1330 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1331 value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
1333 out_be32(macstnaddr2_register, value);
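/* Worked example using the example address from the comment above,
 * 0x12345678ABCD: MACSTNADDR1 = 0xCDAB7856 (bytes 5..2) and
 * MACSTNADDR2 = 0x34120000 (bytes 1..0). */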
1338 static int init_check_frame_length_mode(int length_check,
1339 volatile u32 *maccfg2_register)
1343 value = in_be32(maccfg2_register);
1346 value |= MACCFG2_LC;
1348 value &= ~MACCFG2_LC;
1350 out_be32(maccfg2_register, value);
1354 static int init_preamble_length(u8 preamble_length,
1355 volatile u32 *maccfg2_register)
1359 if ((preamble_length < 3) || (preamble_length > 7))
1362 value = in_be32(maccfg2_register);
1363 value &= ~MACCFG2_PREL_MASK;
1364 value |= (preamble_length << MACCFG2_PREL_SHIFT);
1365 out_be32(maccfg2_register, value);
1369 static int init_rx_parameters(int reject_broadcast,
1370 int receive_short_frames,
1371 int promiscuous, volatile u32 *upsmr_register)
1375 value = in_be32(upsmr_register);
1377 if (reject_broadcast)
1380 value &= ~UPSMR_BRO;
1382 if (receive_short_frames)
1385 value &= ~UPSMR_RSH;
1390 value &= ~UPSMR_PRO;
1392 out_be32(upsmr_register, value);
1397 static int init_max_rx_buff_len(u16 max_rx_buf_len,
1398 volatile u16 *mrblr_register)
1400 /* max_rx_buf_len value must be a multiple of 128 */
1401 if ((max_rx_buf_len == 0)
1402 || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
1405 out_be16(mrblr_register, max_rx_buf_len);
1409 static int init_min_frame_len(u16 min_frame_length,
1410 volatile u16 *minflr_register,
1411 volatile u16 *mrblr_register)
1413 u16 mrblr_value = 0;
1415 mrblr_value = in_be16(mrblr_register);
1416 if (min_frame_length >= (mrblr_value - 4))
1419 out_be16(minflr_register, min_frame_length);
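/* Program MACCFG2 and UPSMR to match the PHY interface mode and maximum speed:
 * nibble (MII-width) mode for 10/100 and byte (GMII-width) mode for 1000, the
 * reduced-mode and TBI bits as needed, and autonegotiation turned off in the
 * TBI/RTBI PCS. Also validates the configured preamble length. */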
1423 static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1425 struct ucc_geth_info *ug_info;
1426 struct ucc_geth *ug_regs;
1427 struct ucc_fast *uf_regs;
1429 u32 upsmr, maccfg2, tbiBaseAddress;
1432 ugeth_vdbg("%s: IN", __FUNCTION__);
1434 ug_info = ugeth->ug_info;
1435 ug_regs = ugeth->ug_regs;
1436 uf_regs = ugeth->uccf->uf_regs;
1439 maccfg2 = in_be32(&ug_regs->maccfg2);
1440 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1441 if ((ugeth->max_speed == SPEED_10) ||
1442 (ugeth->max_speed == SPEED_100))
1443 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1444 else if (ugeth->max_speed == SPEED_1000)
1445 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1446 maccfg2 |= ug_info->padAndCrc;
1447 out_be32(&ug_regs->maccfg2, maccfg2);
1450 upsmr = in_be32(&uf_regs->upsmr);
1451 upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
1452 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1453 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1454 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1455 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1457 switch (ugeth->max_speed) {
1459 upsmr |= UPSMR_R10M;
1462 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
1466 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1467 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1468 upsmr |= UPSMR_TBIM;
1470 out_be32(&uf_regs->upsmr, upsmr);
1472 /* Disable autonegotiation in tbi mode, because by default it
1473 comes up in autonegotiation mode. */
1474 /* Note that this depends on proper setting in utbipar register. */
1475 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1476 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1477 tbiBaseAddress = in_be32(&ug_regs->utbipar);
1478 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
1479 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
1480 value = ugeth->phydev->bus->read(ugeth->phydev->bus,
1481 (u8) tbiBaseAddress, ENET_TBI_MII_CR);
1482 value &= ~0x1000; /* Turn off autonegotiation */
1483 ugeth->phydev->bus->write(ugeth->phydev->bus,
1484 (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
1487 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1489 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1492 ("%s: Preamble length must be between 3 and 7 inclusive.",
1500 /* Called every time the controller might need to be made
1501 * aware of new link state. The PHY code conveys this
1502 * information through variables in the ugeth structure, and this
1503 * function converts those variables into the appropriate
1504 * register values, and can bring down the device if needed.
1507 static void adjust_link(struct net_device *dev)
1509 struct ucc_geth_private *ugeth = netdev_priv(dev);
1510 struct ucc_geth *ug_regs;
1511 struct ucc_fast *uf_regs;
1512 struct phy_device *phydev = ugeth->phydev;
1513 unsigned long flags;
1516 ug_regs = ugeth->ug_regs;
1517 uf_regs = ugeth->uccf->uf_regs;
1519 spin_lock_irqsave(&ugeth->lock, flags);
1522 u32 tempval = in_be32(&ug_regs->maccfg2);
1523 u32 upsmr = in_be32(&uf_regs->upsmr);
1524 /* Now we make sure that we can be in full duplex mode.
1525 * If not, we operate in half-duplex mode. */
1526 if (phydev->duplex != ugeth->oldduplex) {
1528 if (!(phydev->duplex))
1529 tempval &= ~(MACCFG2_FDX);
1531 tempval |= MACCFG2_FDX;
1532 ugeth->oldduplex = phydev->duplex;
1535 if (phydev->speed != ugeth->oldspeed) {
1537 switch (phydev->speed) {
1539 tempval = ((tempval &
1540 ~(MACCFG2_INTERFACE_MODE_MASK)) |
1541 MACCFG2_INTERFACE_MODE_BYTE);
1545 tempval = ((tempval &
1546 ~(MACCFG2_INTERFACE_MODE_MASK)) |
1547 MACCFG2_INTERFACE_MODE_NIBBLE);
1548 /* if reduced mode, re-set UPSMR.R10M */
1549 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1550 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1551 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1552 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1553 if (phydev->speed == SPEED_10)
1554 upsmr |= UPSMR_R10M;
1556 upsmr &= ~(UPSMR_R10M);
1560 if (netif_msg_link(ugeth))
1562 "%s: Ack! Speed (%d) is not 10/100/1000!",
1563 dev->name, phydev->speed);
1566 ugeth->oldspeed = phydev->speed;
1569 out_be32(&ug_regs->maccfg2, tempval);
1570 out_be32(&uf_regs->upsmr, upsmr);
1572 if (!ugeth->oldlink) {
1575 netif_schedule(dev);
1577 } else if (ugeth->oldlink) {
1580 ugeth->oldspeed = 0;
1581 ugeth->oldduplex = -1;
1584 if (new_state && netif_msg_link(ugeth))
1585 phy_print_status(phydev);
1587 spin_unlock_irqrestore(&ugeth->lock, flags);
1590 /* Configure the PHY for dev.
1591 * Returns 0 on success, -1 on failure.
1593 static int init_phy(struct net_device *dev)
1595 struct ucc_geth_private *priv = netdev_priv(dev);
1596 struct phy_device *phydev;
1597 char phy_id[BUS_ID_SIZE];
1601 priv->oldduplex = -1;
1603 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->ug_info->mdio_bus,
1604 priv->ug_info->phy_address);
1606 phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
1608 if (IS_ERR(phydev)) {
1609 printk("%s: Could not attach to PHY\n", dev->name);
1610 return PTR_ERR(phydev);
1613 phydev->supported &= (ADVERTISED_10baseT_Half |
1614 ADVERTISED_10baseT_Full |
1615 ADVERTISED_100baseT_Half |
1616 ADVERTISED_100baseT_Full);
1618 if (priv->max_speed == SPEED_1000)
1619 phydev->supported |= ADVERTISED_1000baseT_Full;
1621 phydev->advertising = phydev->supported;
1623 priv->phydev = phydev;
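/* Gracefully stop transmission: mask and clear the GRACEFUL STOP TX event,
 * issue the GRACEFUL_STOP_TX host command, then poll the UCC event register
 * until UCCE_GRA signals that the stop has completed. */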
1630 static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1632 struct ucc_fast_private *uccf;
1638 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1639 temp = in_be32(uccf->p_uccm);
1641 out_be32(uccf->p_uccm, temp);
1642 out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
1644 /* Issue host command */
1646 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1647 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1648 QE_CR_PROTOCOL_ETHERNET, 0);
1650 /* Wait for command to complete */
1652 temp = in_be32(uccf->p_ucce);
1653 } while (!(temp & UCCE_GRA));
1655 uccf->stopped_tx = 1;
1660 static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1662 struct ucc_fast_private *uccf;
1668 /* Clear acknowledge bit */
1669 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1670 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1671 ugeth->p_rx_glbl_pram->rxgstpack = temp;
1673 /* Keep issuing command and checking acknowledge bit until
1674 it is asserted, according to spec */
1676 /* Issue host command */
1678 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1680 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1681 QE_CR_PROTOCOL_ETHERNET, 0);
1683 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1684 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1686 uccf->stopped_rx = 1;
1691 static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
1693 struct ucc_fast_private *uccf;
1699 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1700 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
1701 uccf->stopped_tx = 0;
1706 static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
1708 struct ucc_fast_private *uccf;
1714 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1715 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
1717 uccf->stopped_rx = 0;
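/* Enable the requested Tx/Rx directions on the fast UCC, first restarting any
 * direction that an earlier graceful stop left halted. */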
1722 static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1724 struct ucc_fast_private *uccf;
1725 int enabled_tx, enabled_rx;
1729 /* check if the UCC number is in range. */
1730 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1731 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
1735 enabled_tx = uccf->enabled_tx;
1736 enabled_rx = uccf->enabled_rx;
1738 /* Get Tx and Rx going again, in case this channel was actively
1739 disabled */
1740 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
1741 ugeth_restart_tx(ugeth);
1742 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
1743 ugeth_restart_rx(ugeth);
1745 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
1751 static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
1753 struct ucc_fast_private *uccf;
1757 /* check if the UCC number is in range. */
1758 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1759 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
1763 /* Stop any transmissions */
1764 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
1765 ugeth_graceful_stop_tx(ugeth);
1767 /* Stop any receptions */
1768 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
1769 ugeth_graceful_stop_rx(ugeth);
1771 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
1776 static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
1779 ucc_fast_dump_regs(ugeth->uccf);
1785 #ifdef CONFIG_UGETH_FILTERING
1786 static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params *
1788 struct qe_fltr_tad *qe_fltr_tad)
1792 /* Zero serialized TAD */
1793 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
1795 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
1796 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
1797 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
1798 || (p_UccGethTadParams->vnontag_op !=
1799 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
1801 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
1802 if (p_UccGethTadParams->reject_frame)
1803 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
1805 (u16) (((u16) p_UccGethTadParams->
1806 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
1807 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
1809 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
1810 if (p_UccGethTadParams->vnontag_op ==
1811 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
1812 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
1813 qe_fltr_tad->serialized[1] |=
1814 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
1816 qe_fltr_tad->serialized[2] |=
1817 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
1819 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
1821 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
1826 static struct enet_addr_container
1827 *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth,
1828 struct enet_addr *p_enet_addr)
1830 struct enet_addr_container *enet_addr_cont;
1831 struct list_head *p_lh;
1836 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1837 p_lh = &ugeth->group_hash_q;
1838 p_counter = &(ugeth->numGroupAddrInHash);
1840 p_lh = &ugeth->ind_hash_q;
1841 p_counter = &(ugeth->numIndAddrInHash);
1849 for (i = 0; i < num; i++) {
1851 (struct enet_addr_container *)
1852 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
1853 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
1854 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
1857 return enet_addr_cont; /* Found */
1859 enqueue(&enet_addr_cont->node, p_lh); /* Put it back */
1864 static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth,
1865 struct enet_addr *p_enet_addr)
1867 enum ucc_geth_enet_address_recognition_location location;
1868 struct enet_addr_container *enet_addr_cont;
1869 struct list_head *p_lh;
1874 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1875 p_lh = &ugeth->group_hash_q;
1876 limit = ugeth->ug_info->maxGroupAddrInHash;
1878 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
1879 p_counter = &(ugeth->numGroupAddrInHash);
1881 p_lh = &ugeth->ind_hash_q;
1882 limit = ugeth->ug_info->maxIndAddrInHash;
1884 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
1885 p_counter = &(ugeth->numIndAddrInHash);
1888 if ((enet_addr_cont =
1889 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
1890 list_add(&enet_addr_cont->node, p_lh); /* Put it back */
1893 if ((!p_lh) || (!(*p_counter < limit)))
1895 if (!(enet_addr_cont = get_enet_addr_container()))
1897 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
1898 (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
1899 enet_addr_cont->location = location;
1900 enqueue(&enet_addr_cont->node, p_lh); /* Add it to the list */
1903 hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
1907 static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth,
1908 struct enet_addr *p_enet_addr)
1910 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
1911 struct enet_addr_container *enet_addr_cont;
1912 struct ucc_fast_private *uccf;
1913 enum comm_dir comm_dir;
1915 struct list_head *p_lh;
1916 u32 *addr_h, *addr_l;
1922 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
1927 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr)))
1930 /* It's been found and removed from the CQ. */
1931 /* Now destroy its container */
1932 put_enet_addr_container(enet_addr_cont);
1934 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1935 addr_h = &(p_82xx_addr_filt->gaddr_h);
1936 addr_l = &(p_82xx_addr_filt->gaddr_l);
1937 p_lh = &ugeth->group_hash_q;
1938 p_counter = &(ugeth->numGroupAddrInHash);
1940 addr_h = &(p_82xx_addr_filt->iaddr_h);
1941 addr_l = &(p_82xx_addr_filt->iaddr_l);
1942 p_lh = &ugeth->ind_hash_q;
1943 p_counter = &(ugeth->numIndAddrInHash);
1947 if (uccf->enabled_tx)
1948 comm_dir |= COMM_DIR_TX;
1949 if (uccf->enabled_rx)
1950 comm_dir |= COMM_DIR_RX;
1952 ugeth_disable(ugeth, comm_dir);
1954 /* Clear the hash table. */
1955 out_be32(addr_h, 0x00000000);
1956 out_be32(addr_l, 0x00000000);
1958 /* Add all remaining CQ elements back into hash */
1959 num = --(*p_counter);
1960 for (i = 0; i < num; i++) {
1962 (struct enet_addr_container *)
1963 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
1964 hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
1965 enqueue(&enet_addr_cont->node, p_lh); /* Put it back */
1969 ugeth_enable(ugeth, comm_dir);
1973 #endif /* CONFIG_UGETH_FILTERING */
1975 static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
1980 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
1981 struct ucc_fast_private *uccf;
1982 enum comm_dir comm_dir;
1983 struct list_head *p_lh;
1985 u32 *addr_h, *addr_l;
1991 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
1994 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
1995 addr_h = &(p_82xx_addr_filt->gaddr_h);
1996 addr_l = &(p_82xx_addr_filt->gaddr_l);
1997 p_lh = &ugeth->group_hash_q;
1998 p_counter = &(ugeth->numGroupAddrInHash);
1999 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
2000 addr_h = &(p_82xx_addr_filt->iaddr_h);
2001 addr_l = &(p_82xx_addr_filt->iaddr_l);
2002 p_lh = &ugeth->ind_hash_q;
2003 p_counter = &(ugeth->numIndAddrInHash);
2008 if (uccf->enabled_tx)
2009 comm_dir |= COMM_DIR_TX;
2010 if (uccf->enabled_rx)
2011 comm_dir |= COMM_DIR_RX;
2013 ugeth_disable(ugeth, comm_dir);
2015 /* Clear the hash table. */
2016 out_be32(addr_h, 0x00000000);
2017 out_be32(addr_l, 0x00000000);
2024 /* Delete all remaining CQ elements */
2025 for (i = 0; i < num; i++)
2026 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2031 ugeth_enable(ugeth, comm_dir);
2036 #ifdef CONFIG_UGETH_FILTERING
2037 static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth,
2038 struct enet_addr *p_enet_addr,
2043 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2045 ("%s: multicast address added to paddr will have no "
2046 "effect - is this what you wanted?",
2049 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2050 /* store address in our database */
2051 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2052 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2053 /* put in hardware */
2054 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2056 #endif /* CONFIG_UGETH_FILTERING */
2058 static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
2061 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2062 return hw_clear_addr_in_paddr(ugeth, paddr_num); /* clear in hardware */
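/* Tear-down helper: releases the MURAM parameter pages, BD rings, skb
 * pointer arrays and queued address containers that the startup path
 * allocated, clearing each freed pointer as it goes so a later call
 * does not free anything twice. */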
2065 static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2074 ucc_fast_free(ugeth->uccf);
2076 if (ugeth->p_thread_data_tx) {
2077 qe_muram_free(ugeth->thread_dat_tx_offset);
2078 ugeth->p_thread_data_tx = NULL;
2080 if (ugeth->p_thread_data_rx) {
2081 qe_muram_free(ugeth->thread_dat_rx_offset);
2082 ugeth->p_thread_data_rx = NULL;
2084 if (ugeth->p_exf_glbl_param) {
2085 qe_muram_free(ugeth->exf_glbl_param_offset);
2086 ugeth->p_exf_glbl_param = NULL;
2088 if (ugeth->p_rx_glbl_pram) {
2089 qe_muram_free(ugeth->rx_glbl_pram_offset);
2090 ugeth->p_rx_glbl_pram = NULL;
2092 if (ugeth->p_tx_glbl_pram) {
2093 qe_muram_free(ugeth->tx_glbl_pram_offset);
2094 ugeth->p_tx_glbl_pram = NULL;
2096 if (ugeth->p_send_q_mem_reg) {
2097 qe_muram_free(ugeth->send_q_mem_reg_offset);
2098 ugeth->p_send_q_mem_reg = NULL;
2100 if (ugeth->p_scheduler) {
2101 qe_muram_free(ugeth->scheduler_offset);
2102 ugeth->p_scheduler = NULL;
2104 if (ugeth->p_tx_fw_statistics_pram) {
2105 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2106 ugeth->p_tx_fw_statistics_pram = NULL;
2108 if (ugeth->p_rx_fw_statistics_pram) {
2109 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2110 ugeth->p_rx_fw_statistics_pram = NULL;
2112 if (ugeth->p_rx_irq_coalescing_tbl) {
2113 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2114 ugeth->p_rx_irq_coalescing_tbl = NULL;
2116 if (ugeth->p_rx_bd_qs_tbl) {
2117 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2118 ugeth->p_rx_bd_qs_tbl = NULL;
2120 if (ugeth->p_init_enet_param_shadow) {
2121 return_init_enet_entries(ugeth,
2122 &(ugeth->p_init_enet_param_shadow->
2124 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2125 ugeth->ug_info->riscRx, 1);
2126 return_init_enet_entries(ugeth,
2127 &(ugeth->p_init_enet_param_shadow->
2129 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2130 ugeth->ug_info->riscTx, 0);
2131 kfree(ugeth->p_init_enet_param_shadow);
2132 ugeth->p_init_enet_param_shadow = NULL;
2134 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2135 bd = ugeth->p_tx_bd_ring[i];
2138 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2139 if (ugeth->tx_skbuff[i][j]) {
2140 dma_unmap_single(NULL,
2141 ((qe_bd_t *)bd)->buf,
2142 (in_be32((u32 *)bd) &
2145 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2146 ugeth->tx_skbuff[i][j] = NULL;
2150 kfree(ugeth->tx_skbuff[i]);
2152 if (ugeth->p_tx_bd_ring[i]) {
2153 if (ugeth->ug_info->uf_info.bd_mem_part ==
2155 kfree((void *)ugeth->tx_bd_ring_offset[i]);
2156 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2158 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2159 ugeth->p_tx_bd_ring[i] = NULL;
2162 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2163 if (ugeth->p_rx_bd_ring[i]) {
2164 /* Return existing data buffers in ring */
2165 bd = ugeth->p_rx_bd_ring[i];
2166 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2167 if (ugeth->rx_skbuff[i][j]) {
2168 dma_unmap_single(NULL,
2169 ((struct qe_bd *)bd)->buf,
2171 uf_info.max_rx_buf_length +
2172 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2175 ugeth->rx_skbuff[i][j]);
2176 ugeth->rx_skbuff[i][j] = NULL;
2178 bd += sizeof(struct qe_bd);
2181 kfree(ugeth->rx_skbuff[i]);
2183 if (ugeth->ug_info->uf_info.bd_mem_part ==
2185 kfree((void *)ugeth->rx_bd_ring_offset[i]);
2186 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2188 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2189 ugeth->p_rx_bd_ring[i] = NULL;
2192 while (!list_empty(&ugeth->group_hash_q))
2193 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2194 (dequeue(&ugeth->group_hash_q)));
2195 while (!list_empty(&ugeth->ind_hash_q))
2196 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2197 (dequeue(&ugeth->ind_hash_q)));
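/* net_device set_multicast_list hook: program promiscuous mode via
 * UPSMR, and either open the group hash filter completely (IFF_ALLMULTI)
 * or rebuild it from the device's multicast list, hashing each group
 * address into gaddr_h/gaddr_l through the CPM. */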
2201 static void ucc_geth_set_multi(struct net_device *dev)
2203 struct ucc_geth_private *ugeth;
2204 struct dev_mc_list *dmi;
2205 struct ucc_fast *uf_regs;
2206 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
2211 ugeth = netdev_priv(dev);
2213 uf_regs = ugeth->uccf->uf_regs;
2215 if (dev->flags & IFF_PROMISC) {
2217 uf_regs->upsmr |= UPSMR_PRO;
2221 uf_regs->upsmr &= ~UPSMR_PRO;
2224 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
2225 p_rx_glbl_pram->addressfiltering;
2227 if (dev->flags & IFF_ALLMULTI) {
2228 /* Catch all multicast addresses, so set the
2229 * filter to all 1's.
2231 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2232 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2234 /* Clear filter and add the addresses in the list.
2236 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2237 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2241 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2243 /* Only support group multicast for now.
2245 if (!(dmi->dmi_addr[0] & 1))
2248 /* The address in dmi_addr is stored LSB first,
2249 * while tempaddr must be MSB first, so we
2250 * copy the bytes in reverse order from dmi_addr.
2252 mcptr = (u8 *) dmi->dmi_addr + 5;
2253 tdptr = (u8 *) tempaddr;
2254 for (j = 0; j < 6; j++)
2255 *tdptr++ = *mcptr--;
2257 /* Ask CPM to run CRC and set bit in
2260 hw_add_addr_in_hash(ugeth, tempaddr);
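/* Halt the controller: stop the UCC in both directions, mask and clear
 * all UCCE events, disable the MAC transmitter and receiver, release
 * the interrupt line and finally free all startup allocations. */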
2266 static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2268 struct ucc_geth *ug_regs = ugeth->ug_regs;
2269 struct phy_device *phydev = ugeth->phydev;
2272 ugeth_vdbg("%s: IN", __FUNCTION__);
2274 /* Disable the controller */
2275 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2277 /* Tell the kernel the link is down */
2280 /* Mask all interrupts */
2281 out_be32(ugeth->uccf->p_uccm, 0x00000000);
2283 /* Clear all interrupts */
2284 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2286 /* Disable Rx and Tx */
2287 tempval = in_be32(&ug_regs->maccfg1);
2288 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2289 out_be32(&ug_regs->maccfg1, tempval);
2291 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2293 ucc_geth_memclean(ugeth);
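/* Validate the board-specific ucc_geth_info (BD ring sizes, buffer
 * lengths, queue counts, priority tables), derive the UCCE event mask
 * for the configured queues, bring up the fast UCC block and map the
 * UCC Ethernet register region. */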
2296 static int ucc_struct_init(struct ucc_geth_private *ugeth)
2298 struct ucc_geth_info *ug_info;
2299 struct ucc_fast_info *uf_info;
2302 ug_info = ugeth->ug_info;
2303 uf_info = &ug_info->uf_info;
2305 /* Create CQs for hash tables */
2306 INIT_LIST_HEAD(&ugeth->group_hash_q);
2307 INIT_LIST_HEAD(&ugeth->ind_hash_q);
2309 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2310 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2311 ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
2316 for (i = 0; i < ug_info->numQueuesRx; i++) {
2317 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2318 (ug_info->bdRingLenRx[i] %
2319 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2321 ("%s: Rx BD ring length must be multiple of 4,"
2322 " no smaller than 8.", __FUNCTION__);
2328 for (i = 0; i < ug_info->numQueuesTx; i++) {
2329 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2331 ("%s: Tx BD ring length must be no smaller than 2.",
2338 if ((uf_info->max_rx_buf_length == 0) ||
2339 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2341 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2347 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2348 ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2353 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2354 ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2359 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2360 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2362 ("%s: VLAN priority table entry must not be"
2363 " larger than number of Rx queues.",
2370 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2371 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2373 ("%s: IP priority table entry must not be"
2374 " larger than number of Rx queues.",
2380 if (ug_info->cam && !ug_info->ecamptr) {
2381 ugeth_err("%s: If CAM mode is chosen, a CAM pointer must be supplied.",
2386 if ((ug_info->numStationAddresses !=
2387 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2388 && ug_info->rxExtendedFiltering) {
2389 ugeth_err("%s: Number of station addresses greater than 1 "
2390 "is not allowed in extended parsing mode.",
2395 /* Generate uccm_mask for receive */
2396 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2397 for (i = 0; i < ug_info->numQueuesRx; i++)
2398 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2400 for (i = 0; i < ug_info->numQueuesTx; i++)
2401 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2402 /* Initialize the general fast UCC block. */
2403 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2404 ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2405 ucc_geth_memclean(ugeth);
2409 ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
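/* Main hardware bring-up: program the MAC and flow-control registers,
 * allocate and initialize the Tx/Rx BD rings, fill the Tx and Rx global
 * parameter RAM pages (thread data, send queue table, scheduler,
 * statistics, interrupt coalescing, queue tables), set up address
 * filtering, and finally issue the QE INIT_TX_RX command. */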
2414 static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2416 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
2417 struct ucc_geth_init_pram *p_init_enet_pram;
2418 struct ucc_fast_private *uccf;
2419 struct ucc_geth_info *ug_info;
2420 struct ucc_fast_info *uf_info;
2421 struct ucc_fast *uf_regs;
2422 struct ucc_geth *ug_regs;
2423 int ret_val = -EINVAL;
2424 u32 remoder = UCC_GETH_REMODER_INIT;
2425 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2426 u32 ifstat, i, j, size, l2qt, l3qt, length;
2427 u16 temoder = UCC_GETH_TEMODER_INIT;
2429 u8 function_code = 0;
2431 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2433 ugeth_vdbg("%s: IN", __FUNCTION__);
2435 ug_info = ugeth->ug_info;
2436 uf_info = &ug_info->uf_info;
2437 uf_regs = uccf->uf_regs;
2438 ug_regs = ugeth->ug_regs;
2440 switch (ug_info->numThreadsRx) {
2441 case UCC_GETH_NUM_OF_THREADS_1:
2442 numThreadsRxNumerical = 1;
2444 case UCC_GETH_NUM_OF_THREADS_2:
2445 numThreadsRxNumerical = 2;
2447 case UCC_GETH_NUM_OF_THREADS_4:
2448 numThreadsRxNumerical = 4;
2450 case UCC_GETH_NUM_OF_THREADS_6:
2451 numThreadsRxNumerical = 6;
2453 case UCC_GETH_NUM_OF_THREADS_8:
2454 numThreadsRxNumerical = 8;
2457 ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
2458 ucc_geth_memclean(ugeth);
2463 switch (ug_info->numThreadsTx) {
2464 case UCC_GETH_NUM_OF_THREADS_1:
2465 numThreadsTxNumerical = 1;
2467 case UCC_GETH_NUM_OF_THREADS_2:
2468 numThreadsTxNumerical = 2;
2470 case UCC_GETH_NUM_OF_THREADS_4:
2471 numThreadsTxNumerical = 4;
2473 case UCC_GETH_NUM_OF_THREADS_6:
2474 numThreadsTxNumerical = 6;
2476 case UCC_GETH_NUM_OF_THREADS_8:
2477 numThreadsTxNumerical = 8;
2480 ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
2481 ucc_geth_memclean(ugeth);
2486 /* Calculate rx_extended_features */
2487 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2488 ug_info->ipAddressAlignment ||
2489 (ug_info->numStationAddresses !=
2490 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2492 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2493 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2494 || (ug_info->vlanOperationNonTagged !=
2495 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2497 init_default_reg_vals(&uf_regs->upsmr,
2498 &ug_regs->maccfg1, &ug_regs->maccfg2);
2501 /* For more details see the hardware spec. */
2502 init_rx_parameters(ug_info->bro,
2503 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2505 /* We're going to ignore other registers for now, */
2506 /* except as needed to get up and running */
2509 /* For more details see the hardware spec. */
2510 init_flow_control_params(ug_info->aufc,
2511 ug_info->receiveFlowControl,
2513 ug_info->pausePeriod,
2514 ug_info->extensionField,
2516 &ug_regs->uempr, &ug_regs->maccfg1);
2518 maccfg1 = in_be32(&ug_regs->maccfg1);
2519 maccfg1 |= MACCFG1_ENABLE_RX;
2520 maccfg1 |= MACCFG1_ENABLE_TX;
2521 out_be32(&ug_regs->maccfg1, maccfg1);
2524 /* For more details see the hardware spec. */
2525 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2526 ug_info->nonBackToBackIfgPart2,
2528 miminumInterFrameGapEnforcement,
2529 ug_info->backToBackInterFrameGap,
2532 ugeth_err("%s: IPGIFG initialization parameter too large.",
2534 ucc_geth_memclean(ugeth);
2539 /* For more details see the hardware spec. */
2540 ret_val = init_half_duplex_params(ug_info->altBeb,
2541 ug_info->backPressureNoBackoff,
2543 ug_info->excessDefer,
2544 ug_info->altBebTruncation,
2545 ug_info->maxRetransmission,
2546 ug_info->collisionWindow,
2549 ugeth_err("%s: Half Duplex initialization parameter too large.",
2551 ucc_geth_memclean(ugeth);
2556 /* For more details see the hardware spec. */
2557 /* Read only - resets upon read */
2558 ifstat = in_be32(&ug_regs->ifstat);
2561 /* For more details see the hardware spec. */
2562 out_be32(&ug_regs->uempr, 0);
2565 /* For more details see the hardware spec. */
2566 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2567 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2568 0, &uf_regs->upsmr, &ug_regs->uescr);
2570 /* Allocate Tx bds */
2571 for (j = 0; j < ug_info->numQueuesTx; j++) {
2572 /* Allocate in multiples of
2573 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2574 according to spec */
2575 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
2576 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2577 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2578 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
2579 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2580 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2581 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2583 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2584 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2585 ugeth->tx_bd_ring_offset[j] =
2586 kmalloc((u32) (length + align), GFP_KERNEL);
2588 if (ugeth->tx_bd_ring_offset[j] != 0)
2589 ugeth->p_tx_bd_ring[j] =
2590 (void*)((ugeth->tx_bd_ring_offset[j] +
2591 align) & ~(align - 1));
2592 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2593 ugeth->tx_bd_ring_offset[j] =
2594 qe_muram_alloc(length,
2595 UCC_GETH_TX_BD_RING_ALIGNMENT);
2596 if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
2597 ugeth->p_tx_bd_ring[j] =
2598 (u8 *) qe_muram_addr(ugeth->
2599 tx_bd_ring_offset[j]);
2601 if (!ugeth->p_tx_bd_ring[j]) {
2603 ("%s: Can not allocate memory for Tx bd rings.",
2605 ucc_geth_memclean(ugeth);
2608 /* Zero unused end of bd ring, according to spec */
2609 memset(ugeth->p_tx_bd_ring[j] +
2610 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0,
2611 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
2614 /* Allocate Rx bds */
2615 for (j = 0; j < ug_info->numQueuesRx; j++) {
2616 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
2617 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2619 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2620 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2621 ugeth->rx_bd_ring_offset[j] =
2622 kmalloc((u32) (length + align), GFP_KERNEL);
2623 if (ugeth->rx_bd_ring_offset[j] != 0)
2624 ugeth->p_rx_bd_ring[j] =
2625 (void*)((ugeth->rx_bd_ring_offset[j] +
2626 align) & ~(align - 1));
2627 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2628 ugeth->rx_bd_ring_offset[j] =
2629 qe_muram_alloc(length,
2630 UCC_GETH_RX_BD_RING_ALIGNMENT);
2631 if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
2632 ugeth->p_rx_bd_ring[j] =
2633 (u8 *) qe_muram_addr(ugeth->
2634 rx_bd_ring_offset[j]);
2636 if (!ugeth->p_rx_bd_ring[j]) {
2638 ("%s: Can not allocate memory for Rx bd rings.",
2640 ucc_geth_memclean(ugeth);
2646 for (j = 0; j < ug_info->numQueuesTx; j++) {
2647 /* Setup the skbuff rings */
2648 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2649 ugeth->ug_info->bdRingLenTx[j],
2652 if (ugeth->tx_skbuff[j] == NULL) {
2653 ugeth_err("%s: Could not allocate tx_skbuff",
2655 ucc_geth_memclean(ugeth);
2659 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2660 ugeth->tx_skbuff[j][i] = NULL;
2662 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2663 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2664 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2665 /* clear bd buffer */
2666 out_be32(&((struct qe_bd *)bd)->buf, 0);
2667 /* set bd status and length */
2668 out_be32((u32 *)bd, 0);
2669 bd += sizeof(struct qe_bd);
2671 bd -= sizeof(struct qe_bd);
2672 /* set bd status and length */
2673 out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */
2677 for (j = 0; j < ug_info->numQueuesRx; j++) {
2678 /* Setup the skbuff rings */
2679 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2680 ugeth->ug_info->bdRingLenRx[j],
2683 if (ugeth->rx_skbuff[j] == NULL) {
2684 ugeth_err("%s: Could not allocate rx_skbuff",
2686 ucc_geth_memclean(ugeth);
2690 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2691 ugeth->rx_skbuff[j][i] = NULL;
2693 ugeth->skb_currx[j] = 0;
2694 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2695 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2696 /* set bd status and length */
2697 out_be32((u32 *)bd, R_I);
2698 /* clear bd buffer */
2699 out_be32(&((struct qe_bd *)bd)->buf, 0);
2700 bd += sizeof(struct qe_bd);
2702 bd -= sizeof(struct qe_bd);
2703 /* set bd status and length */
2704 out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */
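/* Each qe_bd is two 32-bit words: a status/length word followed by the
 * buffer pointer. The last descriptor in every ring has its Wrap bit
 * (T_W/R_W) set above so the controller cycles back to the ring base. */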
2710 /* Tx global PRAM */
2711 /* Allocate global tx parameter RAM page */
2712 ugeth->tx_glbl_pram_offset =
2713 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
2714 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
2715 if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
2717 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2719 ucc_geth_memclean(ugeth);
2722 ugeth->p_tx_glbl_pram =
2723 (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth->
2724 tx_glbl_pram_offset);
2725 /* Zero out p_tx_glbl_pram */
2726 memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2728 /* Fill global PRAM */
2731 /* Size varies with number of Tx threads */
2732 ugeth->thread_dat_tx_offset =
2733 qe_muram_alloc(numThreadsTxNumerical *
2734 sizeof(struct ucc_geth_thread_data_tx) +
2735 32 * (numThreadsTxNumerical == 1),
2736 UCC_GETH_THREAD_DATA_ALIGNMENT);
2737 if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
2739 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2741 ucc_geth_memclean(ugeth);
2745 ugeth->p_thread_data_tx =
2746 (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth->
2747 thread_dat_tx_offset);
2748 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2751 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
2752 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
2753 ug_info->vtagtable[i]);
2756 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
2757 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
2760 /* Size varies with number of Tx queues */
2761 ugeth->send_q_mem_reg_offset =
2762 qe_muram_alloc(ug_info->numQueuesTx *
2763 sizeof(struct ucc_geth_send_queue_qd),
2764 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
2765 if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
2767 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2769 ucc_geth_memclean(ugeth);
2773 ugeth->p_send_q_mem_reg =
2774 (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth->
2775 send_q_mem_reg_offset);
2776 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2778 /* Setup the table */
2779 /* Assume BD rings are already established */
2780 for (i = 0; i < ug_info->numQueuesTx; i++) {
2782 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
2783 1) * sizeof(struct qe_bd);
2784 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2785 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2786 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
2787 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2788 last_bd_completed_address,
2789 (u32) virt_to_phys(endOfRing));
2790 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
2792 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2793 (u32) immrbar_virt_to_phys(ugeth->
2795 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2796 last_bd_completed_address,
2797 (u32) immrbar_virt_to_phys(endOfRing));
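/* The send queue descriptor table holds, per Tx queue, the physical
 * base of the BD ring and the address of its last BD; rings placed in
 * system memory are translated with virt_to_phys(), while MURAM-resident
 * rings go through immrbar_virt_to_phys(). */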
2801 /* schedulerbasepointer */
2803 if (ug_info->numQueuesTx > 1) {
2804 /* scheduler exists only if more than 1 tx queue */
2805 ugeth->scheduler_offset =
2806 qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
2807 UCC_GETH_SCHEDULER_ALIGNMENT);
2808 if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
2810 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2812 ucc_geth_memclean(ugeth);
2816 ugeth->p_scheduler =
2817 (struct ucc_geth_scheduler *) qe_muram_addr(ugeth->
2819 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
2820 ugeth->scheduler_offset);
2821 /* Zero out p_scheduler */
2822 memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
2824 /* Set values in scheduler */
2825 out_be32(&ugeth->p_scheduler->mblinterval,
2826 ug_info->mblinterval);
2827 out_be16(&ugeth->p_scheduler->nortsrbytetime,
2828 ug_info->nortsrbytetime);
2829 ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
2830 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
2831 ugeth->p_scheduler->txasap = ug_info->txasap;
2832 ugeth->p_scheduler->extrabw = ug_info->extrabw;
2833 for (i = 0; i < NUM_TX_QUEUES; i++)
2834 ugeth->p_scheduler->weightfactor[i] =
2835 ug_info->weightfactor[i];
2837 /* Set pointers to cpucount registers in scheduler */
2838 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
2839 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
2840 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
2841 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
2842 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
2843 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
2844 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
2845 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
2848 /* schedulerbasepointer */
2849 /* TxRMON_PTR (statistics) */
2851 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
2852 ugeth->tx_fw_statistics_pram_offset =
2853 qe_muram_alloc(sizeof
2854 (struct ucc_geth_tx_firmware_statistics_pram),
2855 UCC_GETH_TX_STATISTICS_ALIGNMENT);
2856 if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
2858 ("%s: Can not allocate DPRAM memory for"
2859 " p_tx_fw_statistics_pram.", __FUNCTION__);
2860 ucc_geth_memclean(ugeth);
2863 ugeth->p_tx_fw_statistics_pram =
2864 (struct ucc_geth_tx_firmware_statistics_pram *)
2865 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2866 /* Zero out p_tx_fw_statistics_pram */
2867 memset(ugeth->p_tx_fw_statistics_pram,
2868 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
2872 /* Already has speed set */
2874 if (ug_info->numQueuesTx > 1)
2875 temoder |= TEMODER_SCHEDULER_ENABLE;
2876 if (ug_info->ipCheckSumGenerate)
2877 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
2878 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
2879 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
2881 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
2883 /* Function code register value to be used later */
2884 function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
2885 /* Required for QE */
2887 /* function code register */
2888 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
2890 /* Rx global PRAM */
2891 /* Allocate global rx parameter RAM page */
2892 ugeth->rx_glbl_pram_offset =
2893 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
2894 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
2895 if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
2897 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2899 ucc_geth_memclean(ugeth);
2902 ugeth->p_rx_glbl_pram =
2903 (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth->
2904 rx_glbl_pram_offset);
2905 /* Zero out p_rx_glbl_pram */
2906 memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
2908 /* Fill global PRAM */
2911 /* Size varies with number of Rx threads */
2912 ugeth->thread_dat_rx_offset =
2913 qe_muram_alloc(numThreadsRxNumerical *
2914 sizeof(struct ucc_geth_thread_data_rx),
2915 UCC_GETH_THREAD_DATA_ALIGNMENT);
2916 if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
2918 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2920 ucc_geth_memclean(ugeth);
2924 ugeth->p_thread_data_rx =
2925 (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth->
2926 thread_dat_rx_offset);
2927 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
2930 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
2932 /* rxrmonbaseptr (statistics) */
2934 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
2935 ugeth->rx_fw_statistics_pram_offset =
2936 qe_muram_alloc(sizeof
2937 (struct ucc_geth_rx_firmware_statistics_pram),
2938 UCC_GETH_RX_STATISTICS_ALIGNMENT);
2939 if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
2941 ("%s: Can not allocate DPRAM memory for"
2942 " p_rx_fw_statistics_pram.", __FUNCTION__);
2943 ucc_geth_memclean(ugeth);
2946 ugeth->p_rx_fw_statistics_pram =
2947 (struct ucc_geth_rx_firmware_statistics_pram *)
2948 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2949 /* Zero out p_rx_fw_statistics_pram */
2950 memset(ugeth->p_rx_fw_statistics_pram, 0,
2951 sizeof(struct ucc_geth_rx_firmware_statistics_pram));
2954 /* intCoalescingPtr */
2956 /* Size varies with number of Rx queues */
2957 ugeth->rx_irq_coalescing_tbl_offset =
2958 qe_muram_alloc(ug_info->numQueuesRx *
2959 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
2960 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
2961 if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
2963 ("%s: Can not allocate DPRAM memory for"
2964 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
2965 ucc_geth_memclean(ugeth);
2969 ugeth->p_rx_irq_coalescing_tbl =
2970 (struct ucc_geth_rx_interrupt_coalescing_table *)
2971 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
2972 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
2973 ugeth->rx_irq_coalescing_tbl_offset);
2975 /* Fill interrupt coalescing table */
2976 for (i = 0; i < ug_info->numQueuesRx; i++) {
2977 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2978 interruptcoalescingmaxvalue,
2979 ug_info->interruptcoalescingmaxvalue[i]);
2980 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2981 interruptcoalescingcounter,
2982 ug_info->interruptcoalescingmaxvalue[i]);
2986 init_max_rx_buff_len(uf_info->max_rx_buf_length,
2987 &ugeth->p_rx_glbl_pram->mrblr);
2989 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
2991 init_min_frame_len(ug_info->minFrameLength,
2992 &ugeth->p_rx_glbl_pram->minflr,
2993 &ugeth->p_rx_glbl_pram->mrblr);
2995 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
2997 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
3001 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3002 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3003 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
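/* l2qt packs one 4-bit Rx queue number per VLAN priority, priority 0 in
 * the most significant nibble: for example, l2qt[0] = 1 with all other
 * entries 0 yields 0x10000000. The l3qt words below are packed the same
 * way, eight IP priorities per 32-bit register. */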
3006 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3008 for (i = 0; i < 8; i++)
3009 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3010 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
3014 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3017 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3020 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3023 /* Size varies with number of Rx queues */
3024 ugeth->rx_bd_qs_tbl_offset =
3025 qe_muram_alloc(ug_info->numQueuesRx *
3026 (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3027 sizeof(struct ucc_geth_rx_prefetched_bds)),
3028 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3029 if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
3031 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3033 ucc_geth_memclean(ugeth);
3037 ugeth->p_rx_bd_qs_tbl =
3038 (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth->
3039 rx_bd_qs_tbl_offset);
3040 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3041 /* Zero out p_rx_bd_qs_tbl */
3042 memset(ugeth->p_rx_bd_qs_tbl,
3044 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3045 sizeof(struct ucc_geth_rx_prefetched_bds)));
3047 /* Setup the table */
3048 /* Assume BD rings are already established */
3049 for (i = 0; i < ug_info->numQueuesRx; i++) {
3050 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3051 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3052 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3053 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3055 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3056 (u32) immrbar_virt_to_phys(ugeth->
3059 /* rest of fields handled by QE */
3063 /* Already has speed set */
3065 if (ugeth->rx_extended_features)
3066 remoder |= REMODER_RX_EXTENDED_FEATURES;
3067 if (ug_info->rxExtendedFiltering)
3068 remoder |= REMODER_RX_EXTENDED_FILTERING;
3069 if (ug_info->dynamicMaxFrameLength)
3070 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3071 if (ug_info->dynamicMinFrameLength)
3072 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3074 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3077 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3078 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3079 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3080 if (ug_info->ipCheckSumCheck)
3081 remoder |= REMODER_IP_CHECKSUM_CHECK;
3082 if (ug_info->ipAddressAlignment)
3083 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3084 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3086 /* Note that this function must be called */
3087 /* ONLY AFTER p_tx_fw_statistics_pram */
3088 /* and p_rx_fw_statistics_pram are allocated! */
3089 init_firmware_statistics_gathering_mode((ug_info->
3091 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3092 (ug_info->statisticsMode &
3093 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3094 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
3095 ugeth->tx_fw_statistics_pram_offset,
3096 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3097 ugeth->rx_fw_statistics_pram_offset,
3098 &ugeth->p_tx_glbl_pram->temoder,
3099 &ugeth->p_rx_glbl_pram->remoder);
3101 /* function code register */
3102 ugeth->p_rx_glbl_pram->rstate = function_code;
3104 /* initialize extended filtering */
3105 if (ug_info->rxExtendedFiltering) {
3106 if (!ug_info->extendedFilteringChainPointer) {
3107 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3109 ucc_geth_memclean(ugeth);
3113 /* Allocate memory for extended filtering Mode Global Parameters */
3115 ugeth->exf_glbl_param_offset =
3116 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
3117 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3118 if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
3120 ("%s: Can not allocate DPRAM memory for"
3121 " p_exf_glbl_param.", __FUNCTION__);
3122 ucc_geth_memclean(ugeth);
3126 ugeth->p_exf_glbl_param =
3127 (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth->
3128 exf_glbl_param_offset);
3129 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3130 ugeth->exf_glbl_param_offset);
3131 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3132 (u32) ug_info->extendedFilteringChainPointer);
3134 } else { /* initialize 82xx style address filtering */
3136 /* Init individual address recognition registers to disabled */
3138 for (j = 0; j < NUM_OF_PADDRS; j++)
3139 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3142 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
3143 p_rx_glbl_pram->addressfiltering;
3145 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3146 ENET_ADDR_TYPE_GROUP);
3147 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3148 ENET_ADDR_TYPE_INDIVIDUAL);
3152 * Initialize UCC at QE level
3155 command = QE_INIT_TX_RX;
3157 /* Allocate shadow InitEnet command parameter structure.
3158 * This is needed because after the InitEnet command is executed,
3159 * the structure in DPRAM is released, because DPRAM is a premium
3161 * This shadow structure keeps a copy of what was done so that the
3162 * allocated resources can be released when the channel is freed.
3164 if (!(ugeth->p_init_enet_param_shadow =
3165 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3167 ("%s: Can not allocate memory for"
3168 " p_UccInitEnetParamShadows.", __FUNCTION__);
3169 ucc_geth_memclean(ugeth);
3172 /* Zero out *p_init_enet_param_shadow */
3173 memset((char *)ugeth->p_init_enet_param_shadow,
3174 0, sizeof(struct ucc_geth_init_pram));
3176 /* Fill shadow InitEnet command parameter structure */
3178 ugeth->p_init_enet_param_shadow->resinit1 =
3179 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3180 ugeth->p_init_enet_param_shadow->resinit2 =
3181 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3182 ugeth->p_init_enet_param_shadow->resinit3 =
3183 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3184 ugeth->p_init_enet_param_shadow->resinit4 =
3185 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3186 ugeth->p_init_enet_param_shadow->resinit5 =
3187 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3188 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3189 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3190 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3191 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3193 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3194 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3195 if ((ug_info->largestexternallookupkeysize !=
3196 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3197 && (ug_info->largestexternallookupkeysize !=
3198 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3199 && (ug_info->largestexternallookupkeysize !=
3200 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3201 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3203 ucc_geth_memclean(ugeth);
3206 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3207 ug_info->largestexternallookupkeysize;
3208 size = sizeof(struct ucc_geth_thread_rx_pram);
3209 if (ug_info->rxExtendedFiltering) {
3210 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3211 if (ug_info->largestexternallookupkeysize ==
3212 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3214 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3215 if (ug_info->largestexternallookupkeysize ==
3216 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3218 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3221 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3222 p_init_enet_param_shadow->rxthread[0]),
3223 (u8) (numThreadsRxNumerical + 1)
3224 /* Rx needs one extra for terminator */
3225 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3226 ug_info->riscRx, 1)) != 0) {
3227 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3229 ucc_geth_memclean(ugeth);
3233 ugeth->p_init_enet_param_shadow->txglobal =
3234 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3236 fill_init_enet_entries(ugeth,
3237 &(ugeth->p_init_enet_param_shadow->
3238 txthread[0]), numThreadsTxNumerical,
3239 sizeof(struct ucc_geth_thread_tx_pram),
3240 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3241 ug_info->riscTx, 0)) != 0) {
3242 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3244 ucc_geth_memclean(ugeth);
3248 /* Load Rx bds with buffers */
3249 for (i = 0; i < ug_info->numQueuesRx; i++) {
3250 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3251 ugeth_err("%s: Can not fill Rx bds with buffers.",
3253 ucc_geth_memclean(ugeth);
3258 /* Allocate InitEnet command parameter structure */
3259 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
3260 if (IS_MURAM_ERR(init_enet_pram_offset)) {
3262 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3264 ucc_geth_memclean(ugeth);
3268 (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset);
3270 /* Copy shadow InitEnet command parameter structure into PRAM */
3271 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
3272 p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
3273 p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
3274 p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
3275 out_be16(&p_init_enet_pram->resinit5,
3276 ugeth->p_init_enet_param_shadow->resinit5);
3277 p_init_enet_pram->largestexternallookupkeysize =
3278 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
3279 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3280 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3281 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3282 out_be32(&p_init_enet_pram->rxthread[i],
3283 ugeth->p_init_enet_param_shadow->rxthread[i]);
3284 out_be32(&p_init_enet_pram->txglobal,
3285 ugeth->p_init_enet_param_shadow->txglobal);
3286 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3287 out_be32(&p_init_enet_pram->txthread[i],
3288 ugeth->p_init_enet_param_shadow->txthread[i]);
3290 /* Issue QE command */
3292 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3293 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
3294 init_enet_pram_offset);
3296 /* Free InitEnet command parameter */
3297 qe_muram_free(init_enet_pram_offset);
3302 /* returns a net_device_stats structure pointer */
3303 static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
3305 struct ucc_geth_private *ugeth = netdev_priv(dev);
3307 return &(ugeth->stats);
3310 /* ucc_geth_timeout gets called when a packet has not been
3311 * transmitted after a set amount of time.
3312 * For now, assume that clearing out all the structures and
3313 * starting over will fix the problem. */
3314 static void ucc_geth_timeout(struct net_device *dev)
3316 struct ucc_geth_private *ugeth = netdev_priv(dev);
3318 ugeth_vdbg("%s: IN", __FUNCTION__);
3320 ugeth->stats.tx_errors++;
3322 ugeth_dump_regs(ugeth);
3324 if (dev->flags & IFF_UP) {
3325 ucc_geth_stop(ugeth);
3326 ucc_geth_startup(ugeth);
3329 netif_schedule(dev);
3332 /* This is called by the kernel when a frame is ready for transmission. */
3333 /* It is pointed to by the dev->hard_start_xmit function pointer */
3334 static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3336 struct ucc_geth_private *ugeth = netdev_priv(dev);
3337 #ifdef CONFIG_UGETH_TX_ON_DEMAND
3338 struct ucc_fast_private *uccf;
3340 u8 *bd; /* BD pointer */
3344 ugeth_vdbg("%s: IN", __FUNCTION__);
3346 spin_lock_irq(&ugeth->lock);
3348 ugeth->stats.tx_bytes += skb->len;
3350 /* Start from the next BD that should be filled */
3351 bd = ugeth->txBd[txQ];
3352 bd_status = in_be32((u32 *)bd);
3353 /* Save the skb pointer so we can free it later */
3354 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3356 /* Update the current skb pointer (wrapping if this was the last) */
3357 ugeth->skb_curtx[txQ] =
3358 (ugeth->skb_curtx[txQ] +
3359 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
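/* The mask-based wrap above relies on TX_RING_MOD_MASK() reducing the
 * index modulo the ring length; with a simple (size - 1) mask that in
 * turn assumes the configured Tx ring lengths are powers of two. */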
3361 /* set up the buffer descriptor */
3362 out_be32(&((struct qe_bd *)bd)->buf,
3363 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3365 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
3367 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
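/* Preserve the Wrap bit, mark the descriptor ready for the controller
 * (T_R), request a transmit interrupt (T_I) and flag it as the last
 * buffer of the frame (T_L); the low bits carry the frame length. */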
3369 /* set bd status and length */
3370 out_be32((u32 *)bd, bd_status);
3372 dev->trans_start = jiffies;
3374 /* Move to next BD in the ring */
3375 if (!(bd_status & T_W))
3376 bd += sizeof(struct qe_bd);
3378 bd = ugeth->p_tx_bd_ring[txQ];
3380 /* If the next BD still needs to be confirmed, the BD ring is
3381 full. We need to tell the kernel to stop queueing new frames. */
3382 if (bd == ugeth->confBd[txQ]) {
3383 if (!netif_queue_stopped(dev))
3384 netif_stop_queue(dev);
3387 ugeth->txBd[txQ] = bd;
3389 if (ugeth->p_scheduler) {
3390 ugeth->cpucount[txQ]++;
3391 /* Indicate to QE that there are more Tx bds ready for
3393 /* This is done by writing a running counter of the bd
3394 count to the scheduler PRAM. */
3395 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3398 #ifdef CONFIG_UGETH_TX_ON_DEMAND
3400 out_be16(uccf->p_utodr, UCC_FAST_TOD);
3402 spin_unlock_irq(&ugeth->lock);
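/* Receive path: walk the Rx BD ring for the given queue, handing good
 * frames to the stack and dropping errored ones, replacing each
 * consumed skb with a fresh one, until an empty BD or the work limit
 * is reached. Returns the number of frames processed. */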
3407 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
3409 struct sk_buff *skb;
3411 u16 length, howmany = 0;
3415 ugeth_vdbg("%s: IN", __FUNCTION__);
3417 /* collect received buffers */
3418 bd = ugeth->rxBd[rxQ];
3420 bd_status = in_be32((u32 *)bd);
3422 /* loop while the BD holds received data (R_E clear) and the work limit is not exhausted */
3423 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3424 bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf);
3425 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3426 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3428 /* determine whether buffer is first, last, first and last
3429 (single buffer frame) or middle (not first and not last) */
3431 (!(bd_status & (R_F | R_L))) ||
3432 (bd_status & R_ERRORS_FATAL)) {
3433 ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
3434 __FUNCTION__, __LINE__, (u32) skb);
3436 dev_kfree_skb_any(skb);
3438 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3439 ugeth->stats.rx_dropped++;
3441 ugeth->stats.rx_packets++;
3444 /* Prep the skb for the packet */
3445 skb_put(skb, length);
3447 /* Tell the skb what kind of packet this is */
3448 skb->protocol = eth_type_trans(skb, ugeth->dev);
3450 ugeth->stats.rx_bytes += length;
3451 /* Send the packet up the stack */
3452 #ifdef CONFIG_UGETH_NAPI
3453 netif_receive_skb(skb);
3456 #endif /* CONFIG_UGETH_NAPI */
3459 ugeth->dev->last_rx = jiffies;
3461 skb = get_new_skb(ugeth, bd);
3463 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3464 ugeth->stats.rx_dropped++;
3468 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3470 /* update to point at the next skb */
3471 ugeth->skb_currx[rxQ] =
3472 (ugeth->skb_currx[rxQ] +
3473 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3475 if (bd_status & R_W)
3476 bd = ugeth->p_rx_bd_ring[rxQ];
3478 bd += sizeof(struct qe_bd);
3480 bd_status = in_be32((u32 *)bd);
3483 ugeth->rxBd[rxQ] = bd;
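/* Transmit completion: starting at the confirmation pointer, release
 * every BD the hardware has finished with (T_R clear), free the
 * associated sk_buffs and wake the queue if it had been stopped. */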
3487 static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3489 /* Start from the next BD that should be filled */
3490 struct ucc_geth_private *ugeth = netdev_priv(dev);
3491 u8 *bd; /* BD pointer */
3494 bd = ugeth->confBd[txQ];
3495 bd_status = in_be32((u32 *)bd);
3497 /* Normal processing. */
3498 while ((bd_status & T_R) == 0) {
3499 /* BD contains already transmitted buffer. */
3500 /* Handle the transmitted buffer and release */
3501 /* the BD to be used with the current frame */
3503 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3506 ugeth->stats.tx_packets++;
3508 /* Free the sk buffer associated with this TxBD */
3509 dev_kfree_skb_irq(ugeth->
3510 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3511 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3512 ugeth->skb_dirtytx[txQ] =
3513 (ugeth->skb_dirtytx[txQ] +
3514 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3516 /* We freed a buffer, so now we can restart transmission */
3517 if (netif_queue_stopped(dev))
3518 netif_wake_queue(dev);
3520 /* Advance the confirmation BD pointer */
3521 if (!(bd_status & T_W))
3522 bd += sizeof(struct qe_bd);
3524 bd = ugeth->p_tx_bd_ring[txQ];
3525 bd_status = in_be32((u32 *)bd);
3527 ugeth->confBd[txQ] = bd;
3531 #ifdef CONFIG_UGETH_NAPI
3532 static int ucc_geth_poll(struct net_device *dev, int *budget)
3534 struct ucc_geth_private *ugeth = netdev_priv(dev);
3535 struct ucc_geth_info *ug_info;
3536 struct ucc_fast_private *uccf;
3542 ug_info = ugeth->ug_info;
3544 rx_work_limit = *budget;
3545 if (rx_work_limit > dev->quota)
3546 rx_work_limit = dev->quota;
3550 for (i = 0; i < ug_info->numQueuesRx; i++) {
3551 howmany += ucc_geth_rx(ugeth, i, rx_work_limit);
3554 dev->quota -= howmany;
3555 rx_work_limit -= howmany;
3558 if (rx_work_limit > 0) {
3559 netif_rx_complete(dev);
3561 uccm = in_be32(uccf->p_uccm);
3562 uccm |= UCCE_RX_EVENTS;
3563 out_be32(uccf->p_uccm, uccm);
3566 return (rx_work_limit > 0) ? 0 : 1;
3568 #endif /* CONFIG_UGETH_NAPI */
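/* Interrupt handler: read and acknowledge the UCCE events, then either
 * schedule NAPI polling (CONFIG_UGETH_NAPI) or drain the Rx queues
 * inline, run Tx completion for the queues that signalled, and count
 * BSY and TXE error events in the statistics. */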
3570 static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3572 struct net_device *dev = (struct net_device *)info;
3573 struct ucc_geth_private *ugeth = netdev_priv(dev);
3574 struct ucc_fast_private *uccf;
3575 struct ucc_geth_info *ug_info;
3578 #ifndef CONFIG_UGETH_NAPI
3579 register u32 rx_mask;
3581 register u32 tx_mask;
3584 ugeth_vdbg("%s: IN", __FUNCTION__);
3590 ug_info = ugeth->ug_info;
3592 /* read and clear events */
3593 ucce = (u32) in_be32(uccf->p_ucce);
3594 uccm = (u32) in_be32(uccf->p_uccm);
3596 out_be32(uccf->p_ucce, ucce);
3598 /* check for receive events that require processing */
3599 if (ucce & UCCE_RX_EVENTS) {
3600 #ifdef CONFIG_UGETH_NAPI
3601 if (netif_rx_schedule_prep(dev)) {
3602 uccm &= ~UCCE_RX_EVENTS;
3603 out_be32(uccf->p_uccm, uccm);
3604 __netif_rx_schedule(dev);
3607 rx_mask = UCCE_RXBF_SINGLE_MASK;
3608 for (i = 0; i < ug_info->numQueuesRx; i++) {
3610 ucc_geth_rx(ugeth, i, (int)ugeth->ug_info->bdRingLenRx[i]);
3614 #endif /* CONFIG_UGETH_NAPI */
3617 /* Tx event processing */
3618 if (ucce & UCCE_TX_EVENTS) {
3619 spin_lock(&ugeth->lock);
3620 tx_mask = UCCE_TXBF_SINGLE_MASK;
3621 for (i = 0; i < ug_info->numQueuesTx; i++) {
3623 ucc_geth_tx(dev, i);
3627 spin_unlock(&ugeth->lock);
3630 /* Errors and other events */
3631 if (ucce & UCCE_OTHER) {
3632 if (ucce & UCCE_BSY) {
3633 ugeth->stats.rx_errors++;
3635 if (ucce & UCCE_TXE) {
3636 ugeth->stats.tx_errors++;
3643 /* Called when the interface is brought up; configures and starts the controller. */
3644 /* Returns 0 for success. */
3645 static int ucc_geth_open(struct net_device *dev)
3647 struct ucc_geth_private *ugeth = netdev_priv(dev);
3650 ugeth_vdbg("%s: IN", __FUNCTION__);
3652 /* Test station address */
3653 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3654 ugeth_err("%s: Multicast address used for station address"
3655 " - is this what you wanted?", __FUNCTION__);
3659 err = ucc_struct_init(ugeth);
3661 ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
3665 err = ucc_geth_startup(ugeth);
3667 ugeth_err("%s: Cannot configure net device, aborting.",
3672 err = adjust_enet_interface(ugeth);
3674 ugeth_err("%s: Cannot configure net device, aborting.",
3679 /* Set MACSTNADDR1, MACSTNADDR2 */
3680 /* For more details see the hardware spec. */
3681 init_mac_station_addr_regs(dev->dev_addr[0],
3687 &ugeth->ug_regs->macstnaddr1,
3688 &ugeth->ug_regs->macstnaddr2);
3690 err = init_phy(dev);
3692 ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
3696 phy_start(ugeth->phydev);
3699 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
3702 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
3704 ucc_geth_stop(ugeth);
3708 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3710 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
3711 ucc_geth_stop(ugeth);
3715 netif_start_queue(dev);
3720 /* Stops the kernel queue, and halts the controller */
3721 static int ucc_geth_close(struct net_device *dev)
3723 struct ucc_geth_private *ugeth = netdev_priv(dev);
3725 ugeth_vdbg("%s: IN", __FUNCTION__);
3727 ucc_geth_stop(ugeth);
3729 phy_disconnect(ugeth->phydev);
3730 ugeth->phydev = NULL;
3732 netif_stop_queue(dev);
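/* Placeholder ethtool_ops: no ethtool operations are implemented yet. */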
3737 const struct ethtool_ops ucc_geth_ethtool_ops = { };
3739 static phy_interface_t to_phy_interface(const char *interface_type)
3741 if (strcasecmp(interface_type, "mii") == 0)
3742 return PHY_INTERFACE_MODE_MII;
3743 if (strcasecmp(interface_type, "gmii") == 0)
3744 return PHY_INTERFACE_MODE_GMII;
3745 if (strcasecmp(interface_type, "tbi") == 0)
3746 return PHY_INTERFACE_MODE_TBI;
3747 if (strcasecmp(interface_type, "rmii") == 0)
3748 return PHY_INTERFACE_MODE_RMII;
3749 if (strcasecmp(interface_type, "rgmii") == 0)
3750 return PHY_INTERFACE_MODE_RGMII;
3751 if (strcasecmp(interface_type, "rgmii-id") == 0)
3752 return PHY_INTERFACE_MODE_RGMII_ID;
3753 if (strcasecmp(interface_type, "rtbi") == 0)
3754 return PHY_INTERFACE_MODE_RTBI;
3756 return PHY_INTERFACE_MODE_MII;
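/* Probe reads its configuration from the device tree. An illustrative
 * node using the properties parsed below might look like the sketch
 * here (all names and values are examples only, not taken from a real
 * board file):
 *
 *	ethernet@2000 {
 *		compatible = "ucc_geth";
 *		device-id = <1>;
 *		reg = <0x2000 0x200>;
 *		interrupts = <32 8>;
 *		rx-clock = <0>;
 *		tx-clock = <19>;
 *		mac-address = [ 00 04 9f 00 23 45 ];
 *		phy-handle = <&phy0>;
 *		interface-type = "rgmii-id";
 *		max-speed = <1000>;
 *	};
 */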
3759 static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
3761 struct device *device = &ofdev->dev;
3762 struct device_node *np = ofdev->node;
3763 struct device_node *mdio;
3764 struct net_device *dev = NULL;
3765 struct ucc_geth_private *ugeth = NULL;
3766 struct ucc_geth_info *ug_info;
3767 struct resource res;
3768 struct device_node *phy;
3769 int err, ucc_num, max_speed = 0;
3771 const unsigned int *prop;
3772 const void *mac_addr;
3773 phy_interface_t phy_interface;
3774 static const int enet_to_speed[] = {
3775 SPEED_10, SPEED_10, SPEED_10,
3776 SPEED_100, SPEED_100, SPEED_100,
3777 SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
3779 static const phy_interface_t enet_to_phy_interface[] = {
3780 PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
3781 PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
3782 PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
3783 PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
3784 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
3787 ugeth_vdbg("%s: IN", __FUNCTION__);
3789 prop = get_property(np, "device-id", NULL);
3790 ucc_num = *prop - 1;
3791 if ((ucc_num < 0) || (ucc_num > 7))
3794 ug_info = &ugeth_info[ucc_num];
3795 ug_info->uf_info.ucc_num = ucc_num;
3797 prop = get_property(np, "rx-clock", NULL);
3798 ug_info->uf_info.rx_clock = *prop;
3799 prop = get_property(np, "tx-clock", NULL);
3800 ug_info->uf_info.tx_clock = *prop;
3801 err = of_address_to_resource(np, 0, &res);
3805 ug_info->uf_info.regs = res.start;
3806 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3808 ph = get_property(np, "phy-handle", NULL);
3809 phy = of_find_node_by_phandle(*ph);
3814 /* set the PHY address */
3815 prop = get_property(phy, "reg", NULL);
3818 ug_info->phy_address = *prop;
3820 /* get the phy interface type, or default to MII */
3821 prop = get_property(np, "interface-type", NULL);
3823 /* handle interface property present in old trees */
3824 prop = get_property(phy, "interface", NULL);
3826 phy_interface = enet_to_phy_interface[*prop];
3828 phy_interface = PHY_INTERFACE_MODE_MII;
3830 phy_interface = to_phy_interface((const char *)prop);
3833 /* get speed, or derive from interface */
3834 prop = get_property(np, "max-speed", NULL);
3836 /* handle interface property present in old trees */
3837 prop = get_property(phy, "interface", NULL);
3839 max_speed = enet_to_speed[*prop];
3844 switch (phy_interface) {
3845 case PHY_INTERFACE_MODE_GMII:
3846 case PHY_INTERFACE_MODE_RGMII:
3847 case PHY_INTERFACE_MODE_RGMII_ID:
3848 case PHY_INTERFACE_MODE_TBI:
3849 case PHY_INTERFACE_MODE_RTBI:
3850 max_speed = SPEED_1000;
3853 max_speed = SPEED_100;
3858 if (max_speed == SPEED_1000) {
3859 ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
3860 ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
3861 ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
3862 ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
3863 ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
3864 ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
3867 /* Set the bus id */
3868 mdio = of_get_parent(phy);
3873 err = of_address_to_resource(mdio, 0, &res);
3879 ug_info->mdio_bus = res.start;
3881 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
3882 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
3883 ug_info->uf_info.irq);
3885 if (ug_info == NULL) {
3886 ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
3891 /* Create an ethernet device instance */
3892 dev = alloc_etherdev(sizeof(*ugeth));
3897 ugeth = netdev_priv(dev);
3898 spin_lock_init(&ugeth->lock);
3900 dev_set_drvdata(device, dev);
3902 /* Set the dev->base_addr to the UCC register region */
3903 dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
3905 SET_MODULE_OWNER(dev);
3906 SET_NETDEV_DEV(dev, device);
3908 /* Fill in the dev structure */
3909 dev->open = ucc_geth_open;
3910 dev->hard_start_xmit = ucc_geth_start_xmit;
3911 dev->tx_timeout = ucc_geth_timeout;
3912 dev->watchdog_timeo = TX_TIMEOUT;
3913 #ifdef CONFIG_UGETH_NAPI
3914 dev->poll = ucc_geth_poll;
3915 dev->weight = UCC_GETH_DEV_WEIGHT;
3916 #endif /* CONFIG_UGETH_NAPI */
3917 dev->stop = ucc_geth_close;
3918 dev->get_stats = ucc_geth_get_stats;
3919 // dev->change_mtu = ucc_geth_change_mtu;
3921 dev->set_multicast_list = ucc_geth_set_multi;
3922 dev->ethtool_ops = &ucc_geth_ethtool_ops;
3924 ugeth->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
3925 ugeth->phy_interface = phy_interface;
3926 ugeth->max_speed = max_speed;
3928 err = register_netdev(dev);
3930 ugeth_err("%s: Cannot register net device, aborting.",
3936 mac_addr = of_get_mac_address(np);
3938 memcpy(dev->dev_addr, mac_addr, 6);
3940 ugeth->ug_info = ug_info;
3946 static int ucc_geth_remove(struct of_device* ofdev)
3948 struct device *device = &ofdev->dev;
3949 struct net_device *dev = dev_get_drvdata(device);
3950 struct ucc_geth_private *ugeth = netdev_priv(dev);
3952 dev_set_drvdata(device, NULL);
3953 ucc_geth_memclean(ugeth);
3959 static struct of_device_id ucc_geth_match[] = {
3962 .compatible = "ucc_geth",
3967 MODULE_DEVICE_TABLE(of, ucc_geth_match);
3969 static struct of_platform_driver ucc_geth_driver = {
3971 .match_table = ucc_geth_match,
3972 .probe = ucc_geth_probe,
3973 .remove = ucc_geth_remove,
3976 static int __init ucc_geth_init(void)
3980 ret = uec_mdio_init();
3985 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
3986 for (i = 0; i < 8; i++)
3987 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
3988 sizeof(ugeth_primary_info));
3990 ret = of_register_platform_driver(&ucc_geth_driver);
3998 static void __exit ucc_geth_exit(void)
4000 of_unregister_platform_driver(&ucc_geth_driver);
4004 module_init(ucc_geth_init);
4005 module_exit(ucc_geth_exit);
4007 MODULE_AUTHOR("Freescale Semiconductor, Inc");
4008 MODULE_DESCRIPTION(DRV_DESC);
4009 MODULE_LICENSE("GPL");