/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, 0);
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
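
/* Note: ql_sem_trylock() returns 0 on success -- ownership is confirmed
 * when the SEM_SET bits written above read back set -- and non-zero when
 * another function currently holds the resource.
 */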

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
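
/* Illustrative usage of the semaphore API (this mirrors the flash and
 * MAC-address paths later in this file; not a new code path):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the shared flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */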

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, DEBUG,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (0 << CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->ndev->dev_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
			addr[0], addr[1], addr[2], addr[3],
			addr[4], addr[5]);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Clearing MAC address on %s\n",
			qdev->ndev->name);
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
			"address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
		qdev->ndev->name);
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
		qdev->ndev->name);
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" :
		 (index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" :
		 (index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" :
		 (index == RT_IDX_BCAST_SLOT) ? "BROADCAST" :
		 (index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" :
		 (index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" :
		 (index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" :
		 (index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" :
		 (index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" :
		 (index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" :
		 (index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" :
		 (index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" :
		 (index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" :
		 (index == RT_IDX_UNUSED013) ? "UNUSED13" :
		 (index == RT_IDX_UNUSED014) ? "UNUSED14" :
		 (index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : "(unknown)"),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	/* Read the lower 32 bits, then the upper 32 bits at reg + 4. */
	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
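
/* Note on the completion queue handshake: the chip posts its producer
 * index to a shadow location in host memory (read with ql_read_sh_reg()
 * in the clean routines below), while the driver publishes its consumer
 * index back to the chip through the doorbell write above.
 */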

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					rx_ring->lbq_clean_idx = clean_idx;
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					rx_ring->lbq_clean_idx = clean_idx;
					put_page(lbq_desc->p.lbq_page);
					lbq_desc->p.lbq_page = NULL;
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				*lbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}
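
/* Note: buffers are replenished in batches of 16; the producer index is
 * advanced once per batch and the doorbell is written once, after the
 * loop, to cover all newly posted buffers.  The small buffer queue below
 * follows the same pattern.
 */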

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				*sbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt)
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);

	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc, mapaddr),
						       pci_unmap_len
						       (sbq_desc, maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * through and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         frames. If the MTU goes up we could
		 *         eventually be in trouble.
		 */
		int size, offset, i = 0;
		__le64 *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non-TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n", length);
			bq = (__le64 *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
			IB_MAC_IOCB_RSP_VLAN_MASK);

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
			ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);

	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
			      cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"UDP checksum done!\n");
			}
		}
	}

	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
					 vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp &&
		    (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		    (vlan_id != 0))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
	qdev->stats.tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too short to be legal, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			QPRINTK(qdev, TX_DONE, WARNING,
				"PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR,
			"Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		QPRINTK(qdev, LINK, ERR,
			"Multiple CAM hits lookup occurred.\n");
		QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		QPRINTK(qdev, RX_ERR, ERR,
			"PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
			ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
	    net_rsp != NULL) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
		rx_ring->cq_id);

	/* Service the TX rings first.  They start
	 * right after the RSS rings. */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			QPRINTK(qdev, INTR, DEBUG,
				"%s: Servicing TX completion ring %d.\n",
				__func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		QPRINTK(qdev, INTR, DEBUG,
			"%s: Servicing RX completion ring %d.\n",
			__func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->vlgrp = grp;
	if (grp) {
		QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		QPRINTK(qdev, IFUP, DEBUG,
			"Turning off VLAN in NIC_RCV_CFG.\n");
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = MAC_ADDR_E;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;
	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 enable_bit = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	if (ql_set_mac_addr_reg
	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
	}
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}
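
/* Note: in MSI-X mode each RX vector only schedules NAPI here; all
 * completion processing happens later in ql_napi_poll_msix().
 */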

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also processes the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		QPRINTK(qdev, INTR, ERR,
			"Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass.  Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		QPRINTK(qdev, INTR, INFO,
			"Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
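
/* Note: both ql_tso() and ql_hw_csum_setup() seed the TCP/UDP checksum
 * field with the one's-complement pseudo-header sum (csum_tcpudp_magic()
 * or csum_ipv6_magic(); zero payload length in the TSO case).  The
 * hardware then folds in the payload checksum when the frame is sent.
 */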

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		QPRINTK(qdev, TX_QUEUED, INFO,
			"%s: shutting down tx queue %d due to lack of resources.\n",
			__func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
			vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"Could not map the segments.\n");
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
		tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}
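
/* Note: tx_count is precharged to the full ring length here; qlge_send()
 * decrements it per posted frame and ql_process_mac_tx_intr() increments
 * it per completion, so it always reflects the number of free slots.
 */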

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL)
	    || tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}
2276 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2277 {
2278 int i;
2279 struct bq_desc *lbq_desc;
2281 for (i = 0; i < rx_ring->lbq_len; i++) {
2282 lbq_desc = &rx_ring->lbq[i];
2283 if (lbq_desc->p.lbq_page) {
2284 pci_unmap_page(qdev->pdev,
2285 pci_unmap_addr(lbq_desc, mapaddr),
2286 pci_unmap_len(lbq_desc, maplen),
2287 PCI_DMA_FROMDEVICE);
2289 put_page(lbq_desc->p.lbq_page);
2290 lbq_desc->p.lbq_page = NULL;
2291 }
2292 }
2293 }
2295 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2296 {
2297 int i;
2298 struct bq_desc *sbq_desc;
2300 for (i = 0; i < rx_ring->sbq_len; i++) {
2301 sbq_desc = &rx_ring->sbq[i];
2302 if (sbq_desc == NULL) {
2303 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2304 return;
2305 }
2306 if (sbq_desc->p.skb) {
2307 pci_unmap_single(qdev->pdev,
2308 pci_unmap_addr(sbq_desc, mapaddr),
2309 pci_unmap_len(sbq_desc, maplen),
2310 PCI_DMA_FROMDEVICE);
2311 dev_kfree_skb(sbq_desc->p.skb);
2312 sbq_desc->p.skb = NULL;
2313 }
2314 }
2315 }
2317 /* Free all large and small rx buffers associated
2318 * with the completion queues for this device.
2319 */
2320 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2321 {
2322 int i;
2323 struct rx_ring *rx_ring;
2325 for (i = 0; i < qdev->rx_ring_count; i++) {
2326 rx_ring = &qdev->rx_ring[i];
2327 if (rx_ring->lbq)
2328 ql_free_lbq_buffers(qdev, rx_ring);
2329 if (rx_ring->sbq)
2330 ql_free_sbq_buffers(qdev, rx_ring);
2331 }
2332 }
2334 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2335 {
2336 struct rx_ring *rx_ring;
2337 int i;
2339 for (i = 0; i < qdev->rx_ring_count; i++) {
2340 rx_ring = &qdev->rx_ring[i];
2341 if (rx_ring->type != TX_Q)
2342 ql_update_buffer_queues(qdev, rx_ring);
2343 }
2344 }
2346 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2347 struct rx_ring *rx_ring)
2348 {
2349 int i;
2350 struct bq_desc *lbq_desc;
2351 __le64 *bq = rx_ring->lbq_base;
2353 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2354 for (i = 0; i < rx_ring->lbq_len; i++) {
2355 lbq_desc = &rx_ring->lbq[i];
2356 memset(lbq_desc, 0, sizeof(*lbq_desc));
2357 lbq_desc->index = i;
2358 lbq_desc->addr = bq;
2359 bq++;
2360 }
2361 }
2363 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2364 struct rx_ring *rx_ring)
2365 {
2366 int i;
2367 struct bq_desc *sbq_desc;
2368 __le64 *bq = rx_ring->sbq_base;
2370 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2371 for (i = 0; i < rx_ring->sbq_len; i++) {
2372 sbq_desc = &rx_ring->sbq[i];
2373 memset(sbq_desc, 0, sizeof(*sbq_desc));
2374 sbq_desc->index = i;
2375 sbq_desc->addr = bq;
2376 bq++;
2377 }
2378 }
2380 static void ql_free_rx_resources(struct ql_adapter *qdev,
2381 struct rx_ring *rx_ring)
2382 {
2383 /* Free the small buffer queue. */
2384 if (rx_ring->sbq_base) {
2385 pci_free_consistent(qdev->pdev,
2386 rx_ring->sbq_size,
2387 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2388 rx_ring->sbq_base = NULL;
2389 }
2391 /* Free the small buffer queue control blocks. */
2392 kfree(rx_ring->sbq);
2393 rx_ring->sbq = NULL;
2395 /* Free the large buffer queue. */
2396 if (rx_ring->lbq_base) {
2397 pci_free_consistent(qdev->pdev,
2398 rx_ring->lbq_size,
2399 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2400 rx_ring->lbq_base = NULL;
2401 }
2403 /* Free the large buffer queue control blocks. */
2404 kfree(rx_ring->lbq);
2405 rx_ring->lbq = NULL;
2407 /* Free the rx queue. */
2408 if (rx_ring->cq_base) {
2409 pci_free_consistent(qdev->pdev,
2410 rx_ring->cq_size,
2411 rx_ring->cq_base, rx_ring->cq_base_dma);
2412 rx_ring->cq_base = NULL;
2413 }
2414 }
2416 /* Allocate queues and buffers for this completion queue based
2417 * on the values in the parameter structure. */
2418 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2419 struct rx_ring *rx_ring)
2420 {
2422 /*
2423 * Allocate the completion queue for this rx_ring.
2424 */
2425 rx_ring->cq_base =
2426 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2427 &rx_ring->cq_base_dma);
2429 if (rx_ring->cq_base == NULL) {
2430 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2431 return -ENOMEM;
2432 }
2434 if (rx_ring->sbq_len) {
2435 /*
2436 * Allocate small buffer queue.
2437 */
2438 rx_ring->sbq_base =
2439 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2440 &rx_ring->sbq_base_dma);
2442 if (rx_ring->sbq_base == NULL) {
2443 QPRINTK(qdev, IFUP, ERR,
2444 "Small buffer queue allocation failed.\n");
2445 goto err_mem;
2446 }
2448 /*
2449 * Allocate small buffer queue control blocks.
2450 */
2451 rx_ring->sbq =
2452 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2453 GFP_KERNEL);
2454 if (rx_ring->sbq == NULL) {
2455 QPRINTK(qdev, IFUP, ERR,
2456 "Small buffer queue control block allocation failed.\n");
2457 goto err_mem;
2458 }
2460 ql_init_sbq_ring(qdev, rx_ring);
2461 }
2463 if (rx_ring->lbq_len) {
2464 /*
2465 * Allocate large buffer queue.
2466 */
2467 rx_ring->lbq_base =
2468 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2469 &rx_ring->lbq_base_dma);
2471 if (rx_ring->lbq_base == NULL) {
2472 QPRINTK(qdev, IFUP, ERR,
2473 "Large buffer queue allocation failed.\n");
2474 goto err_mem;
2475 }
2476 /*
2477 * Allocate large buffer queue control blocks.
2478 */
2479 rx_ring->lbq =
2480 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2481 GFP_KERNEL);
2482 if (rx_ring->lbq == NULL) {
2483 QPRINTK(qdev, IFUP, ERR,
2484 "Large buffer queue control block allocation failed.\n");
2485 goto err_mem;
2486 }
2488 ql_init_lbq_ring(qdev, rx_ring);
2489 }
2491 return 0;
2493 err_mem:
2494 ql_free_rx_resources(qdev, rx_ring);
2495 return -ENOMEM;
2496 }
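/* Note (illustrative): the three allocations above are sized in
 * ql_configure_rings(). For an RSS ring the buffer queues are arrays of
 * little-endian 64-bit buffer addresses, e.g.:
 *
 *   lbq_size = lbq_len * sizeof(__le64);  // address table, not the pages
 *
 * The pages themselves are attached later by ql_update_buffer_queues().
 */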
2498 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2499 {
2500 struct tx_ring *tx_ring;
2501 struct tx_ring_desc *tx_ring_desc;
2502 int i, j;
2504 /*
2505 * Loop through all queues and free
2506 * any resources.
2507 */
2508 for (j = 0; j < qdev->tx_ring_count; j++) {
2509 tx_ring = &qdev->tx_ring[j];
2510 for (i = 0; i < tx_ring->wq_len; i++) {
2511 tx_ring_desc = &tx_ring->q[i];
2512 if (tx_ring_desc && tx_ring_desc->skb) {
2513 QPRINTK(qdev, IFDOWN, ERR,
2514 "Freeing lost SKB %p, from queue %d, index %d.\n",
2515 tx_ring_desc->skb, j,
2516 tx_ring_desc->index);
2517 ql_unmap_send(qdev, tx_ring_desc,
2518 tx_ring_desc->map_cnt);
2519 dev_kfree_skb(tx_ring_desc->skb);
2520 tx_ring_desc->skb = NULL;
2521 }
2522 }
2523 }
2524 }
2526 static void ql_free_mem_resources(struct ql_adapter *qdev)
2527 {
2528 int i;
2530 for (i = 0; i < qdev->tx_ring_count; i++)
2531 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2532 for (i = 0; i < qdev->rx_ring_count; i++)
2533 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2534 ql_free_shadow_space(qdev);
2535 }
2537 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2538 {
2539 int i;
2541 /* Allocate space for our shadow registers and such. */
2542 if (ql_alloc_shadow_space(qdev))
2543 return -ENOMEM;
2545 for (i = 0; i < qdev->rx_ring_count; i++) {
2546 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2547 QPRINTK(qdev, IFUP, ERR,
2548 "RX resource allocation failed.\n");
2549 goto err_mem;
2550 }
2551 }
2552 /* Allocate tx queue resources */
2553 for (i = 0; i < qdev->tx_ring_count; i++) {
2554 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2555 QPRINTK(qdev, IFUP, ERR,
2556 "TX resource allocation failed.\n");
2557 goto err_mem;
2558 }
2559 }
2560 return 0;
2562 err_mem:
2563 ql_free_mem_resources(qdev);
2564 return -ENOMEM;
2565 }
2567 /* Set up the rx ring control block and pass it to the chip.
2568 * The control block is defined as
2569 * "Completion Queue Initialization Control Block", or cqicb.
2570 */
2571 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2572 {
2573 struct cqicb *cqicb = &rx_ring->cqicb;
2574 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2575 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2576 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2577 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2578 void __iomem *doorbell_area =
2579 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2580 int err = 0;
2581 u16 bq_len;
2582 u64 tmp;
2583 __le64 *base_indirect_ptr;
2584 int page_entries;
2586 /* Set up the shadow registers for this ring. */
2587 rx_ring->prod_idx_sh_reg = shadow_reg;
2588 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2589 shadow_reg += sizeof(u64);
2590 shadow_reg_dma += sizeof(u64);
2591 rx_ring->lbq_base_indirect = shadow_reg;
2592 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2593 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2594 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2595 rx_ring->sbq_base_indirect = shadow_reg;
2596 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2598 /* PCI doorbell mem area + 0x00 for consumer index register */
2599 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
2600 rx_ring->cnsmr_idx = 0;
2601 rx_ring->curr_entry = rx_ring->cq_base;
2603 /* PCI doorbell mem area + 0x04 for valid register */
2604 rx_ring->valid_db_reg = doorbell_area + 0x04;
2606 /* PCI doorbell mem area + 0x18 for large buffer consumer */
2607 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
2609 /* PCI doorbell mem area + 0x1c */
2610 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
2612 memset((void *)cqicb, 0, sizeof(struct cqicb));
2613 cqicb->msix_vect = rx_ring->irq;
2615 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2616 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
2618 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2620 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2622 /*
2623 * Set up the control block load flags.
2624 */
2625 cqicb->flags = FLAGS_LC | /* Load queue base address */
2626 FLAGS_LV | /* Load MSI-X vector */
2627 FLAGS_LI; /* Load irq delay values */
2628 if (rx_ring->lbq_len) {
2629 cqicb->flags |= FLAGS_LL; /* Load lbq values */
2630 tmp = (u64)rx_ring->lbq_base_dma;
2631 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2632 page_entries = 0;
2633 do {
2634 *base_indirect_ptr = cpu_to_le64(tmp);
2635 tmp += DB_PAGE_SIZE;
2636 base_indirect_ptr++;
2637 page_entries++;
2638 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2639 cqicb->lbq_addr =
2640 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2641 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2642 (u16) rx_ring->lbq_buf_size;
2643 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2644 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2645 (u16) rx_ring->lbq_len;
2646 cqicb->lbq_len = cpu_to_le16(bq_len);
2647 rx_ring->lbq_prod_idx = 0;
2648 rx_ring->lbq_curr_idx = 0;
2649 rx_ring->lbq_clean_idx = 0;
2650 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
2652 if (rx_ring->sbq_len) {
2653 cqicb->flags |= FLAGS_LS; /* Load sbq values */
2654 tmp = (u64)rx_ring->sbq_base_dma;
2655 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2656 page_entries = 0;
2657 do {
2658 *base_indirect_ptr = cpu_to_le64(tmp);
2659 tmp += DB_PAGE_SIZE;
2660 base_indirect_ptr++;
2661 page_entries++;
2662 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
2663 cqicb->sbq_addr =
2664 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2665 cqicb->sbq_buf_size =
2666 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
2667 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2668 (u16) rx_ring->sbq_len;
2669 cqicb->sbq_len = cpu_to_le16(bq_len);
2670 rx_ring->sbq_prod_idx = 0;
2671 rx_ring->sbq_curr_idx = 0;
2672 rx_ring->sbq_clean_idx = 0;
2673 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
2674 }
2675 switch (rx_ring->type) {
2676 case TX_Q:
2677 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2678 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2679 break;
2680 case RX_Q:
2681 /* Inbound completion handling rx_rings run in
2682 * separate NAPI contexts.
2683 */
2684 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2685 64);
2686 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2687 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2688 break;
2689 default:
2690 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2691 rx_ring->type);
2692 }
2693 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
2694 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2695 CFG_LCQ, rx_ring->cq_id);
2696 if (err) {
2697 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2698 return err;
2699 }
2700 return err;
2701 }
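/* Example (illustrative): a buffer queue is handed to the chip as a list of
 * DB_PAGE_SIZE pages of __le64 buffer addresses via the *_base_indirect
 * tables filled in above. With 4096-byte DB pages and lbq_len = 1024, the
 * address table is 1024 * 8 = 8192 bytes, so MAX_DB_PAGES_PER_BQ() yields 2
 * and the do/while loop writes two page addresses (it is assumed here that
 * the macro is a round-up division of the table size by DB_PAGE_SIZE).
 */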
2703 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2704 {
2705 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2706 void __iomem *doorbell_area =
2707 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2708 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2709 (tx_ring->wq_id * sizeof(u64));
2710 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2711 (tx_ring->wq_id * sizeof(u64));
2712 int err = 0;
2714 /*
2715 * Assign doorbell registers for this tx_ring.
2716 */
2717 /* TX PCI doorbell mem area for tx producer index */
2718 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
2719 tx_ring->prod_idx = 0;
2720 /* TX PCI doorbell mem area + 0x04 */
2721 tx_ring->valid_db_reg = doorbell_area + 0x04;
2723 /*
2724 * Assign shadow registers for this tx_ring.
2725 */
2726 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2727 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2729 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2730 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2731 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2732 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2734 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
2736 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
2738 ql_init_tx_ring(qdev, tx_ring);
2740 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2741 (u16) tx_ring->wq_id);
2742 if (err) {
2743 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2744 return err;
2745 }
2746 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
2747 return err;
2748 }
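/* Note (illustrative): the wqicb is the TX-side analogue of the cqicb.
 * wqicb->addr tells the chip where to fetch transmit IOCBs, and
 * wqicb->cnsmr_idx_addr points at the shadow u64 set up above, which the
 * chip updates so the driver can read the TX consumer index from coherent
 * host memory instead of doing an MMIO read, roughly:
 *
 *   u32 cnsmr = le32_to_cpu(*(__le32 *)tx_ring->cnsmr_idx_sh_reg);
 *
 * (Cast shown for illustration; the driver wraps this pattern in a small
 * shadow-register helper.)
 */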
2750 static void ql_disable_msix(struct ql_adapter *qdev)
2751 {
2752 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2753 pci_disable_msix(qdev->pdev);
2754 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2755 kfree(qdev->msi_x_entry);
2756 qdev->msi_x_entry = NULL;
2757 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2758 pci_disable_msi(qdev->pdev);
2759 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2760 }
2761 }
2763 /* We start by trying to get the number of vectors
2764 * stored in qdev->intr_count. If we don't get that
2765 * many then we reduce the count and try again.
2766 */
2767 static void ql_enable_msix(struct ql_adapter *qdev)
2768 {
2769 int i, err;
2771 /* Get the MSIX vectors. */
2772 if (irq_type == MSIX_IRQ) {
2773 /* Try to alloc space for the msix struct,
2774 * if it fails then go to MSI/legacy.
2775 */
2776 qdev->msi_x_entry = kcalloc(qdev->intr_count,
2777 sizeof(struct msix_entry),
2778 GFP_KERNEL);
2779 if (!qdev->msi_x_entry) {
2780 irq_type = MSI_IRQ;
2781 goto msi;
2782 }
2784 for (i = 0; i < qdev->intr_count; i++)
2785 qdev->msi_x_entry[i].entry = i;
2787 /* Loop to get our vectors. We start with
2788 * what we want and settle for what we get.
2789 */
2790 do {
2791 err = pci_enable_msix(qdev->pdev,
2792 qdev->msi_x_entry, qdev->intr_count);
2793 if (err > 0)
2794 qdev->intr_count = err;
2795 } while (err > 0);
2797 if (err < 0) {
2798 kfree(qdev->msi_x_entry);
2799 qdev->msi_x_entry = NULL;
2800 QPRINTK(qdev, IFUP, WARNING,
2801 "MSI-X Enable failed, trying MSI.\n");
2802 qdev->intr_count = 1;
2803 irq_type = MSI_IRQ;
2804 } else if (err == 0) {
2805 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2806 QPRINTK(qdev, IFUP, INFO,
2807 "MSI-X Enabled, got %d vectors.\n",
2808 qdev->intr_count);
2809 return;
2810 }
2811 }
2812 msi:
2813 qdev->intr_count = 1;
2814 if (irq_type == MSI_IRQ) {
2815 if (!pci_enable_msi(qdev->pdev)) {
2816 set_bit(QL_MSI_ENABLED, &qdev->flags);
2817 QPRINTK(qdev, IFUP, INFO,
2818 "Running with MSI interrupts.\n");
2819 return;
2820 }
2821 }
2822 irq_type = LEG_IRQ;
2823 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2824 }
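/* Example (illustrative): the fallback ladder above degrades gracefully.
 * Asking for 8 vectors on a machine that grants only 4 leaves
 * qdev->intr_count == 4 with QL_MSIX_ENABLED set; a kcalloc() or
 * pci_enable_msix() failure drops to MSI, and a pci_enable_msi() failure
 * ends at legacy INTx with a single shared vector.
 */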
2826 /* Each vector services 1 RSS ring and 1 or more
2827 * TX completion rings. This function loops through
2828 * the TX completion rings and assigns the vector that
2829 * will service it. An example would be if there are
2830 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2831 * This would mean that vector 0 would service RSS ring 0
2832 * and TX completion rings 0,1,2 and 3. Vector 1 would
2833 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2834 */
2835 static void ql_set_tx_vect(struct ql_adapter *qdev)
2836 {
2837 int i, j, vect;
2838 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2840 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2841 /* Assign irq vectors to TX rx_rings.*/
2842 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2843 i < qdev->rx_ring_count; i++) {
2844 if (j == tx_rings_per_vector) {
2845 vect++;
2846 j = 0;
2847 }
2848 qdev->rx_ring[i].irq = vect;
2849 j++;
2850 }
2851 } else {
2852 /* For single vector all rings have an irq
2853 * of zero.
2854 */
2855 for (i = 0; i < qdev->rx_ring_count; i++)
2856 qdev->rx_ring[i].irq = 0;
2857 }
2858 }
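/* Sketch (illustrative): the loop above is equivalent to computing, for TX
 * completion ring t (0-based, counted after the RSS rings):
 *
 *   irq = t / tx_rings_per_vector;
 *
 * e.g. with 2 vectors and 8 TX completion rings, rings 0-3 map to vector 0
 * and rings 4-7 to vector 1, matching the comment above.
 */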
2860 /* Set the interrupt mask for this vector. Each vector
2861 * will service 1 RSS ring and 1 or more
2862 * TX completion rings. This function sets up a bit mask
2863 * per vector that indicates which rings it services.
2864 */
2865 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2866 {
2867 int j, vect = ctx->intr;
2868 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2870 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2871 /* Add the RSS ring serviced by this vector
2872 * to the mask.
2873 */
2874 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2875 /* Add the TX ring(s) serviced by this vector
2876 * to the mask. */
2877 for (j = 0; j < tx_rings_per_vector; j++) {
2878 ctx->irq_mask |=
2879 (1 << qdev->rx_ring[qdev->rss_ring_count +
2880 (vect * tx_rings_per_vector) + j].cq_id);
2881 }
2882 } else {
2883 /* For single vector we just shift each queue's
2884 * ID into the mask.
2885 */
2886 for (j = 0; j < qdev->rx_ring_count; j++)
2887 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2888 }
2889 }
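/* Example (illustrative): with 2 vectors and 8 TX completion rings, vector 1
 * services RSS ring 1 (cq_id 1) plus the TX completion rings with cq_ids
 * 6-9, assuming cq_ids are assigned sequentially (RSS rings first, then TX
 * completion rings, as done in ql_configure_rings()):
 *
 *   irq_mask = (1 << 1) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9);
 */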
2891 /*
2892 * Here we build the intr_context structures based on
2893 * our rx_ring count and intr vector count.
2894 * The intr_context structure is used to hook each vector
2895 * to possibly different handlers.
2896 */
2897 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2898 {
2899 int i = 0;
2900 struct intr_context *intr_context = &qdev->intr_context[0];
2902 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2903 /* Each rx_ring has its
2904 * own intr_context since we have separate
2905 * vectors for each queue.
2906 */
2907 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2908 qdev->rx_ring[i].irq = i;
2909 intr_context->intr = i;
2910 intr_context->qdev = qdev;
2911 /* Set up this vector's bit-mask that indicates
2912 * which queues it services.
2913 */
2914 ql_set_irq_mask(qdev, intr_context);
2915 /*
2916 * We set up each vector's enable/disable/read bits so
2917 * there's no bit/mask calculations in the critical path.
2918 */
2919 intr_context->intr_en_mask =
2920 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2921 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2922 | i;
2923 intr_context->intr_dis_mask =
2924 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2925 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2926 INTR_EN_IHD | i;
2927 intr_context->intr_read_mask =
2928 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2929 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2930 i;
2931 if (i == 0) {
2932 /* The first vector/queue handles
2933 * broadcast/multicast, fatal errors,
2934 * and firmware events. This in addition
2935 * to normal inbound NAPI processing.
2936 */
2937 intr_context->handler = qlge_isr;
2938 sprintf(intr_context->name, "%s-rx-%d",
2939 qdev->ndev->name, i);
2940 } else {
2941 /*
2942 * Inbound queues handle unicast frames only.
2943 */
2944 intr_context->handler = qlge_msix_rx_isr;
2945 sprintf(intr_context->name, "%s-rx-%d",
2946 qdev->ndev->name, i);
2947 }
2948 }
2949 } else {
2950 /*
2951 * All rx_rings use the same intr_context since
2952 * there is only one vector.
2953 */
2954 intr_context->intr = 0;
2955 intr_context->qdev = qdev;
2956 /*
2957 * We set up each vector's enable/disable/read bits so
2958 * there's no bit/mask calculations in the critical path.
2959 */
2960 intr_context->intr_en_mask =
2961 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2962 intr_context->intr_dis_mask =
2963 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2964 INTR_EN_TYPE_DISABLE;
2965 intr_context->intr_read_mask =
2966 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2967 /*
2968 * Single interrupt means one handler for all rings.
2969 */
2970 intr_context->handler = qlge_isr;
2971 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2972 /* Set up this vector's bit-mask that indicates
2973 * which queues it services. In this case there is
2974 * a single vector so it will service all RSS and
2975 * TX completion rings.
2976 */
2977 ql_set_irq_mask(qdev, intr_context);
2978 }
2979 /* Tell the TX completion rings which MSIx vector
2980 * they will be using.
2981 */
2982 ql_set_tx_vect(qdev);
2983 }
2985 static void ql_free_irq(struct ql_adapter *qdev)
2986 {
2987 int i;
2988 struct intr_context *intr_context = &qdev->intr_context[0];
2990 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2991 if (intr_context->hooked) {
2992 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2993 free_irq(qdev->msi_x_entry[i].vector,
2994 &qdev->rx_ring[i]);
2995 QPRINTK(qdev, IFDOWN, DEBUG,
2996 "freeing msix interrupt %d.\n", i);
2997 } else {
2998 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2999 QPRINTK(qdev, IFDOWN, DEBUG,
3000 "freeing msi interrupt %d.\n", i);
3001 }
3002 }
3003 }
3004 ql_disable_msix(qdev);
3005 }
3007 static int ql_request_irq(struct ql_adapter *qdev)
3008 {
3009 int i;
3010 int status = 0;
3011 struct pci_dev *pdev = qdev->pdev;
3012 struct intr_context *intr_context = &qdev->intr_context[0];
3014 ql_resolve_queues_to_irqs(qdev);
3016 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3017 atomic_set(&intr_context->irq_cnt, 0);
3018 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3019 status = request_irq(qdev->msi_x_entry[i].vector,
3020 intr_context->handler,
3021 0,
3022 intr_context->name,
3023 &qdev->rx_ring[i]);
3024 if (status) {
3025 QPRINTK(qdev, IFUP, ERR,
3026 "Failed request for MSIX interrupt %d.\n",
3027 i);
3028 goto err_irq;
3029 } else {
3030 QPRINTK(qdev, IFUP, DEBUG,
3031 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3032 i,
3033 qdev->rx_ring[i].type ==
3034 DEFAULT_Q ? "DEFAULT_Q" : "",
3035 qdev->rx_ring[i].type ==
3036 TX_Q ? "TX_Q" : "",
3037 qdev->rx_ring[i].type ==
3038 RX_Q ? "RX_Q" : "", intr_context->name);
3039 }
3040 } else {
3041 QPRINTK(qdev, IFUP, DEBUG,
3042 "trying msi or legacy interrupts.\n");
3043 QPRINTK(qdev, IFUP, DEBUG,
3044 "%s: irq = %d.\n", __func__, pdev->irq);
3045 QPRINTK(qdev, IFUP, DEBUG,
3046 "%s: context->name = %s.\n", __func__,
3047 intr_context->name);
3048 QPRINTK(qdev, IFUP, DEBUG,
3049 "%s: dev_id = 0x%p.\n", __func__,
3050 &qdev->rx_ring[0]);
3051 status =
3052 request_irq(pdev->irq, qlge_isr,
3053 test_bit(QL_MSI_ENABLED,
3054 &qdev->
3055 flags) ? 0 : IRQF_SHARED,
3056 intr_context->name, &qdev->rx_ring[0]);
3057 if (status)
3058 goto err_irq;
3060 QPRINTK(qdev, IFUP, ERR,
3061 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3062 i,
3063 qdev->rx_ring[0].type ==
3064 DEFAULT_Q ? "DEFAULT_Q" : "",
3065 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3066 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3067 intr_context->name);
3068 }
3069 intr_context->hooked = 1;
3070 }
3071 return status;
3072 err_irq:
3073 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3074 ql_free_irq(qdev);
3075 return status;
3076 }
3078 static int ql_start_rss(struct ql_adapter *qdev)
3079 {
3080 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3081 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3082 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3083 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3084 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3085 0xbe, 0xac, 0x01, 0xfa};
3086 struct ricb *ricb = &qdev->ricb;
3087 int status = 0;
3088 int i;
3089 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3091 memset((void *)ricb, 0, sizeof(*ricb));
3093 ricb->base_cq = RSS_L4K;
3094 ricb->flags =
3095 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3096 ricb->mask = cpu_to_le16((u16)(0x3ff));
3098 /*
3099 * Fill out the Indirection Table.
3100 */
3101 for (i = 0; i < 1024; i++)
3102 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3104 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3105 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3107 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
3109 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3110 if (status) {
3111 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3112 return status;
3113 }
3114 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
3115 return status;
3116 }
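/* Example (illustrative): the indirection table maps a 10-bit hash
 * (ricb->mask = 0x3ff, 1024 entries) to a completion queue. With
 * rss_ring_count = 4 the loop above fills the table with 0,1,2,3,0,1,2,3,...
 * because (i & (4 - 1)) == i % 4. This masking relies on rss_ring_count
 * being a power of two to spread flows evenly.
 */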
3118 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3119 {
3120 int i, status = 0;
3122 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3123 if (status)
3124 return status;
3125 /* Clear all the entries in the routing table. */
3126 for (i = 0; i < 16; i++) {
3127 status = ql_set_routing_reg(qdev, i, 0, 0);
3128 if (status) {
3129 QPRINTK(qdev, IFUP, ERR,
3130 "Failed to init routing register for CAM "
3131 "packets.\n");
3132 break;
3133 }
3134 }
3135 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3136 return status;
3137 }
3139 /* Initialize the frame-to-queue routing. */
3140 static int ql_route_initialize(struct ql_adapter *qdev)
3141 {
3142 int status = 0;
3144 /* Clear all the entries in the routing table. */
3145 status = ql_clear_routing_entries(qdev);
3146 if (status)
3147 return status;
3149 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3150 if (status)
3151 return status;
3153 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3154 if (status) {
3155 QPRINTK(qdev, IFUP, ERR,
3156 "Failed to init routing register for error packets.\n");
3157 goto exit;
3158 }
3159 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3160 if (status) {
3161 QPRINTK(qdev, IFUP, ERR,
3162 "Failed to init routing register for broadcast packets.\n");
3163 goto exit;
3164 }
3165 /* If we have more than one inbound queue, then turn on RSS in the
3166 * routing block.
3167 */
3168 if (qdev->rss_ring_count > 1) {
3169 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3170 RT_IDX_RSS_MATCH, 1);
3171 if (status) {
3172 QPRINTK(qdev, IFUP, ERR,
3173 "Failed to init routing register for MATCH RSS packets.\n");
3174 goto exit;
3175 }
3176 }
3178 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3179 RT_IDX_CAM_HIT, 1);
3180 if (status)
3181 QPRINTK(qdev, IFUP, ERR,
3182 "Failed to init routing register for CAM packets.\n");
3183 exit:
3184 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3185 return status;
3186 }
3188 int ql_cam_route_initialize(struct ql_adapter *qdev)
3189 {
3190 int status, set;
3192 /* Check if the link is up, and use that to
3193 * determine if we are setting or clearing
3194 * the MAC address in the CAM.
3195 */
3196 set = ql_read32(qdev, STS);
3197 set &= qdev->port_link_up;
3198 status = ql_set_mac_addr(qdev, set);
3199 if (status) {
3200 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3201 return status;
3202 }
3204 status = ql_route_initialize(qdev);
3205 if (status)
3206 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3208 return status;
3209 }
3211 static int ql_adapter_initialize(struct ql_adapter *qdev)
3212 {
3213 u32 value, mask;
3214 int i;
3215 int status = 0;
3217 /*
3218 * Set up the System register to halt on errors.
3219 */
3220 value = SYS_EFE | SYS_FAE;
3221 mask = value << 16;
3222 ql_write32(qdev, SYS, mask | value);
3224 /* Set the default queue, and VLAN behavior. */
3225 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3226 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3227 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3229 /* Set the MPI interrupt to enabled. */
3230 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3232 /* Enable the function, set pagesize, enable error checking. */
3233 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3234 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3236 /* Set/clear header splitting. */
3237 mask = FSC_VM_PAGESIZE_MASK |
3238 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3239 ql_write32(qdev, FSC, mask | value);
3241 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3242 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3244 /* Set RX packet routing to use port/pci function on which the
3245 * packet arrived, in addition to usual frame routing.
3246 * This is helpful on bonding where both interfaces can have
3247 * the same MAC address.
3248 */
3249 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3251 /* Start up the rx queues. */
3252 for (i = 0; i < qdev->rx_ring_count; i++) {
3253 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3254 if (status) {
3255 QPRINTK(qdev, IFUP, ERR,
3256 "Failed to start rx ring[%d].\n", i);
3257 return status;
3258 }
3259 }
3261 /* If there is more than one inbound completion queue
3262 * then download a RICB to configure RSS.
3263 */
3264 if (qdev->rss_ring_count > 1) {
3265 status = ql_start_rss(qdev);
3266 if (status) {
3267 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3268 return status;
3269 }
3270 }
3272 /* Start up the tx queues. */
3273 for (i = 0; i < qdev->tx_ring_count; i++) {
3274 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3275 if (status) {
3276 QPRINTK(qdev, IFUP, ERR,
3277 "Failed to start tx ring[%d].\n", i);
3278 return status;
3279 }
3280 }
3282 /* Initialize the port and set the max framesize. */
3283 status = qdev->nic_ops->port_initialize(qdev);
3284 if (status) {
3285 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3286 return status;
3287 }
3289 /* Set up the MAC address and frame routing filter. */
3290 status = ql_cam_route_initialize(qdev);
3291 if (status) {
3292 QPRINTK(qdev, IFUP, ERR,
3293 "Failed to init CAM/Routing tables.\n");
3294 return status;
3295 }
3297 /* Start NAPI for the RSS queues. */
3298 for (i = 0; i < qdev->rss_ring_count; i++) {
3299 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
3300 i);
3301 napi_enable(&qdev->rx_ring[i].napi);
3302 }
3304 return status;
3305 }
3307 /* Issue soft reset to chip. */
3308 static int ql_adapter_reset(struct ql_adapter *qdev)
3309 {
3310 u32 value;
3311 int status = 0;
3312 unsigned long end_jiffies;
3314 /* Clear all the entries in the routing table. */
3315 status = ql_clear_routing_entries(qdev);
3316 if (status) {
3317 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3318 return status;
3319 }
3321 end_jiffies = jiffies +
3322 max((unsigned long)1, usecs_to_jiffies(30));
3323 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3325 do {
3326 value = ql_read32(qdev, RST_FO);
3327 if ((value & RST_FO_FR) == 0)
3328 break;
3329 cpu_relax();
3330 } while (time_before(jiffies, end_jiffies));
3332 if (value & RST_FO_FR) {
3333 QPRINTK(qdev, IFDOWN, ERR,
3334 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3335 status = -ETIMEDOUT;
3336 }
3338 return status;
3339 }
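/* Note (illustrative): following this file's register convention (see the
 * SYS and NIC_RCV_CFG writes in ql_adapter_initialize()), the upper 16 bits
 * of the RST_FO write act as a mask, so (RST_FO_FR << 16) | RST_FO_FR both
 * unmasks and sets the function-reset bit in a single write; the loop then
 * polls for the chip to self-clear RST_FO_FR within roughly 30 usecs before
 * giving up with -ETIMEDOUT.
 */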
3341 static void ql_display_dev_info(struct net_device *ndev)
3342 {
3343 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3345 QPRINTK(qdev, PROBE, INFO,
3346 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3347 "XG Roll = %d, XG Rev = %d.\n",
3348 qdev->func,
3349 qdev->port,
3350 qdev->chip_rev_id & 0x0000000f,
3351 qdev->chip_rev_id >> 4 & 0x0000000f,
3352 qdev->chip_rev_id >> 8 & 0x0000000f,
3353 qdev->chip_rev_id >> 12 & 0x0000000f);
3354 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3355 }
3357 static int ql_adapter_down(struct ql_adapter *qdev)
3358 {
3359 int i, status = 0;
3361 ql_link_off(qdev);
3363 /* Don't kill the reset worker thread if we
3364 * are in the process of recovery.
3365 */
3366 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3367 cancel_delayed_work_sync(&qdev->asic_reset_work);
3368 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3369 cancel_delayed_work_sync(&qdev->mpi_work);
3370 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3371 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3373 for (i = 0; i < qdev->rss_ring_count; i++)
3374 napi_disable(&qdev->rx_ring[i].napi);
3376 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3378 ql_disable_interrupts(qdev);
3380 ql_tx_ring_clean(qdev);
3382 /* Call netif_napi_del() from common point.
3383 */
3384 for (i = 0; i < qdev->rss_ring_count; i++)
3385 netif_napi_del(&qdev->rx_ring[i].napi);
3387 ql_free_rx_buffers(qdev);
3389 status = ql_adapter_reset(qdev);
3390 if (status)
3391 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3392 qdev->func);
3393 return status;
3394 }
3396 static int ql_adapter_up(struct ql_adapter *qdev)
3397 {
3398 int err = 0;
3400 err = ql_adapter_initialize(qdev);
3401 if (err) {
3402 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3403 goto err_init;
3404 }
3405 set_bit(QL_ADAPTER_UP, &qdev->flags);
3406 ql_alloc_rx_buffers(qdev);
3407 /* If the port is initialized and the
3408 * link is up then turn on the carrier.
3409 */
3410 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3411 (ql_read32(qdev, STS) & qdev->port_link_up))
3412 ql_link_on(qdev);
3413 ql_enable_interrupts(qdev);
3414 ql_enable_all_completion_interrupts(qdev);
3415 netif_tx_start_all_queues(qdev->ndev);
3417 return 0;
3418 err_init:
3419 ql_adapter_reset(qdev);
3420 return err;
3421 }
3423 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3424 {
3425 ql_free_mem_resources(qdev);
3426 ql_free_irq(qdev);
3427 }
3429 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3430 {
3431 int status = 0;
3433 if (ql_alloc_mem_resources(qdev)) {
3434 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3435 return -ENOMEM;
3436 }
3437 status = ql_request_irq(qdev);
3438 return status;
3439 }
3441 static int qlge_close(struct net_device *ndev)
3442 {
3443 struct ql_adapter *qdev = netdev_priv(ndev);
3445 /*
3446 * Wait for device to recover from a reset.
3447 * (Rarely happens, but possible.)
3448 */
3449 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3450 msleep(1);
3451 ql_adapter_down(qdev);
3452 ql_release_adapter_resources(qdev);
3453 return 0;
3454 }
3456 static int ql_configure_rings(struct ql_adapter *qdev)
3457 {
3458 int i;
3459 struct rx_ring *rx_ring;
3460 struct tx_ring *tx_ring;
3461 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3463 /* In a perfect world we have one RSS ring for each CPU
3464 * and each has its own vector. To do that we ask for
3465 * cpu_cnt vectors. ql_enable_msix() will adjust the
3466 * vector count to what we actually get. We then
3467 * allocate an RSS ring for each.
3468 * Essentially, we are doing min(cpu_count, msix_vector_count).
3469 */
3470 qdev->intr_count = cpu_cnt;
3471 ql_enable_msix(qdev);
3472 /* Adjust the RSS ring count to the actual vector count. */
3473 qdev->rss_ring_count = qdev->intr_count;
3474 qdev->tx_ring_count = cpu_cnt;
3475 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3477 for (i = 0; i < qdev->tx_ring_count; i++) {
3478 tx_ring = &qdev->tx_ring[i];
3479 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3480 tx_ring->qdev = qdev;
3481 tx_ring->wq_id = i;
3482 tx_ring->wq_len = qdev->tx_ring_size;
3483 tx_ring->wq_size =
3484 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3486 /*
3487 * The completion queue IDs for the tx rings start
3488 * immediately after the rss rings.
3489 */
3490 tx_ring->cq_id = qdev->rss_ring_count + i;
3491 }
3493 for (i = 0; i < qdev->rx_ring_count; i++) {
3494 rx_ring = &qdev->rx_ring[i];
3495 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3496 rx_ring->qdev = qdev;
3497 rx_ring->cq_id = i;
3498 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3499 if (i < qdev->rss_ring_count) {
3500 /*
3501 * Inbound (RSS) queues.
3502 */
3503 rx_ring->cq_len = qdev->rx_ring_size;
3504 rx_ring->cq_size =
3505 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3506 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3507 rx_ring->lbq_size =
3508 rx_ring->lbq_len * sizeof(__le64);
3509 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3510 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3511 rx_ring->sbq_size =
3512 rx_ring->sbq_len * sizeof(__le64);
3513 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3514 rx_ring->type = RX_Q;
3515 } else {
3516 /*
3517 * Outbound queue handles outbound completions only.
3518 */
3519 /* outbound cq is same size as tx_ring it services. */
3520 rx_ring->cq_len = qdev->tx_ring_size;
3521 rx_ring->cq_size =
3522 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3523 rx_ring->lbq_len = 0;
3524 rx_ring->lbq_size = 0;
3525 rx_ring->lbq_buf_size = 0;
3526 rx_ring->sbq_len = 0;
3527 rx_ring->sbq_size = 0;
3528 rx_ring->sbq_buf_size = 0;
3529 rx_ring->type = TX_Q;
3530 }
3531 }
3532 return 0;
3533 }
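/* Example (illustrative): on a 4-CPU machine that gets all 4 MSI-X vectors,
 * this yields rss_ring_count = 4, tx_ring_count = 4, and rx_ring_count = 8:
 * rx_ring[0..3] are inbound RSS queues (cq_id 0-3) and rx_ring[4..7] are
 * outbound-completion queues (cq_id 4-7), paired with tx_ring[0..3] via
 * tx_ring->cq_id = rss_ring_count + i.
 */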
3535 static int qlge_open(struct net_device *ndev)
3536 {
3537 int err = 0;
3538 struct ql_adapter *qdev = netdev_priv(ndev);
3540 err = ql_configure_rings(qdev);
3541 if (err)
3542 return err;
3544 err = ql_get_adapter_resources(qdev);
3545 if (err)
3546 goto error_up;
3548 err = ql_adapter_up(qdev);
3549 if (err)
3550 goto error_up;
3552 return err;
3554 error_up:
3555 ql_release_adapter_resources(qdev);
3556 return err;
3557 }
3559 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3560 {
3561 struct ql_adapter *qdev = netdev_priv(ndev);
3563 if (ndev->mtu == 1500 && new_mtu == 9000) {
3564 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3565 queue_delayed_work(qdev->workqueue,
3566 &qdev->mpi_port_cfg_work, 0);
3567 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3568 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3569 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3570 (ndev->mtu == 9000 && new_mtu == 9000)) {
3571 return 0;
3572 } else
3573 return -EINVAL;
3574 ndev->mtu = new_mtu;
3575 return 0;
3576 }
3578 static struct net_device_stats *qlge_get_stats(struct net_device
3579 *ndev)
3580 {
3581 struct ql_adapter *qdev = netdev_priv(ndev);
3582 return &qdev->stats;
3583 }
3585 static void qlge_set_multicast_list(struct net_device *ndev)
3586 {
3587 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3588 struct dev_mc_list *mc_ptr;
3589 int i, status;
3591 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3592 if (status)
3593 return;
3594 /*
3595 * Set or clear promiscuous mode if a
3596 * transition is taking place.
3597 */
3598 if (ndev->flags & IFF_PROMISC) {
3599 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3600 if (ql_set_routing_reg
3601 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3602 QPRINTK(qdev, HW, ERR,
3603 "Failed to set promiscuous mode.\n");
3604 } else {
3605 set_bit(QL_PROMISCUOUS, &qdev->flags);
3606 }
3607 }
3608 } else {
3609 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3610 if (ql_set_routing_reg
3611 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3612 QPRINTK(qdev, HW, ERR,
3613 "Failed to clear promiscuous mode.\n");
3614 } else {
3615 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3616 }
3617 }
3618 }
3620 /*
3621 * Set or clear all multicast mode if a
3622 * transition is taking place.
3623 */
3624 if ((ndev->flags & IFF_ALLMULTI) ||
3625 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3626 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3627 if (ql_set_routing_reg
3628 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3629 QPRINTK(qdev, HW, ERR,
3630 "Failed to set all-multi mode.\n");
3631 } else {
3632 set_bit(QL_ALLMULTI, &qdev->flags);
3633 }
3634 }
3635 } else {
3636 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3637 if (ql_set_routing_reg
3638 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3639 QPRINTK(qdev, HW, ERR,
3640 "Failed to clear all-multi mode.\n");
3641 } else {
3642 clear_bit(QL_ALLMULTI, &qdev->flags);
3643 }
3644 }
3645 }
3647 if (ndev->mc_count) {
3648 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3649 if (status)
3650 goto exit;
3651 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3652 i++, mc_ptr = mc_ptr->next)
3653 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3654 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3655 QPRINTK(qdev, HW, ERR,
3656 "Failed to load multicast address.\n");
3657 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3658 goto exit;
3659 }
3660 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3661 if (ql_set_routing_reg
3662 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3663 QPRINTK(qdev, HW, ERR,
3664 "Failed to set multicast match mode.\n");
3665 } else {
3666 set_bit(QL_ALLMULTI, &qdev->flags);
3667 }
3668 }
3669 exit:
3670 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3671 }
3673 static int qlge_set_mac_address(struct net_device *ndev, void *p)
3674 {
3675 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3676 struct sockaddr *addr = p;
3677 int status;
3679 if (netif_running(ndev))
3680 return -EBUSY;
3682 if (!is_valid_ether_addr(addr->sa_data))
3683 return -EADDRNOTAVAIL;
3684 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3686 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3687 if (status)
3688 return status;
3689 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3690 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3691 if (status)
3692 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3693 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3694 return status;
3695 }
3697 static void qlge_tx_timeout(struct net_device *ndev)
3698 {
3699 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3700 ql_queue_asic_error(qdev);
3701 }
3703 static void ql_asic_reset_work(struct work_struct *work)
3704 {
3705 struct ql_adapter *qdev =
3706 container_of(work, struct ql_adapter, asic_reset_work.work);
3707 int status;
3708 rtnl_lock();
3709 status = ql_adapter_down(qdev);
3710 if (status)
3711 goto error;
3713 status = ql_adapter_up(qdev);
3714 if (status)
3715 goto error;
3716 rtnl_unlock();
3717 return;
3718 error:
3719 QPRINTK(qdev, IFUP, ALERT,
3720 "Driver up/down cycle failed, closing device\n");
3722 set_bit(QL_ADAPTER_UP, &qdev->flags);
3723 dev_close(qdev->ndev);
3724 rtnl_unlock();
3725 }
3727 static struct nic_operations qla8012_nic_ops = {
3728 .get_flash = ql_get_8012_flash_params,
3729 .port_initialize = ql_8012_port_initialize,
3730 };
3732 static struct nic_operations qla8000_nic_ops = {
3733 .get_flash = ql_get_8000_flash_params,
3734 .port_initialize = ql_8000_port_initialize,
3735 };
3737 /* Find the pcie function number for the other NIC
3738 * on this chip. Since both NIC functions share a
3739 * common firmware we have the lowest enabled function
3740 * do any common work. Examples would be resetting
3741 * after a fatal firmware error, or doing a firmware
3742 * coredump.
3743 */
3744 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
3745 {
3746 int status = 0;
3747 u32 temp;
3748 u32 nic_func1, nic_func2;
3750 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3751 &temp);
3752 if (status)
3753 return status;
3755 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3756 MPI_TEST_NIC_FUNC_MASK);
3757 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3758 MPI_TEST_NIC_FUNC_MASK);
3760 if (qdev->func == nic_func1)
3761 qdev->alt_func = nic_func2;
3762 else if (qdev->func == nic_func2)
3763 qdev->alt_func = nic_func1;
3764 else
3765 status = -EIO;
3767 return status;
3768 }
3770 static int ql_get_board_info(struct ql_adapter *qdev)
3771 {
3772 int status;
3773 qdev->func =
3774 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3775 if (qdev->func > 3)
3776 return -EIO;
3778 status = ql_get_alt_pcie_func(qdev);
3779 if (status)
3780 return status;
3782 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3783 if (qdev->port) {
3784 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3785 qdev->port_link_up = STS_PL1;
3786 qdev->port_init = STS_PI1;
3787 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3788 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3789 } else {
3790 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3791 qdev->port_link_up = STS_PL0;
3792 qdev->port_init = STS_PI0;
3793 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3794 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3795 }
3796 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3797 qdev->device_id = qdev->pdev->device;
3798 if (qdev->device_id == QLGE_DEVICE_ID_8012)
3799 qdev->nic_ops = &qla8012_nic_ops;
3800 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3801 qdev->nic_ops = &qla8000_nic_ops;
3802 return status;
3803 }
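/* Example (illustrative): the two NIC functions on one chip pair up here.
 * If the STS register reports func 1 and the MPI config reports
 * nic_func1 = 1 and nic_func2 = 2, then alt_func = 2 and port = 0: the
 * lower-numbered enabled function owns port 0 and does the common work
 * described in the comment above ql_get_alt_pcie_func().
 */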
3805 static void ql_release_all(struct pci_dev *pdev)
3806 {
3807 struct net_device *ndev = pci_get_drvdata(pdev);
3808 struct ql_adapter *qdev = netdev_priv(ndev);
3810 if (qdev->workqueue) {
3811 destroy_workqueue(qdev->workqueue);
3812 qdev->workqueue = NULL;
3813 }
3815 if (qdev->reg_base)
3816 iounmap(qdev->reg_base);
3817 if (qdev->doorbell_area)
3818 iounmap(qdev->doorbell_area);
3819 pci_release_regions(pdev);
3820 pci_set_drvdata(pdev, NULL);
3821 }
3823 static int __devinit ql_init_device(struct pci_dev *pdev,
3824 struct net_device *ndev, int cards_found)
3825 {
3826 struct ql_adapter *qdev = netdev_priv(ndev);
3827 int pos, err = 0;
3828 u16 val16;
3830 memset((void *)qdev, 0, sizeof(*qdev));
3831 err = pci_enable_device(pdev);
3832 if (err) {
3833 dev_err(&pdev->dev, "PCI device enable failed.\n");
3834 return err;
3835 }
3837 qdev->ndev = ndev;
3838 qdev->pdev = pdev;
3839 pci_set_drvdata(pdev, ndev);
3840 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3841 if (pos <= 0) {
3842 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3843 "aborting.\n");
3844 return pos;
3845 } else {
3846 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3847 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3848 val16 |= (PCI_EXP_DEVCTL_CERE |
3849 PCI_EXP_DEVCTL_NFERE |
3850 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3851 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3852 }
3854 err = pci_request_regions(pdev, DRV_NAME);
3855 if (err) {
3856 dev_err(&pdev->dev, "PCI region request failed.\n");
3857 return err;
3858 }
3860 pci_set_master(pdev);
3861 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3862 set_bit(QL_DMA64, &qdev->flags);
3863 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3864 } else {
3865 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3866 if (!err)
3867 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3868 }
3870 if (err) {
3871 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3872 goto err_out;
3873 }
3875 qdev->reg_base =
3876 ioremap_nocache(pci_resource_start(pdev, 1),
3877 pci_resource_len(pdev, 1));
3878 if (!qdev->reg_base) {
3879 dev_err(&pdev->dev, "Register mapping failed.\n");
3880 err = -ENOMEM;
3881 goto err_out;
3882 }
3884 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3885 qdev->doorbell_area =
3886 ioremap_nocache(pci_resource_start(pdev, 3),
3887 pci_resource_len(pdev, 3));
3888 if (!qdev->doorbell_area) {
3889 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3890 err = -ENOMEM;
3891 goto err_out;
3892 }
3894 err = ql_get_board_info(qdev);
3895 if (err) {
3896 dev_err(&pdev->dev, "Register access failed.\n");
3897 err = -EIO;
3898 goto err_out;
3899 }
3900 qdev->msg_enable = netif_msg_init(debug, default_msg);
3901 spin_lock_init(&qdev->hw_lock);
3902 spin_lock_init(&qdev->stats_lock);
3904 /* make sure the EEPROM is good */
3905 err = qdev->nic_ops->get_flash(qdev);
3906 if (err) {
3907 dev_err(&pdev->dev, "Invalid FLASH.\n");
3908 goto err_out;
3909 }
3911 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3913 /* Set up the default ring sizes. */
3914 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3915 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3917 /* Set up the coalescing parameters. */
3918 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3919 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3920 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3921 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3923 /*
3924 * Set up the operating parameters.
3925 */
3926 qdev->rx_csum = 1;
3927 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3928 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3929 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3930 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3931 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
3932 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
3933 init_completion(&qdev->ide_completion);
3935 if (!cards_found) {
3936 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3937 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3938 DRV_NAME, DRV_VERSION);
3939 }
3940 return 0;
3941 err_out:
3942 ql_release_all(pdev);
3943 pci_disable_device(pdev);
3944 return err;
3945 }
3948 static const struct net_device_ops qlge_netdev_ops = {
3949 .ndo_open = qlge_open,
3950 .ndo_stop = qlge_close,
3951 .ndo_start_xmit = qlge_send,
3952 .ndo_change_mtu = qlge_change_mtu,
3953 .ndo_get_stats = qlge_get_stats,
3954 .ndo_set_multicast_list = qlge_set_multicast_list,
3955 .ndo_set_mac_address = qlge_set_mac_address,
3956 .ndo_validate_addr = eth_validate_addr,
3957 .ndo_tx_timeout = qlge_tx_timeout,
3958 .ndo_vlan_rx_register = ql_vlan_rx_register,
3959 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3960 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3961 };
3963 static int __devinit qlge_probe(struct pci_dev *pdev,
3964 const struct pci_device_id *pci_entry)
3965 {
3966 struct net_device *ndev = NULL;
3967 struct ql_adapter *qdev = NULL;
3968 static int cards_found = 0;
3969 int err = 0;
3971 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
3972 min(MAX_CPUS, (int)num_online_cpus()));
3973 if (!ndev)
3974 return -ENOMEM;
3976 err = ql_init_device(pdev, ndev, cards_found);
3977 if (err < 0) {
3978 free_netdev(ndev);
3979 return err;
3980 }
3982 qdev = netdev_priv(ndev);
3983 SET_NETDEV_DEV(ndev, &pdev->dev);
3984 ndev->features = (0
3985 | NETIF_F_IP_CSUM
3986 | NETIF_F_SG
3987 | NETIF_F_TSO
3988 | NETIF_F_TSO6
3989 | NETIF_F_TSO_ECN
3990 | NETIF_F_HW_VLAN_TX
3991 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3992 ndev->features |= NETIF_F_GRO;
3994 if (test_bit(QL_DMA64, &qdev->flags))
3995 ndev->features |= NETIF_F_HIGHDMA;
3997 /*
3998 * Set up net_device structure.
3999 */
4000 ndev->tx_queue_len = qdev->tx_ring_size;
4001 ndev->irq = pdev->irq;
4003 ndev->netdev_ops = &qlge_netdev_ops;
4004 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4005 ndev->watchdog_timeo = 10 * HZ;
4007 err = register_netdev(ndev);
4008 if (err) {
4009 dev_err(&pdev->dev, "net device registration failed.\n");
4010 ql_release_all(pdev);
4011 pci_disable_device(pdev);
4012 return err;
4013 }
4014 ql_link_off(qdev);
4015 ql_display_dev_info(ndev);
4016 cards_found++;
4017 return 0;
4018 }
4020 static void __devexit qlge_remove(struct pci_dev *pdev)
4021 {
4022 struct net_device *ndev = pci_get_drvdata(pdev);
4023 unregister_netdev(ndev);
4024 ql_release_all(pdev);
4025 pci_disable_device(pdev);
4026 free_netdev(ndev);
4027 }
4029 /*
4030 * This callback is called by the PCI subsystem whenever
4031 * a PCI bus error is detected.
4032 */
4033 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4034 enum pci_channel_state state)
4035 {
4036 struct net_device *ndev = pci_get_drvdata(pdev);
4037 struct ql_adapter *qdev = netdev_priv(ndev);
4039 netif_device_detach(ndev);
4041 if (state == pci_channel_io_perm_failure)
4042 return PCI_ERS_RESULT_DISCONNECT;
4044 if (netif_running(ndev))
4045 ql_adapter_down(qdev);
4047 pci_disable_device(pdev);
4049 /* Request a slot reset. */
4050 return PCI_ERS_RESULT_NEED_RESET;
4051 }
4053 /*
4054 * This callback is called after the PCI bus has been reset.
4055 * Basically, this tries to restart the card from scratch.
4056 * This is a shortened version of the device probe/discovery code;
4057 * it resembles the first half of the probe routine.
4058 */
4059 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4060 {
4061 struct net_device *ndev = pci_get_drvdata(pdev);
4062 struct ql_adapter *qdev = netdev_priv(ndev);
4064 if (pci_enable_device(pdev)) {
4065 QPRINTK(qdev, IFUP, ERR,
4066 "Cannot re-enable PCI device after reset.\n");
4067 return PCI_ERS_RESULT_DISCONNECT;
4068 }
4070 pci_set_master(pdev);
4072 netif_carrier_off(ndev);
4073 ql_adapter_reset(qdev);
4075 /* Make sure the EEPROM is good */
4076 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4078 if (!is_valid_ether_addr(ndev->perm_addr)) {
4079 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4080 return PCI_ERS_RESULT_DISCONNECT;
4081 }
4083 return PCI_ERS_RESULT_RECOVERED;
4084 }
4086 static void qlge_io_resume(struct pci_dev *pdev)
4087 {
4088 struct net_device *ndev = pci_get_drvdata(pdev);
4089 struct ql_adapter *qdev = netdev_priv(ndev);
4091 pci_set_master(pdev);
4093 if (netif_running(ndev)) {
4094 if (ql_adapter_up(qdev)) {
4095 QPRINTK(qdev, IFUP, ERR,
4096 "Device initialization failed after reset.\n");
4097 return;
4098 }
4099 }
4101 netif_device_attach(ndev);
4102 }
4104 static struct pci_error_handlers qlge_err_handler = {
4105 .error_detected = qlge_io_error_detected,
4106 .slot_reset = qlge_io_slot_reset,
4107 .resume = qlge_io_resume,
4108 };
4109 #ifdef CONFIG_PM
4110 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4111 {
4112 struct net_device *ndev = pci_get_drvdata(pdev);
4113 struct ql_adapter *qdev = netdev_priv(ndev);
4114 int err;
4116 netif_device_detach(ndev);
4118 if (netif_running(ndev)) {
4119 err = ql_adapter_down(qdev);
4120 if (err)
4121 return err;
4122 }
4124 err = pci_save_state(pdev);
4125 if (err)
4126 return err;
4128 pci_disable_device(pdev);
4130 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4132 return 0;
4133 }
4136 static int qlge_resume(struct pci_dev *pdev)
4137 {
4138 struct net_device *ndev = pci_get_drvdata(pdev);
4139 struct ql_adapter *qdev = netdev_priv(ndev);
4140 int err;
4142 pci_set_power_state(pdev, PCI_D0);
4143 pci_restore_state(pdev);
4144 err = pci_enable_device(pdev);
4145 if (err) {
4146 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4147 return err;
4148 }
4149 pci_set_master(pdev);
4151 pci_enable_wake(pdev, PCI_D3hot, 0);
4152 pci_enable_wake(pdev, PCI_D3cold, 0);
4154 if (netif_running(ndev)) {
4155 err = ql_adapter_up(qdev);
4156 if (err)
4157 return err;
4158 }
4160 netif_device_attach(ndev);
4162 return 0;
4163 }
4164 #endif /* CONFIG_PM */
4166 static void qlge_shutdown(struct pci_dev *pdev)
4167 {
4168 qlge_suspend(pdev, PMSG_SUSPEND);
4169 }
4171 static struct pci_driver qlge_driver = {
4172 .name = DRV_NAME,
4173 .id_table = qlge_pci_tbl,
4174 .probe = qlge_probe,
4175 .remove = __devexit_p(qlge_remove),
4176 #ifdef CONFIG_PM
4177 .suspend = qlge_suspend,
4178 .resume = qlge_resume,
4179 #endif
4180 .shutdown = qlge_shutdown,
4181 .err_handler = &qlge_err_handler
4182 };
4184 static int __init qlge_init_module(void)
4185 {
4186 return pci_register_driver(&qlge_driver);
4187 }
4189 static void __exit qlge_exit(void)
4190 {
4191 pci_unregister_driver(&qlge_driver);
4192 }
4194 module_init(qlge_init_module);
4195 module_exit(qlge_exit);