/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |    */
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;  /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
                "Option to enable MPI firmware dump. "
                "Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
                "Option to allow force of firmware core dump. "
                "Default is OFF - Do not allow.");

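/* Illustrative usage note (not from the original source): these module
 * parameters are normally supplied at load time, e.g.:
 *
 *   modprobe qlge qlge_irq_type=2 qlge_mpi_coredump=1
 *
 * which would select legacy interrupts and preallocate the MPI coredump
 * buffer; debug takes a netif_msg bitmask like default_msg above.
 */
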
static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
        u32 sem_bits = 0;

        switch (sem_mask) {
        case SEM_XGMAC0_MASK:
                sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
                break;
        case SEM_XGMAC1_MASK:
                sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
                break;
        case SEM_ICB_MASK:
                sem_bits = SEM_SET << SEM_ICB_SHIFT;
                break;
        case SEM_MAC_ADDR_MASK:
                sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
                break;
        case SEM_FLASH_MASK:
                sem_bits = SEM_SET << SEM_FLASH_SHIFT;
                break;
        case SEM_PROBE_MASK:
                sem_bits = SEM_SET << SEM_PROBE_SHIFT;
                break;
        case SEM_RT_IDX_MASK:
                sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
                break;
        case SEM_PROC_REG_MASK:
                sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
                break;
        default:
                QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask.\n");
                return -EINVAL;
        }

        ql_write32(qdev, SEM, sem_bits | sem_mask);
        return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
        unsigned int wait_count = 30;
        do {
                if (!ql_sem_trylock(qdev, sem_mask))
                        return 0;
                udelay(100);
        } while (--wait_count);
        return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
        ql_write32(qdev, SEM, sem_mask);
        ql_read32(qdev, SEM);   /* flush */
}

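/* Illustrative usage sketch (not from the original source): callers
 * bracket access to a shared resource with the lock/unlock pair, e.g.
 * when reading the flash region:
 *
 *   if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *           return -ETIMEDOUT;
 *   ...access flash through FLASH_ADDR/FLASH_DATA...
 *   ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * Note the inverted convention: ql_sem_trylock() returns zero on a
 * successful acquire and nonzero when another function holds the
 * semaphore.
 */
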
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used by netdev callbacks such as
 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
        u32 temp;
        int count = UDELAY_COUNT;

        while (count) {
                temp = ql_read32(qdev, reg);

                /* check for errors */
                if (temp & err_bit) {
                        QPRINTK(qdev, PROBE, ALERT,
                                "register 0x%.08x access error, value = 0x%.08x.\n",
                                reg, temp);
                        return -EIO;
                } else if (temp & bit)
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        QPRINTK(qdev, PROBE, ALERT,
                "Timed out waiting for reg %x to come ready.\n", reg);
        return -ETIMEDOUT;
}

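/* Illustrative usage sketch (not from the original source): the indirect
 * register accessors below all follow the same pattern of polling for
 * readiness before touching the data register, e.g.:
 *
 *   status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 *   if (status)
 *           return status;
 *   ql_write32(qdev, MAC_ADDR_IDX, ...);
 *
 * A zero err_bit disables the error check and leaves only the timeout.
 */
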
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
        int count = UDELAY_COUNT;
        u32 temp;

        while (count) {
                temp = ql_read32(qdev, CFG);
                if (temp & CFG_LE)
                        return -EIO;
                if (!(temp & bit))
                        return 0;
                udelay(UDELAY_DELAY);
                count--;
        }
        return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
                 u16 q_id)
{
        u64 map;
        int status = 0;
        int direction;
        u32 mask;
        u32 value;

        direction =
            (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
            PCI_DMA_FROMDEVICE;

        map = pci_map_single(qdev->pdev, ptr, size, direction);
        if (pci_dma_mapping_error(qdev->pdev, map)) {
                QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
                return -ENOMEM;
        }

        status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
        if (status)
                goto unmap;

        status = ql_wait_cfg(qdev, bit);
        if (status) {
                QPRINTK(qdev, IFUP, ERR,
                        "Timed out waiting for CFG to come ready.\n");
                goto exit;
        }

        ql_write32(qdev, ICB_L, (u32) map);
        ql_write32(qdev, ICB_H, (u32) (map >> 32));

        mask = CFG_Q_MASK | (bit << 16);
        value = bit | (q_id << CFG_Q_SHIFT);
        ql_write32(qdev, CFG, (mask | value));

        /*
         * Wait for the bit to clear after signaling hw.
         */
        status = ql_wait_cfg(qdev, bit);
exit:
        ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
unmap:
        pci_unmap_single(qdev->pdev, map, size, direction);
        return status;
}

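/* Illustrative usage sketch (not from the original source): a completion
 * queue's init control block would be downloaded with something like:
 *
 *   status = ql_write_cfg(qdev, rx_ring->cqicb, sizeof(struct cqicb),
 *                         CFG_LCQ, rx_ring->cq_id);
 *
 * where CFG_LCQ (load completion queue) picks the operation and q_id
 * selects which queue's ICB is loaded.
 */
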
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
                        u32 *value)
{
        u32 offset = 0;
        int status;

        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MR, 0);
                        if (status)
                                goto exit;
                        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        if (type == MAC_ADDR_TYPE_CAM_MAC) {
                                status =
                                    ql_wait_reg_rdy(qdev,
                                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                                if (status)
                                        goto exit;
                                ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                           (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                           MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
                                status =
                                    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
                                                    MAC_ADDR_MR, 0);
                                if (status)
                                        goto exit;
                                *value++ = ql_read32(qdev, MAC_ADDR_DATA);
                        }
                        break;
                }
        case MAC_ADDR_TYPE_VLAN:
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                QPRINTK(qdev, IFUP, CRIT,
                        "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
                               u16 index)
{
        u32 offset = 0;
        int status = 0;

        switch (type) {
        case MAC_ADDR_TYPE_MULTI_MAC:
                {
                        u32 upper = (addr[0] << 8) | addr[1];
                        u32 lower = (addr[2] << 24) | (addr[3] << 16) |
                                        (addr[4] << 8) | (addr[5]);

                        status =
                                ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                                (index << MAC_ADDR_IDX_SHIFT) |
                                type | MAC_ADDR_E);
                        ql_write32(qdev, MAC_ADDR_DATA, lower);
                        status =
                                ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                                (index << MAC_ADDR_IDX_SHIFT) |
                                type | MAC_ADDR_E);

                        ql_write32(qdev, MAC_ADDR_DATA, upper);
                        status =
                                ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        break;
                }
        case MAC_ADDR_TYPE_CAM_MAC:
                {
                        u32 cam_output;
                        u32 upper = (addr[0] << 8) | addr[1];
                        u32 lower =
                            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
                            (addr[5]);

                        QPRINTK(qdev, IFUP, DEBUG,
                                "Adding %s address %pM"
                                " at index %d in the CAM.\n",
                                ((type ==
                                  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
                                 "UNICAST"), addr, index);

                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);       /* type */
                        ql_write32(qdev, MAC_ADDR_DATA, lower);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                                   type);       /* type */
                        ql_write32(qdev, MAC_ADDR_DATA, upper);
                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                                   type);       /* type */
                        /* This field should also include the queue id
                           and possibly the function id.  Right now we hardcode
                           the route field to NIC core.
                         */
                        cam_output = (CAM_OUT_ROUTE_NIC |
                                      (qdev->func << CAM_OUT_FUNC_SHIFT) |
                                      (0 << CAM_OUT_CQ_ID_SHIFT));
                        if (qdev->vlgrp)
                                cam_output |= CAM_OUT_RV;
                        /* route to NIC core */
                        ql_write32(qdev, MAC_ADDR_DATA, cam_output);
                        break;
                }
        case MAC_ADDR_TYPE_VLAN:
                {
                        u32 enable_bit = *((u32 *) &addr[0]);
                        /* For VLAN, the addr actually holds a bit that
                         * either enables or disables the vlan id we are
                         * addressing. It's either MAC_ADDR_E on or off.
                         * That's bit-27 we're talking about.
                         */
                        QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
                                (enable_bit ? "Adding" : "Removing"),
                                index, (enable_bit ? "to" : "from"));

                        status =
                            ql_wait_reg_rdy(qdev,
                                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
                        if (status)
                                goto exit;
                        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
                                   (index << MAC_ADDR_IDX_SHIFT) |      /* index */
                                   type |       /* type */
                                   enable_bit); /* enable/disable */
                        break;
                }
        case MAC_ADDR_TYPE_MULTI_FLTR:
        default:
                QPRINTK(qdev, IFUP, CRIT,
                        "Address type %d not yet supported.\n", type);
                status = -EPERM;
        }
exit:
        return status;
}

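/* Illustrative usage sketch (not from the original source): the unicast
 * station address is installed through the MAC_ADDR_TYPE_CAM_MAC case,
 * with the SEM_MAC_ADDR_MASK semaphore held, as ql_set_mac_addr() below
 * does:
 *
 *   status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->dev_addr,
 *                                MAC_ADDR_TYPE_CAM_MAC,
 *                                qdev->func * MAX_CQ);
 */
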
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
        int status;
        char zero_mac_addr[ETH_ALEN];
        char *addr;

        if (set) {
                addr = &qdev->ndev->dev_addr[0];
                QPRINTK(qdev, IFUP, DEBUG,
                        "Set Mac addr %pM\n", addr);
        } else {
                memset(zero_mac_addr, 0, ETH_ALEN);
                addr = &zero_mac_addr[0];
                QPRINTK(qdev, IFUP, DEBUG,
                                "Clearing MAC address on %s\n",
                                qdev->ndev->name);
        }
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
        status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
                        "address.\n");
        return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
        QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
                                 qdev->ndev->name);
        netif_carrier_on(qdev->ndev);
        ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
        QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
                                 qdev->ndev->name);
        netif_carrier_off(qdev->ndev);
        ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
        int status = 0;

        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
        if (status)
                goto exit;

        ql_write32(qdev, RT_IDX,
                   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
        if (status)
                goto exit;
        *value = ql_read32(qdev, RT_DATA);
exit:
        return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
                              int enable)
{
        int status = -EINVAL; /* Return error if no mask match. */
        u32 value = 0;

        QPRINTK(qdev, IFUP, DEBUG,
                "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
                (enable ? "Adding" : "Removing"),
                ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
                ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
                ((index ==
                  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
                ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
                ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
                ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
                ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
                ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
                ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
                ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
                ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
                ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
                ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
                ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
                ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
                ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
                (enable ? "to" : "from"));

        switch (mask) {
        case RT_IDX_CAM_HIT:
                {
                        value = RT_IDX_DST_CAM_Q |      /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_VALID:      /* Promiscuous Mode frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
                {
                        value = RT_IDX_DST_RSS |        /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        case 0:         /* Clear the E-bit on an entry. */
                {
                        value = RT_IDX_DST_DFLT_Q |     /* dest */
                            RT_IDX_TYPE_NICQ |  /* type */
                            (index << RT_IDX_IDX_SHIFT);/* index */
                        break;
                }
        default:
                QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
                        mask);
                status = -EPERM;
                goto exit;
        }

        if (value) {
                status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
                if (status)
                        goto exit;
                value |= (enable ? RT_IDX_E : 0);
                ql_write32(qdev, RT_IDX, value);
                ql_write32(qdev, RT_DATA, enable ? mask : 0);
        }
exit:
        return status;
}

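/* Illustrative usage sketch (not from the original source): broadcast
 * frames are steered to the default queue by enabling the matching slot:
 *
 *   status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * Passing enable == 0 clears the E-bit for that slot instead.
 */
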
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
        ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        unsigned long hw_flags = 0;
        struct intr_context *ctx = qdev->intr_context + intr;

        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
                /* Always enable if we're MSIX multi interrupts and
                 * it's not the default (zeroth) interrupt.
                 */
                ql_write32(qdev, INTR_EN,
                           ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
                return var;
        }

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (atomic_dec_and_test(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                           ctx->intr_en_mask);
                var = ql_read32(qdev, STS);
        }
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
        u32 var = 0;
        struct intr_context *ctx;

        /* HW disables for us if we're MSIX multi interrupts and
         * it's not the default (zeroth) interrupt.
         */
        if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
                return 0;

        ctx = qdev->intr_context + intr;
        spin_lock(&qdev->hw_lock);
        if (!atomic_read(&ctx->irq_cnt)) {
                ql_write32(qdev, INTR_EN,
                           ctx->intr_dis_mask);
                var = ql_read32(qdev, STS);
        }
        atomic_inc(&ctx->irq_cnt);
        spin_unlock(&qdev->hw_lock);
        return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
        int i;
        for (i = 0; i < qdev->intr_count; i++) {
                /* The enable call does an atomic_dec_and_test
                 * and enables only if the result is zero.
                 * So we precharge it here.
                 */
                if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
                        i == 0))
                        atomic_set(&qdev->intr_context[i].irq_cnt, 1);
                ql_enable_completion_interrupt(qdev, i);
        }
}

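/* Illustrative note (not from the original source): the precharge above
 * pairs with the atomic_dec_and_test() in ql_enable_completion_interrupt()
 * so that a disable/enable sequence on a shared (non-MSI-X or vector 0)
 * interrupt nests correctly:
 *
 *   ql_disable_completion_interrupt(qdev, 0);  // irq_cnt: 0 -> 1, mask
 *   ...work with the interrupt masked...
 *   ql_enable_completion_interrupt(qdev, 0);   // irq_cnt: 1 -> 0, unmask
 */
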
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
        int status, i;
        u16 csum = 0;
        __le16 *flash = (__le16 *)&qdev->flash;

        status = strncmp((char *)&qdev->flash, str, 4);
        if (status) {
                QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
                return status;
        }

        for (i = 0; i < size; i++)
                csum += le16_to_cpu(*flash++);

        if (csum)
                QPRINTK(qdev, IFUP, ERR,
                        "Invalid flash checksum, csum = 0x%.04x.\n", csum);

        return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
        if (status)
                goto exit;
        /* This data is stored on flash as an array of
         * __le32.  Since ql_read32() returns cpu endian
         * we need to swap it back.
         */
        *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
        return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
        u32 i, size;
        int status;
        __le32 *p = (__le32 *)&qdev->flash;
        u32 offset;
        u8 mac_addr[6];

        /* Get flash offset for function and adjust
         * for dword access.
         */
        if (!qdev->port)
                offset = FUNC0_FLASH_OFFSET / sizeof(u32);
        else
                offset = FUNC1_FLASH_OFFSET / sizeof(u32);

        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;

        size = sizeof(struct flash_params_8000) / sizeof(u32);
        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
                        QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
                        goto exit;
                }
        }

        status = ql_validate_flash(qdev,
                        sizeof(struct flash_params_8000) / sizeof(u16),
                        "8000");
        if (status) {
                QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
                status = -EINVAL;
                goto exit;
        }

        /* Extract either manufacturer or BOFM modified
         * MAC address.
         */
        if (qdev->flash.flash_params_8000.data_type1 == 2)
                memcpy(mac_addr,
                        qdev->flash.flash_params_8000.mac_addr1,
                        qdev->ndev->addr_len);
        else
                memcpy(mac_addr,
                        qdev->flash.flash_params_8000.mac_addr,
                        qdev->ndev->addr_len);

        if (!is_valid_ether_addr(mac_addr)) {
                QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
                status = -EINVAL;
                goto exit;
        }

        memcpy(qdev->ndev->dev_addr,
                mac_addr,
                qdev->ndev->addr_len);

exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
        int i;
        int status;
        __le32 *p = (__le32 *)&qdev->flash;
        u32 offset = 0;
        u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

        /* Second function's parameters follow the first
         * function's.
         */
        if (qdev->port)
                offset = size;

        if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
                return -ETIMEDOUT;

        for (i = 0; i < size; i++, p++) {
                status = ql_read_flash_word(qdev, i+offset, p);
                if (status) {
                        QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
                        goto exit;
                }
        }

        status = ql_validate_flash(qdev,
                        sizeof(struct flash_params_8012) / sizeof(u16),
                        "8012");
        if (status) {
                QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
                status = -EINVAL;
                goto exit;
        }

        if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
                status = -EINVAL;
                goto exit;
        }

        memcpy(qdev->ndev->dev_addr,
                qdev->flash.flash_params_8012.mac_addr,
                qdev->ndev->addr_len);

exit:
        ql_sem_unlock(qdev, SEM_FLASH_MASK);
        return status;
}

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
        int status;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                return status;
        /* write the data to the data reg */
        ql_write32(qdev, XGMAC_DATA, data);
        /* trigger the write */
        ql_write32(qdev, XGMAC_ADDR, reg);
        return status;
}

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
        int status = 0;
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* set up for reg read */
        ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
        /* wait for reg to come ready */
        status = ql_wait_reg_rdy(qdev,
                        XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
        if (status)
                goto exit;
        /* get the data */
        *data = ql_read32(qdev, XGMAC_DATA);
exit:
        return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
        int status = 0;
        u32 hi = 0;
        u32 lo = 0;

        status = ql_read_xgmac_reg(qdev, reg, &lo);
        if (status)
                goto exit;

        status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
        if (status)
                goto exit;

        *data = (u64) lo | ((u64) hi << 32);

exit:
        return status;
}

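/* Illustrative usage sketch (not from the original source): the 64-bit
 * XGMAC statistics counters are read low dword first, so a caller looks
 * like:
 *
 *   u64 val;
 *   status = ql_read_xgmac_reg64(qdev, reg, &val);
 *
 * where reg is one of the XGMAC statistics offsets defined in qlge.h.
 */
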
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
        int status;
        /*
         * Get MPI firmware version for driver banner
         * and ethtool info.
         */
        status = ql_mb_about_fw(qdev);
        if (status)
                goto exit;
        status = ql_mb_get_fw_state(qdev);
        if (status)
                goto exit;
        /* Wake up a worker to get/set the TX/RX frame sizes. */
        queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
        return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
        int status = 0;
        u32 data;

        if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
                /* Another function has the semaphore, so
                 * wait for the port init bit to come ready.
                 */
                QPRINTK(qdev, LINK, INFO,
                        "Another function has the semaphore, so wait for the port init bit to come ready.\n");
                status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
                if (status) {
                        QPRINTK(qdev, LINK, CRIT,
                                "Port initialize timed out.\n");
                }
                return status;
        }

        QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore.\n");
        /* Set the core reset. */
        status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
        if (status)
                goto end;
        data |= GLOBAL_CFG_RESET;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Clear the core reset and turn on jumbo for receiver. */
        data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
        data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
        data |= GLOBAL_CFG_TX_STAT_EN;
        data |= GLOBAL_CFG_RX_STAT_EN;
        status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
        if (status)
                goto end;

        /* Enable transmitter, and clear its reset. */
        status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
        if (status)
                goto end;
        data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
        data |= TX_CFG_EN;      /* Enable the transmitter. */
        status = ql_write_xgmac_reg(qdev, TX_CFG, data);
        if (status)
                goto end;

        /* Enable receiver and clear its reset. */
        status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
        if (status)
                goto end;
        data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
        data |= RX_CFG_EN;      /* Enable the receiver. */
        status = ql_write_xgmac_reg(qdev, RX_CFG, data);
        if (status)
                goto end;

        /* Turn on jumbo. */
        status =
            ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
        if (status)
                goto end;
        status =
            ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
        if (status)
                goto end;

        /* Signal to the world that the port is enabled. */
        ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
        return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
        return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
        rx_ring->lbq_curr_idx++;
        if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
                rx_ring->lbq_curr_idx = 0;
        rx_ring->lbq_free_cnt++;
        return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
                struct rx_ring *rx_ring)
{
        struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

        pci_dma_sync_single_for_cpu(qdev->pdev,
                                    pci_unmap_addr(lbq_desc, mapaddr),
                                    rx_ring->lbq_buf_size,
                                    PCI_DMA_FROMDEVICE);

        /* If it's the last chunk of our master page then
         * we unmap it.
         */
        if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
                                        == ql_lbq_block_size(qdev))
                pci_unmap_page(qdev->pdev,
                                lbq_desc->p.pg_chunk.map,
                                ql_lbq_block_size(qdev),
                                PCI_DMA_FROMDEVICE);
        return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
        struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
        rx_ring->sbq_curr_idx++;
        if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
                rx_ring->sbq_curr_idx = 0;
        rx_ring->sbq_free_cnt++;
        return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
        rx_ring->cnsmr_idx++;
        rx_ring->curr_entry++;
        if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
                rx_ring->cnsmr_idx = 0;
                rx_ring->curr_entry = rx_ring->cq_base;
        }
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
        ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
                                                struct bq_desc *lbq_desc)
{
        if (!rx_ring->pg_chunk.page) {
                u64 map;
                rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
                                                GFP_ATOMIC,
                                                qdev->lbq_buf_order);
                if (unlikely(!rx_ring->pg_chunk.page)) {
                        QPRINTK(qdev, DRV, ERR,
                                "page allocation failed.\n");
                        return -ENOMEM;
                }
                rx_ring->pg_chunk.offset = 0;
                map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
                                        0, ql_lbq_block_size(qdev),
                                        PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                        __free_pages(rx_ring->pg_chunk.page,
                                        qdev->lbq_buf_order);
                        QPRINTK(qdev, DRV, ERR,
                                "PCI mapping failed.\n");
                        return -ENOMEM;
                }
                rx_ring->pg_chunk.map = map;
                rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
        }

        /* Copy the current master pg_chunk info
         * to the current descriptor.
         */
        lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

        /* Adjust the master page chunk for next
         * buffer get.
         */
        rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
        if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
                rx_ring->pg_chunk.page = NULL;
                lbq_desc->p.pg_chunk.last_flag = 1;
        } else {
                rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
                get_page(rx_ring->pg_chunk.page);
                lbq_desc->p.pg_chunk.last_flag = 0;
        }
        return 0;
}
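
/* Illustrative note (not from the original source): with 4 KB pages and
 * lbq_buf_order == 1, each 8 KB master page yields two lbq_buf_size
 * chunks.  Every chunk handed out holds its own page reference (the
 * alloc_pages() reference for the first, get_page() for each later one),
 * the chunk ending at the block boundary triggers the pci_unmap_page()
 * in ql_get_curr_lchunk(), and last_flag lets the teardown path identify
 * the final chunk.
 */
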
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        u32 clean_idx = rx_ring->lbq_clean_idx;
        u32 start_idx = clean_idx;
        struct bq_desc *lbq_desc;
        u64 map;
        int i;

        while (rx_ring->lbq_free_cnt > 32) {
                for (i = 0; i < 16; i++) {
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "lbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
                        lbq_desc = &rx_ring->lbq[clean_idx];
                        if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
                                QPRINTK(qdev, IFUP, ERR,
                                        "Could not get a page chunk.\n");
                                return;
                        }

                        map = lbq_desc->p.pg_chunk.map +
                                lbq_desc->p.pg_chunk.offset;
                        pci_unmap_addr_set(lbq_desc, mapaddr, map);
                        pci_unmap_len_set(lbq_desc, maplen,
                                        rx_ring->lbq_buf_size);
                        *lbq_desc->addr = cpu_to_le64(map);

                        pci_dma_sync_single_for_device(qdev->pdev, map,
                                                rx_ring->lbq_buf_size,
                                                PCI_DMA_FROMDEVICE);
                        clean_idx++;
                        if (clean_idx == rx_ring->lbq_len)
                                clean_idx = 0;
                }

                rx_ring->lbq_clean_idx = clean_idx;
                rx_ring->lbq_prod_idx += 16;
                if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
                        rx_ring->lbq_prod_idx = 0;
                rx_ring->lbq_free_cnt -= 16;
        }

        if (start_idx != clean_idx) {
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "lbq: updating prod idx = %d.\n",
                        rx_ring->lbq_prod_idx);
                ql_write_db_reg(rx_ring->lbq_prod_idx,
                                rx_ring->lbq_prod_idx_db_reg);
        }
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
        u32 clean_idx = rx_ring->sbq_clean_idx;
        u32 start_idx = clean_idx;
        struct bq_desc *sbq_desc;
        u64 map;
        int i;

        while (rx_ring->sbq_free_cnt > 16) {
                for (i = 0; i < 16; i++) {
                        sbq_desc = &rx_ring->sbq[clean_idx];
                        QPRINTK(qdev, RX_STATUS, DEBUG,
                                "sbq: try cleaning clean_idx = %d.\n",
                                clean_idx);
                        if (sbq_desc->p.skb == NULL) {
                                QPRINTK(qdev, RX_STATUS, DEBUG,
                                        "sbq: getting new skb for index %d.\n",
                                        sbq_desc->index);
                                sbq_desc->p.skb =
                                    netdev_alloc_skb(qdev->ndev,
                                                     SMALL_BUFFER_SIZE);
                                if (sbq_desc->p.skb == NULL) {
                                        QPRINTK(qdev, PROBE, ERR,
                                                "Couldn't get an skb.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        return;
                                }
                                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                                map = pci_map_single(qdev->pdev,
                                                     sbq_desc->p.skb->data,
                                                     rx_ring->sbq_buf_size,
                                                     PCI_DMA_FROMDEVICE);
                                if (pci_dma_mapping_error(qdev->pdev, map)) {
                                        QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
                                        rx_ring->sbq_clean_idx = clean_idx;
                                        dev_kfree_skb_any(sbq_desc->p.skb);
                                        sbq_desc->p.skb = NULL;
                                        return;
                                }
                                pci_unmap_addr_set(sbq_desc, mapaddr, map);
                                pci_unmap_len_set(sbq_desc, maplen,
                                                  rx_ring->sbq_buf_size);
                                *sbq_desc->addr = cpu_to_le64(map);
                        }

                        clean_idx++;
                        if (clean_idx == rx_ring->sbq_len)
                                clean_idx = 0;
                }
                rx_ring->sbq_clean_idx = clean_idx;
                rx_ring->sbq_prod_idx += 16;
                if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
                        rx_ring->sbq_prod_idx = 0;
                rx_ring->sbq_free_cnt -= 16;
        }

        if (start_idx != clean_idx) {
                QPRINTK(qdev, RX_STATUS, DEBUG,
                        "sbq: updating prod idx = %d.\n",
                        rx_ring->sbq_prod_idx);
                ql_write_db_reg(rx_ring->sbq_prod_idx,
                                rx_ring->sbq_prod_idx_db_reg);
        }
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
                                    struct rx_ring *rx_ring)
{
        ql_update_sbq(qdev, rx_ring);
        ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
                          struct tx_ring_desc *tx_ring_desc, int mapped)
{
        int i;
        for (i = 0; i < mapped; i++) {
                if (i == 0 || (i == 7 && mapped > 7)) {
                        /*
                         * Unmap the skb->data area, or the
                         * external sglist (AKA the Outbound
                         * Address List (OAL)).
                         * If it's the zeroth element, then it's
                         * the skb->data area.  If it's the 7th
                         * element and there are more than 6 frags,
                         * then it's an OAL.
                         */
                        if (i == 7) {
                                QPRINTK(qdev, TX_DONE, DEBUG,
                                        "unmapping OAL area.\n");
                        }
                        pci_unmap_single(qdev->pdev,
                                         pci_unmap_addr(&tx_ring_desc->map[i],
                                                        mapaddr),
                                         pci_unmap_len(&tx_ring_desc->map[i],
                                                       maplen),
                                         PCI_DMA_TODEVICE);
                } else {
                        QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
                                i);
                        pci_unmap_page(qdev->pdev,
                                       pci_unmap_addr(&tx_ring_desc->map[i],
                                                      mapaddr),
                                       pci_unmap_len(&tx_ring_desc->map[i],
                                                     maplen), PCI_DMA_TODEVICE);
                }
        }
}

1316 /* Map the buffers for this transmit.  This will return
1317  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1318  */
1319 static int ql_map_send(struct ql_adapter *qdev,
1320                        struct ob_mac_iocb_req *mac_iocb_ptr,
1321                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1322 {
1323         int len = skb_headlen(skb);
1324         dma_addr_t map;
1325         int frag_idx, err, map_idx = 0;
1326         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1327         int frag_cnt = skb_shinfo(skb)->nr_frags;
1328
1329         if (frag_cnt) {
1330                 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1331         }
1332         /*
1333          * Map the skb buffer first.
1334          */
1335         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1336
1337         err = pci_dma_mapping_error(qdev->pdev, map);
1338         if (err) {
1339                 QPRINTK(qdev, TX_QUEUED, ERR,
1340                         "PCI mapping failed with error: %d\n", err);
1341
1342                 return NETDEV_TX_BUSY;
1343         }
1344
1345         tbd->len = cpu_to_le32(len);
1346         tbd->addr = cpu_to_le64(map);
1347         pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1348         pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1349         map_idx++;
1350
1351         /*
1352          * This loop fills the remainder of the 8 address descriptors
1353          * in the IOCB.  If there are more than 7 fragments, then the
1354          * eighth address desc will point to an external list (OAL).
1355          * When this happens, the remainder of the frags will be stored
1356          * in this list.
1357          */
1358         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1359                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1360                 tbd++;
1361                 if (frag_idx == 6 && frag_cnt > 7) {
1362                         /* Let's tack on an sglist.
1363                          * Our control block will now
1364                          * look like this:
1365                          * iocb->seg[0] = skb->data
1366                          * iocb->seg[1] = frag[0]
1367                          * iocb->seg[2] = frag[1]
1368                          * iocb->seg[3] = frag[2]
1369                          * iocb->seg[4] = frag[3]
1370                          * iocb->seg[5] = frag[4]
1371                          * iocb->seg[6] = frag[5]
1372                          * iocb->seg[7] = ptr to OAL (external sglist)
1373                          * oal->seg[0] = frag[6]
1374                          * oal->seg[1] = frag[7]
1375                          * oal->seg[2] = frag[8]
1376                          * oal->seg[3] = frag[9]
1377                          * oal->seg[4] = frag[10]
1378                          *      etc...
1379                          */
1380                         /* Tack on the OAL in the eighth segment of IOCB. */
1381                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1382                                              sizeof(struct oal),
1383                                              PCI_DMA_TODEVICE);
1384                         err = pci_dma_mapping_error(qdev->pdev, map);
1385                         if (err) {
1386                                 QPRINTK(qdev, TX_QUEUED, ERR,
1387                                         "PCI mapping of outbound address list failed with error: %d\n",
1388                                         err);
1389                                 goto map_error;
1390                         }
1391
1392                         tbd->addr = cpu_to_le64(map);
1393                         /*
1394                          * The length is the number of fragments
1395                          * that remain to be mapped times the size
1396                          * of a single OAL entry (tx_buf_desc).
1397                          */
1398                         tbd->len =
1399                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1400                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1401                         pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1402                                            map);
1403                         pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1404                                           sizeof(struct oal));
1405                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1406                         map_idx++;
1407                 }
1408
1409                 map =
1410                     pci_map_page(qdev->pdev, frag->page,
1411                                  frag->page_offset, frag->size,
1412                                  PCI_DMA_TODEVICE);
1413
1414                 err = pci_dma_mapping_error(qdev->pdev, map);
1415                 if (err) {
1416                         QPRINTK(qdev, TX_QUEUED, ERR,
1417                                 "PCI mapping frags failed with error: %d.\n",
1418                                 err);
1419                         goto map_error;
1420                 }
1421
1422                 tbd->addr = cpu_to_le64(map);
1423                 tbd->len = cpu_to_le32(frag->size);
1424                 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1425                 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1426                                   frag->size);
1427
1428         }
1429         /* Save the number of segments we've mapped. */
1430         tx_ring_desc->map_cnt = map_idx;
1431         /* Terminate the last segment. */
1432         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1433         return NETDEV_TX_OK;
1434
1435 map_error:
1436         /*
1437          * If the first frag mapping failed, then map_idx will be one,
1438          * which causes just the skb->data area to be unmapped.  Otherwise
1439          * we pass in the number of segments that mapped successfully
1440          * so they can be unmapped.
1441          */
1442         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1443         return NETDEV_TX_BUSY;
1444 }
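/* Illustrative note (not part of the driver): for an skb with 10 frags the
 * mapping above produces seg[0] = skb->data, seg[1..6] = frags 0..5, and
 * seg[7] = the OAL, whose length field is
 *     sizeof(struct tx_buf_desc) * (10 - 6) | TX_DESC_C
 * i.e. four OAL entries covering frags 6..9, with the last entry carrying
 * the TX_DESC_E terminator bit.
 */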
1445
1446 /* Process an inbound completion from an rx ring. */
1447 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1448                                         struct rx_ring *rx_ring,
1449                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1450                                         u32 length,
1451                                         u16 vlan_id)
1452 {
1453         struct sk_buff *skb;
1454         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1455         struct skb_frag_struct *rx_frag;
1456         int nr_frags;
1457         struct napi_struct *napi = &rx_ring->napi;
1458
1459         napi->dev = qdev->ndev;
1460
1461         skb = napi_get_frags(napi);
1462         if (!skb) {
1463                 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
1464                 rx_ring->rx_dropped++;
1465                 put_page(lbq_desc->p.pg_chunk.page);
1466                 return;
1467         }
1468         prefetch(lbq_desc->p.pg_chunk.va);
1469         rx_frag = skb_shinfo(skb)->frags;
1470         nr_frags = skb_shinfo(skb)->nr_frags;
1471         rx_frag += nr_frags;
1472         rx_frag->page = lbq_desc->p.pg_chunk.page;
1473         rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1474         rx_frag->size = length;
1475
1476         skb->len += length;
1477         skb->data_len += length;
1478         skb->truesize += length;
1479         skb_shinfo(skb)->nr_frags++;
1480
1481         rx_ring->rx_packets++;
1482         rx_ring->rx_bytes += length;
1483         skb->ip_summed = CHECKSUM_UNNECESSARY;
1484         skb_record_rx_queue(skb, rx_ring->cq_id);
1485         if (qdev->vlgrp && (vlan_id != 0xffff))
1486                 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1487         else
1488                 napi_gro_frags(napi);
1489 }
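/* Note: napi_get_frags() returns a per-NAPI skb that may already hold frags
 * from earlier completions; the code above just appends this page chunk as
 * the next frag and lets GRO decide whether to coalesce or flush.  The
 * 0xffff vlan_id sentinel set by the caller means "no VLAN tag present".
 */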
1490
1491 /* Process an inbound completion from an rx ring. */
1492 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1493                                         struct rx_ring *rx_ring,
1494                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1495                                         u32 length,
1496                                         u16 vlan_id)
1497 {
1498         struct net_device *ndev = qdev->ndev;
1499         struct sk_buff *skb = NULL;
1500         void *addr;
1501         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1502         struct napi_struct *napi = &rx_ring->napi;
1503
1504         skb = netdev_alloc_skb(ndev, length);
1505         if (!skb) {
1506                 QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
1507                                 "need to unwind.\n");
1508                 rx_ring->rx_dropped++;
1509                 put_page(lbq_desc->p.pg_chunk.page);
1510                 return;
1511         }
1512
1513         addr = lbq_desc->p.pg_chunk.va;
1514         prefetch(addr);
1515
1516
1517         /* Frame error, so drop the packet. */
1518         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1519                 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1520                                 ib_mac_rsp->flags2);
1521                 rx_ring->rx_errors++;
1522                 goto err_out;
1523         }
1524
1525         /* The max framesize filter on this chip is set higher than
1526          * MTU since FCoE uses 2k frames.
1527          */
1528         if (length > ndev->mtu + ETH_HLEN) {
1529                 QPRINTK(qdev, DRV, ERR, "Frame too long, dropping.\n");
1530                 rx_ring->rx_dropped++;
1531                 goto err_out;
1532         }
1533         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1534         QPRINTK(qdev, RX_STATUS, DEBUG,
1535                 "%d bytes of headers and data in large. Chain "
1536                 "page to new skb and pull tail.\n", length);
1537         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1538                                 lbq_desc->p.pg_chunk.offset + ETH_HLEN,
1539                                 length - ETH_HLEN);
1540         skb->len += length - ETH_HLEN;
1541         skb->data_len += length - ETH_HLEN;
1542         skb->truesize += length - ETH_HLEN;
1543
1544         rx_ring->rx_packets++;
1545         rx_ring->rx_bytes += skb->len;
1546         skb->protocol = eth_type_trans(skb, ndev);
1547         skb->ip_summed = CHECKSUM_NONE;
1548
1549         if (qdev->rx_csum &&
1550                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1551                 /* TCP frame. */
1552                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1553                         QPRINTK(qdev, RX_STATUS, DEBUG,
1554                                         "TCP checksum done!\n");
1555                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1556                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1557                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1558                         /* Unfragmented ipv4 UDP frame. */
1559                         struct iphdr *iph = (struct iphdr *) skb->data;
1560                         if (!(iph->frag_off &
1561                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1562                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1563                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1564                                                 "UDP checksum done!\n");
1565                         }
1566                 }
1567         }
1568
1569         skb_record_rx_queue(skb, rx_ring->cq_id);
1570         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1571                 if (qdev->vlgrp && (vlan_id != 0xffff))
1572                         vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1573                 else
1574                         napi_gro_receive(napi, skb);
1575         } else {
1576                 if (qdev->vlgrp && (vlan_id != 0xffff))
1577                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1578                 else
1579                         netif_receive_skb(skb);
1580         }
1581         return;
1582 err_out:
1583         dev_kfree_skb_any(skb);
1584         put_page(lbq_desc->p.pg_chunk.page);
1585 }
1586
1587 /* Process an inbound completion from an rx ring. */
1588 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1589                                         struct rx_ring *rx_ring,
1590                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1591                                         u32 length,
1592                                         u16 vlan_id)
1593 {
1594         struct net_device *ndev = qdev->ndev;
1595         struct sk_buff *skb = NULL;
1596         struct sk_buff *new_skb = NULL;
1597         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1598
1599         skb = sbq_desc->p.skb;
1600         /* Allocate new_skb and copy */
1601         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1602         if (new_skb == NULL) {
1603                 QPRINTK(qdev, PROBE, ERR,
1604                         "No skb available, drop the packet.\n");
1605                 rx_ring->rx_dropped++;
1606                 return;
1607         }
1608         skb_reserve(new_skb, NET_IP_ALIGN);
1609         memcpy(skb_put(new_skb, length), skb->data, length);
1610         skb = new_skb;
1611
1612         /* Frame error, so drop the packet. */
1613         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1614                 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1615                                         ib_mac_rsp->flags2);
1616                 dev_kfree_skb_any(skb);
1617                 rx_ring->rx_errors++;
1618                 return;
1619         }
1620
1621         /* loopback self test for ethtool */
1622         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1623                 ql_check_lb_frame(qdev, skb);
1624                 dev_kfree_skb_any(skb);
1625                 return;
1626         }
1627
1628         /* The max framesize filter on this chip is set higher than
1629          * MTU since FCoE uses 2k frames.
1630          */
1631         if (skb->len > ndev->mtu + ETH_HLEN) {
1632                 dev_kfree_skb_any(skb);
1633                 rx_ring->rx_dropped++;
1634                 return;
1635         }
1636
1637         prefetch(skb->data);
1638         skb->dev = ndev;
1639         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1640                 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1641                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642                         IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1643                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644                         IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1645                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1646                         IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1647         }
1648         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1649                 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1650
1651         rx_ring->rx_packets++;
1652         rx_ring->rx_bytes += skb->len;
1653         skb->protocol = eth_type_trans(skb, ndev);
1654         skb->ip_summed = CHECKSUM_NONE;
1655
1656         /* If rx checksum is on, and there are no
1657          * csum or frame errors.
1658          */
1659         if (qdev->rx_csum &&
1660                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1661                 /* TCP frame. */
1662                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1663                         QPRINTK(qdev, RX_STATUS, DEBUG,
1664                                         "TCP checksum done!\n");
1665                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1666                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1667                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1668                         /* Unfragmented ipv4 UDP frame. */
1669                         struct iphdr *iph = (struct iphdr *) skb->data;
1670                         if (!(iph->frag_off &
1671                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1672                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1673                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1674                                                 "UDP checksum done!\n");
1675                         }
1676                 }
1677         }
1678
1679         skb_record_rx_queue(skb, rx_ring->cq_id);
1680         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1681                 if (qdev->vlgrp && (vlan_id != 0xffff))
1682                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1683                                                 vlan_id, skb);
1684                 else
1685                         napi_gro_receive(&rx_ring->napi, skb);
1686         } else {
1687                 if (qdev->vlgrp && (vlan_id != 0xffff))
1688                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1689                 else
1690                         netif_receive_skb(skb);
1691         }
1692 }
1693
1694 static void ql_realign_skb(struct sk_buff *skb, int len)
1695 {
1696         void *temp_addr = skb->data;
1697
1698         /* Undo the skb_reserve(skb,32) we did before
1699          * giving to hardware, and realign data on
1700          * a 2-byte boundary.
1701          */
1702         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1703         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1704         skb_copy_to_linear_data(skb, temp_addr,
1705                 (unsigned int)len);
1706 }
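/* Worked example: with the skb_reserve(skb, 32) mentioned above and
 * NET_IP_ALIGN = 2, data and tail move back 32 - 2 = 30 bytes, leaving
 * 2 bytes of headroom; after the 14-byte Ethernet header the IP header
 * then lands on a 4-byte boundary (offset 16).
 */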
1707
1708 /*
1709  * This function builds an skb for the given inbound
1710  * completion.  It will be rewritten for readability in the near
1711  * future, but for now it works well.
1712  */
1713 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1714                                        struct rx_ring *rx_ring,
1715                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1716 {
1717         struct bq_desc *lbq_desc;
1718         struct bq_desc *sbq_desc;
1719         struct sk_buff *skb = NULL;
1720         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1721         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1722
1723         /*
1724          * Handle the header buffer if present.
1725          */
1726         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1727             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1728                 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1729                 /*
1730                  * Headers fit nicely into a small buffer.
1731                  */
1732                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1733                 pci_unmap_single(qdev->pdev,
1734                                 pci_unmap_addr(sbq_desc, mapaddr),
1735                                 pci_unmap_len(sbq_desc, maplen),
1736                                 PCI_DMA_FROMDEVICE);
1737                 skb = sbq_desc->p.skb;
1738                 ql_realign_skb(skb, hdr_len);
1739                 skb_put(skb, hdr_len);
1740                 sbq_desc->p.skb = NULL;
1741         }
1742
1743         /*
1744          * Handle the data buffer(s).
1745          */
1746         if (unlikely(!length)) {        /* Is there data too? */
1747                 QPRINTK(qdev, RX_STATUS, DEBUG,
1748                         "No Data buffer in this packet.\n");
1749                 return skb;
1750         }
1751
1752         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1753                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1754                         QPRINTK(qdev, RX_STATUS, DEBUG,
1755                                 "Headers in small, data of %d bytes in small, combine them.\n", length);
1756                         /*
1757                          * Data is less than small buffer size so it's
1758                          * stuffed in a small buffer.
1759                          * For this case we append the data
1760                          * from the "data" small buffer to the "header" small
1761                          * buffer.
1762                          */
1763                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1764                         pci_dma_sync_single_for_cpu(qdev->pdev,
1765                                                     pci_unmap_addr
1766                                                     (sbq_desc, mapaddr),
1767                                                     pci_unmap_len
1768                                                     (sbq_desc, maplen),
1769                                                     PCI_DMA_FROMDEVICE);
1770                         memcpy(skb_put(skb, length),
1771                                sbq_desc->p.skb->data, length);
1772                         pci_dma_sync_single_for_device(qdev->pdev,
1773                                                        pci_unmap_addr
1774                                                        (sbq_desc,
1775                                                         mapaddr),
1776                                                        pci_unmap_len
1777                                                        (sbq_desc,
1778                                                         maplen),
1779                                                        PCI_DMA_FROMDEVICE);
1780                 } else {
1781                         QPRINTK(qdev, RX_STATUS, DEBUG,
1782                                 "%d bytes in a single small buffer.\n", length);
1783                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1784                         skb = sbq_desc->p.skb;
1785                         ql_realign_skb(skb, length);
1786                         skb_put(skb, length);
1787                         pci_unmap_single(qdev->pdev,
1788                                          pci_unmap_addr(sbq_desc,
1789                                                         mapaddr),
1790                                          pci_unmap_len(sbq_desc,
1791                                                        maplen),
1792                                          PCI_DMA_FROMDEVICE);
1793                         sbq_desc->p.skb = NULL;
1794                 }
1795         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1796                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1797                         QPRINTK(qdev, RX_STATUS, DEBUG,
1798                                 "Header in small, %d bytes in large. Chain large to small!\n", length);
1799                         /*
1800                          * The data is in a single large buffer.  We
1801                          * chain it to the header buffer's skb and let
1802                          * it rip.
1803                          */
1804                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1805                         QPRINTK(qdev, RX_STATUS, DEBUG,
1806                                 "Chaining page at offset = %d, "
1807                                 "for %d bytes to skb.\n",
1808                                 lbq_desc->p.pg_chunk.offset, length);
1809                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1810                                                 lbq_desc->p.pg_chunk.offset,
1811                                                 length);
1812                         skb->len += length;
1813                         skb->data_len += length;
1814                         skb->truesize += length;
1815                 } else {
1816                         /*
1817                          * The headers and data are in a single large buffer. We
1818                          * copy it to a new skb and let it go. This can happen with
1819                          * jumbo mtu on a non-TCP/UDP frame.
1820                          */
1821                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822                         skb = netdev_alloc_skb(qdev->ndev, length);
1823                         if (skb == NULL) {
1824                                 QPRINTK(qdev, PROBE, DEBUG,
1825                                         "No skb available, drop the packet.\n");
1826                                 return NULL;
1827                         }
1828                         pci_unmap_page(qdev->pdev,
1829                                        pci_unmap_addr(lbq_desc,
1830                                                       mapaddr),
1831                                        pci_unmap_len(lbq_desc, maplen),
1832                                        PCI_DMA_FROMDEVICE);
1833                         skb_reserve(skb, NET_IP_ALIGN);
1834                         QPRINTK(qdev, RX_STATUS, DEBUG,
1835                                 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1836                         skb_fill_page_desc(skb, 0,
1837                                                 lbq_desc->p.pg_chunk.page,
1838                                                 lbq_desc->p.pg_chunk.offset,
1839                                                 length);
1840                         skb->len += length;
1841                         skb->data_len += length;
1842                         skb->truesize += length;
1843                         length -= length;
1844                         __pskb_pull_tail(skb,
1845                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1846                                 VLAN_ETH_HLEN : ETH_HLEN);
1847                 }
1848         } else {
1849                 /*
1850                  * The data is in a chain of large buffers
1851                  * pointed to by a small buffer.  We loop
1852          * through and chain them to our small header
1853          * buffer's skb.
1854          * frags:  There are 18 frags max, and our small
1855          *         buffer can hold 32 of them.  In practice
1856          *         we'll use 3 max for our 9000 byte jumbo
1857          *         frames.  If the MTU goes up we could
1858          *         eventually be in trouble.
1859                  */
1860                 int size, i = 0;
1861                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1862                 pci_unmap_single(qdev->pdev,
1863                                  pci_unmap_addr(sbq_desc, mapaddr),
1864                                  pci_unmap_len(sbq_desc, maplen),
1865                                  PCI_DMA_FROMDEVICE);
1866                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1867                         /*
1868                          * This is a non-TCP/UDP IP frame, so
1869                          * the headers aren't split into a small
1870                          * buffer.  We have to use the small buffer
1871                          * that contains our sg list as our skb to
1872                          * send upstream. Copy the sg list here to
1873                          * a local buffer and use it to find the
1874                          * pages to chain.
1875                          */
1876                         QPRINTK(qdev, RX_STATUS, DEBUG,
1877                                 "%d bytes of headers & data in chain of large.\n", length);
1878                         skb = sbq_desc->p.skb;
1879                         sbq_desc->p.skb = NULL;
1880                         skb_reserve(skb, NET_IP_ALIGN);
1881                 }
1882                 while (length > 0) {
1883                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1884                         size = (length < rx_ring->lbq_buf_size) ? length :
1885                                 rx_ring->lbq_buf_size;
1886
1887                         QPRINTK(qdev, RX_STATUS, DEBUG,
1888                                 "Adding page %d to skb for %d bytes.\n",
1889                                 i, size);
1890                         skb_fill_page_desc(skb, i,
1891                                                 lbq_desc->p.pg_chunk.page,
1892                                                 lbq_desc->p.pg_chunk.offset,
1893                                                 size);
1894                         skb->len += size;
1895                         skb->data_len += size;
1896                         skb->truesize += size;
1897                         length -= size;
1898                         i++;
1899                 }
1900                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1901                                 VLAN_ETH_HLEN : ETH_HLEN);
1902         }
1903         return skb;
1904 }
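/* Summary of the cases handled above, keyed off the IOCB flags:
 *   HS + DS: header in a small buffer, data copied from a second small
 *            buffer into the same skb.
 *   DS only: the whole frame sits in one small buffer.
 *   HS + DL: header in a small buffer, one large page chunk chained on.
 *   DL only: header and data share one large chunk; chain it to a fresh
 *            skb and pull the Ethernet header into the linear area.
 *   neither: a small buffer holds an sg list pointing at a chain of
 *            large chunks.
 */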
1905
1906 /* Process an inbound completion from an rx ring. */
1907 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1908                                    struct rx_ring *rx_ring,
1909                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1910                                    u16 vlan_id)
1911 {
1912         struct net_device *ndev = qdev->ndev;
1913         struct sk_buff *skb = NULL;
1914
1915         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1916
1917         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1918         if (unlikely(!skb)) {
1919                 QPRINTK(qdev, RX_STATUS, DEBUG,
1920                         "No skb available, drop packet.\n");
1921                 rx_ring->rx_dropped++;
1922                 return;
1923         }
1924
1925         /* Frame error, so drop the packet. */
1926         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1927                 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1928                                         ib_mac_rsp->flags2);
1929                 dev_kfree_skb_any(skb);
1930                 rx_ring->rx_errors++;
1931                 return;
1932         }
1933
1934         /* The max framesize filter on this chip is set higher than
1935          * MTU since FCoE uses 2k frames.
1936          */
1937         if (skb->len > ndev->mtu + ETH_HLEN) {
1938                 dev_kfree_skb_any(skb);
1939                 rx_ring->rx_dropped++;
1940                 return;
1941         }
1942
1943         /* loopback self test for ethtool */
1944         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1945                 ql_check_lb_frame(qdev, skb);
1946                 dev_kfree_skb_any(skb);
1947                 return;
1948         }
1949
1950         prefetch(skb->data);
1951         skb->dev = ndev;
1952         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1953                 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1954                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1955                         IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1956                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1957                         IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1958                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1959                         IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1960                 rx_ring->rx_multicast++;
1961         }
1962         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1963                 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1964         }
1965
1966         skb->protocol = eth_type_trans(skb, ndev);
1967         skb->ip_summed = CHECKSUM_NONE;
1968
1969         /* If rx checksum is on, and there are no
1970          * csum or frame errors.
1971          */
1972         if (qdev->rx_csum &&
1973                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1974                 /* TCP frame. */
1975                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1976                         QPRINTK(qdev, RX_STATUS, DEBUG,
1977                                         "TCP checksum done!\n");
1978                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1979                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1980                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1981                 /* Unfragmented ipv4 UDP frame. */
1982                         struct iphdr *iph = (struct iphdr *) skb->data;
1983                         if (!(iph->frag_off &
1984                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1985                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1986                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1987                                                 "UDP checksum done!\n");
1988                         }
1989                 }
1990         }
1991
1992         rx_ring->rx_packets++;
1993         rx_ring->rx_bytes += skb->len;
1994         skb_record_rx_queue(skb, rx_ring->cq_id);
1995         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1996                 if (qdev->vlgrp &&
1997                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1998                         (vlan_id != 0))
1999                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2000                                 vlan_id, skb);
2001                 else
2002                         napi_gro_receive(&rx_ring->napi, skb);
2003         } else {
2004                 if (qdev->vlgrp &&
2005                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2006                         (vlan_id != 0))
2007                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2008                 else
2009                         netif_receive_skb(skb);
2010         }
2011 }
2012
2013 /* Process an inbound completion from an rx ring. */
2014 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2015                                         struct rx_ring *rx_ring,
2016                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2017 {
2018         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2019         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2020                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2021                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2022
2023         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2024
2025         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2026                 /* The data and headers are split into
2027                  * separate buffers.
2028                  */
2029                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2030                                                 vlan_id);
2031         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2032                 /* The data fit in a single small buffer.
2033                  * Allocate a new skb, copy the data and
2034                  * return the buffer to the free pool.
2035                  */
2036                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2037                                                 length, vlan_id);
2038         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2039                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2040                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2041                 /* TCP packet in a page chunk that's been checksummed.
2042                  * Tack it on to our GRO skb and let it go.
2043                  */
2044                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2045                                                 length, vlan_id);
2046         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2047                 /* Non-TCP packet in a page chunk. Allocate an
2048                  * skb, tack it on frags, and send it up.
2049                  */
2050                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2051                                                 length, vlan_id);
2052         } else {
2053                 struct bq_desc *lbq_desc;
2054
2055                 /* Free small buffer that holds the IAL */
2056                 lbq_desc = ql_get_curr_sbuf(rx_ring);
2057                 QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
2058                         length, qdev->ndev->mtu);
2059
2060                 /* Unwind the large buffers for this frame. */
2061                 while (length > 0) {
2062                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2063                         length -= (length < rx_ring->lbq_buf_size) ?
2064                                 length : rx_ring->lbq_buf_size;
2065                         put_page(lbq_desc->p.pg_chunk.page);
2066                 }
2067         }
2068
2069         return (unsigned long)length;
2070 }
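/* Note: vlan_id computed above is either the tag masked out of the IOCB
 * (when IB_MAC_IOCB_RSP_V is set) or the 0xffff sentinel, which the
 * handlers above treat as "no VLAN tag".
 */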
2071
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074                                    struct ob_mac_iocb_rsp *mac_rsp)
2075 {
2076         struct tx_ring *tx_ring;
2077         struct tx_ring_desc *tx_ring_desc;
2078
2079         QL_DUMP_OB_MAC_RSP(mac_rsp);
2080         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2083         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084         tx_ring->tx_packets++;
2085         dev_kfree_skb(tx_ring_desc->skb);
2086         tx_ring_desc->skb = NULL;
2087
2088         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089                                         OB_MAC_IOCB_RSP_S |
2090                                         OB_MAC_IOCB_RSP_L |
2091                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093                         QPRINTK(qdev, TX_DONE, WARNING,
2094                                 "Total descriptor length did not match transfer length.\n");
2095                 }
2096                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097                         QPRINTK(qdev, TX_DONE, WARNING,
2098                                 "Frame too short to be legal, not sent.\n");
2099                 }
2100                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101                         QPRINTK(qdev, TX_DONE, WARNING,
2102                                 "Frame too long, but sent anyway.\n");
2103                 }
2104                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105                         QPRINTK(qdev, TX_DONE, WARNING,
2106                                 "PCI backplane error. Frame not sent.\n");
2107                 }
2108         }
2109         atomic_inc(&tx_ring->tx_count);
2110 }
2111
2112 /* Fire up a handler to reset the MPI processor. */
2113 void ql_queue_fw_error(struct ql_adapter *qdev)
2114 {
2115         ql_link_off(qdev);
2116         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2117 }
2118
2119 void ql_queue_asic_error(struct ql_adapter *qdev)
2120 {
2121         ql_link_off(qdev);
2122         ql_disable_interrupts(qdev);
2123         /* Clear adapter up bit to signal the recovery
2124          * process that it shouldn't kill the reset worker
2125          * thread
2126          */
2127         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2128         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2129 }
2130
2131 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2132                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2133 {
2134         switch (ib_ae_rsp->event) {
2135         case MGMT_ERR_EVENT:
2136                 QPRINTK(qdev, RX_ERR, ERR,
2137                         "Management Processor Fatal Error.\n");
2138                 ql_queue_fw_error(qdev);
2139                 return;
2140
2141         case CAM_LOOKUP_ERR_EVENT:
2142                 QPRINTK(qdev, LINK, ERR,
2143                         "Multiple CAM hits occurred on lookup.\n");
2144                 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
2145                 ql_queue_asic_error(qdev);
2146                 return;
2147
2148         case SOFT_ECC_ERROR_EVENT:
2149                 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
2150                 ql_queue_asic_error(qdev);
2151                 break;
2152
2153         case PCI_ERR_ANON_BUF_RD:
2154                 QPRINTK(qdev, RX_ERR, ERR,
2155                         "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2156                         ib_ae_rsp->q_id);
2157                 ql_queue_asic_error(qdev);
2158                 break;
2159
2160         default:
2161                 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
2162                         ib_ae_rsp->event);
2163                 ql_queue_asic_error(qdev);
2164                 break;
2165         }
2166 }
2167
2168 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2169 {
2170         struct ql_adapter *qdev = rx_ring->qdev;
2171         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2172         struct ob_mac_iocb_rsp *net_rsp = NULL;
2173         int count = 0;
2174
2175         struct tx_ring *tx_ring;
2176         /* While there are entries in the completion queue. */
2177         while (prod != rx_ring->cnsmr_idx) {
2178
2179                 QPRINTK(qdev, RX_STATUS, DEBUG,
2180                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
2181                         prod, rx_ring->cnsmr_idx);
2182
2183                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2184                 rmb();
2185                 switch (net_rsp->opcode) {
2186
2187                 case OPCODE_OB_MAC_TSO_IOCB:
2188                 case OPCODE_OB_MAC_IOCB:
2189                         ql_process_mac_tx_intr(qdev, net_rsp);
2190                         break;
2191                 default:
2192                         QPRINTK(qdev, RX_STATUS, DEBUG,
2193                                 "Hit default case, not handled! Dropping the packet, opcode = %x.\n",
2194                                 net_rsp->opcode);
2195                 }
2196                 count++;
2197                 ql_update_cq(rx_ring);
2198                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2199         }
2200         ql_write_cq_idx(rx_ring);
2201         if (net_rsp != NULL) {
2202                 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2203                 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
2204                     atomic_read(&tx_ring->queue_stopped) &&
2205                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2206                         /*
2207                          * The queue got stopped because the tx_ring was full.
2208                          * Wake it up, because it's now at least 25% empty.
2209                          */
2210                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2211         }
2212
2213         return count;
2214 }
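/* Illustrative arithmetic: with, say, a 256-entry work queue the wake test
 * above fires once tx_count (free slots) exceeds wq_len / 4 = 64, so a
 * stopped queue is restarted only after it is at least 25% empty.
 */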
2215
2216 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2217 {
2218         struct ql_adapter *qdev = rx_ring->qdev;
2219         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2220         struct ql_net_rsp_iocb *net_rsp;
2221         int count = 0;
2222
2223         /* While there are entries in the completion queue. */
2224         while (prod != rx_ring->cnsmr_idx) {
2225
2226                 QPRINTK(qdev, RX_STATUS, DEBUG,
2227                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
2228                         prod, rx_ring->cnsmr_idx);
2229
2230                 net_rsp = rx_ring->curr_entry;
2231                 rmb();
2232                 switch (net_rsp->opcode) {
2233                 case OPCODE_IB_MAC_IOCB:
2234                         ql_process_mac_rx_intr(qdev, rx_ring,
2235                                                (struct ib_mac_iocb_rsp *)
2236                                                net_rsp);
2237                         break;
2238
2239                 case OPCODE_IB_AE_IOCB:
2240                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2241                                                 net_rsp);
2242                         break;
2243                 default:
2244                         {
2245                                 QPRINTK(qdev, RX_STATUS, DEBUG,
2246                                         "Hit default case, not handled! Dropping the packet, opcode = %x.\n",
2247                                         net_rsp->opcode);
2248                         }
2249                 }
2250                 count++;
2251                 ql_update_cq(rx_ring);
2252                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2253                 if (count == budget)
2254                         break;
2255         }
2256         ql_update_buffer_queues(qdev, rx_ring);
2257         ql_write_cq_idx(rx_ring);
2258         return count;
2259 }
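/* Note: unlike the outbound cleaner, this path honors the NAPI budget; it
 * stops after 'budget' completions so ql_napi_poll_msix() can report
 * work_done == budget and remain scheduled for another pass.
 */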
2260
2261 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2262 {
2263         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2264         struct ql_adapter *qdev = rx_ring->qdev;
2265         struct rx_ring *trx_ring;
2266         int i, work_done = 0;
2267         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2268
2269         QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
2270                 rx_ring->cq_id);
2271
2272         /* Service the TX rings first.  They start
2273          * right after the RSS rings. */
2274         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2275                 trx_ring = &qdev->rx_ring[i];
2276                 /* If this TX completion ring belongs to this vector and
2277                  * it's not empty then service it.
2278                  */
2279                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2280                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2281                                         trx_ring->cnsmr_idx)) {
2282                         QPRINTK(qdev, INTR, DEBUG,
2283                                 "%s: Servicing TX completion ring %d.\n",
2284                                 __func__, trx_ring->cq_id);
2285                         ql_clean_outbound_rx_ring(trx_ring);
2286                 }
2287         }
2288
2289         /*
2290          * Now service the RSS ring if it's active.
2291          */
2292         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2293                                         rx_ring->cnsmr_idx) {
2294                 QPRINTK(qdev, INTR, DEBUG,
2295                         "%s: Servicing RX completion ring %d.\n",
2296                         __func__, rx_ring->cq_id);
2297                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2298         }
2299
2300         if (work_done < budget) {
2301                 napi_complete(napi);
2302                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2303         }
2304         return work_done;
2305 }
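/* Note: this vector's completion interrupt stays disabled while polling;
 * ql_enable_completion_interrupt() runs only after work_done drops below
 * budget and napi_complete() has taken us off the poll list.
 */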
2306
2307 static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
2308 {
2309         struct ql_adapter *qdev = netdev_priv(ndev);
2310
2311         qdev->vlgrp = grp;
2312         if (grp) {
2313                 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
2314                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2315                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2316         } else {
2317                 QPRINTK(qdev, IFUP, DEBUG,
2318                         "Turning off VLAN in NIC_RCV_CFG.\n");
2319                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2320         }
2321 }
2322
2323 static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2324 {
2325         struct ql_adapter *qdev = netdev_priv(ndev);
2326         u32 enable_bit = MAC_ADDR_E;
2327         int status;
2328
2329         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2330         if (status)
2331                 return;
2332         if (ql_set_mac_addr_reg
2333             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2334                 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2335         }
2336         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2337 }
2338
2339 static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2340 {
2341         struct ql_adapter *qdev = netdev_priv(ndev);
2342         u32 enable_bit = 0;
2343         int status;
2344
2345         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2346         if (status)
2347                 return;
2348
2349         if (ql_set_mac_addr_reg
2350             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2351                 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2352         }
2353         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2354
2355 }
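/* Note: the add/kill pair above differs only in enable_bit: MAC_ADDR_E
 * marks the CAM entry for this vid active, while 0 clears it.  Both paths
 * serialize on the MAC_ADDR hardware semaphore around the register write.
 */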
2356
2357 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2358 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2359 {
2360         struct rx_ring *rx_ring = dev_id;
2361         napi_schedule(&rx_ring->napi);
2362         return IRQ_HANDLED;
2363 }
2364
2365 /* This handles a fatal error, MPI activity, and the default
2366  * rx_ring in an MSI-X multiple vector environment.
2367  * In an MSI/Legacy environment it also processes the rest of
2368  * the rx_rings.
2369  */
2370 static irqreturn_t qlge_isr(int irq, void *dev_id)
2371 {
2372         struct rx_ring *rx_ring = dev_id;
2373         struct ql_adapter *qdev = rx_ring->qdev;
2374         struct intr_context *intr_context = &qdev->intr_context[0];
2375         u32 var;
2376         int work_done = 0;
2377
2378         spin_lock(&qdev->hw_lock);
2379         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2380                 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2381                 spin_unlock(&qdev->hw_lock);
2382                 return IRQ_NONE;
2383         }
2384         spin_unlock(&qdev->hw_lock);
2385
2386         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2387
2388         /*
2389          * Check for fatal error.
2390          */
2391         if (var & STS_FE) {
2392                 ql_queue_asic_error(qdev);
2393                 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2394                 var = ql_read32(qdev, ERR_STS);
2395                 QPRINTK(qdev, INTR, ERR,
2396                         "Resetting chip. Error Status Register = 0x%x\n", var);
2397                 return IRQ_HANDLED;
2398         }
2399
2400         /*
2401          * Check MPI processor activity.
2402          */
2403         if ((var & STS_PI) &&
2404                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2405                 /*
2406                  * We've got an async event or mailbox completion.
2407                  * Handle it and clear the source of the interrupt.
2408                  */
2409                 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2410                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2411                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2412                 queue_delayed_work_on(smp_processor_id(),
2413                                 qdev->workqueue, &qdev->mpi_work, 0);
2414                 work_done++;
2415         }
2416
2417         /*
2418          * Get the bit-mask that shows the active queues for this
2419          * pass.  Compare it to the queues that this irq services
2420          * and call napi if there's a match.
2421          */
2422         var = ql_read32(qdev, ISR1);
2423         if (var & intr_context->irq_mask) {
2424                 QPRINTK(qdev, INTR, INFO,
2425                         "Waking handler for rx_ring[0].\n");
2426                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2427                 napi_schedule(&rx_ring->napi);
2428                 work_done++;
2429         }
2430         ql_enable_completion_interrupt(qdev, intr_context->intr);
2431         return work_done ? IRQ_HANDLED : IRQ_NONE;
2432 }
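/* Note: the irq_cnt test at the top is the shared-interrupt filter: a
 * non-zero count indicates this context's interrupt is already masked and
 * being serviced, so a firing on the shared line can't be ours and
 * IRQ_NONE is returned.
 */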
2433
2434 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2435 {
2436
2437         if (skb_is_gso(skb)) {
2438                 int err;
2439                 if (skb_header_cloned(skb)) {
2440                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2441                         if (err)
2442                                 return err;
2443                 }
2444
2445                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2446                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2447                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2448                 mac_iocb_ptr->total_hdrs_len =
2449                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2450                 mac_iocb_ptr->net_trans_offset =
2451                     cpu_to_le16(skb_network_offset(skb) |
2452                                 skb_transport_offset(skb)
2453                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2454                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2455                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2456                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2457                         struct iphdr *iph = ip_hdr(skb);
2458                         iph->check = 0;
2459                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2460                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2461                                                                  iph->daddr, 0,
2462                                                                  IPPROTO_TCP,
2463                                                                  0);
2464                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2465                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2466                         tcp_hdr(skb)->check =
2467                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2468                                              &ipv6_hdr(skb)->daddr,
2469                                              0, IPPROTO_TCP, 0);
2470                 }
2471                 return 1;
2472         }
2473         return 0;
2474 }
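/* Note on the checksum seeding above: for TSO the TCP checksum is replaced
 * with a pseudo-header sum over the addresses and protocol with a zero
 * length (csum_tcpudp_magic(..., 0, IPPROTO_TCP, 0)), so the chip can fold
 * in the real length and payload sum for each segment it cuts.
 */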
2475
2476 static void ql_hw_csum_setup(struct sk_buff *skb,
2477                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2478 {
2479         int len;
2480         struct iphdr *iph = ip_hdr(skb);
2481         __sum16 *check;
2482         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2483         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2484         mac_iocb_ptr->net_trans_offset =
2485                 cpu_to_le16(skb_network_offset(skb) |
2486                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2487
2488         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2489         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2490         if (likely(iph->protocol == IPPROTO_TCP)) {
2491                 check = &(tcp_hdr(skb)->check);
2492                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2493                 mac_iocb_ptr->total_hdrs_len =
2494                     cpu_to_le16(skb_transport_offset(skb) +
2495                                 (tcp_hdr(skb)->doff << 2));
2496         } else {
2497                 check = &(udp_hdr(skb)->check);
2498                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2499                 mac_iocb_ptr->total_hdrs_len =
2500                     cpu_to_le16(skb_transport_offset(skb) +
2501                                 sizeof(struct udphdr));
2502         }
2503         *check = ~csum_tcpudp_magic(iph->saddr,
2504                                     iph->daddr, len, iph->protocol, 0);
2505 }
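/* Worked example: for an IPv4 TCP frame with tot_len = 1500 and ihl = 5,
 * len above is 1500 - 20 = 1480, and *check is seeded with the
 * pseudo-header sum over that L4 length so the hardware only has to add
 * the TCP header and payload bytes.
 */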
2506
2507 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2508 {
2509         struct tx_ring_desc *tx_ring_desc;
2510         struct ob_mac_iocb_req *mac_iocb_ptr;
2511         struct ql_adapter *qdev = netdev_priv(ndev);
2512         int tso;
2513         struct tx_ring *tx_ring;
2514         u32 tx_ring_idx = (u32) skb->queue_mapping;
2515
2516         tx_ring = &qdev->tx_ring[tx_ring_idx];
2517
2518         if (skb_padto(skb, ETH_ZLEN))
2519                 return NETDEV_TX_OK;
2520
2521         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2522                 QPRINTK(qdev, TX_QUEUED, INFO,
2523                         "%s: shutting down tx queue %d due to lack of resources.\n",
2524                         __func__, tx_ring_idx);
2525                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2526                 atomic_inc(&tx_ring->queue_stopped);
2527                 tx_ring->tx_errors++;
2528                 return NETDEV_TX_BUSY;
2529         }
2530         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2531         mac_iocb_ptr = tx_ring_desc->queue_entry;
2532         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2533
2534         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2535         mac_iocb_ptr->tid = tx_ring_desc->index;
2536         /* We use the upper 32-bits to store the tx queue for this IO.
2537          * When we get the completion we can use it to establish the context.
2538          */
2539         mac_iocb_ptr->txq_idx = tx_ring_idx;
2540         tx_ring_desc->skb = skb;
2541
2542         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2543
2544         if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2545                 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2546                         vlan_tx_tag_get(skb));
2547                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2548                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2549         }
2550         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2551         if (tso < 0) {
2552                 dev_kfree_skb_any(skb);
2553                 return NETDEV_TX_OK;
2554         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2555                 ql_hw_csum_setup(skb,
2556                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2557         }
2558         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2559                         NETDEV_TX_OK) {
2560                 QPRINTK(qdev, TX_QUEUED, ERR,
2561                                 "Could not map the segments.\n");
2562                 tx_ring->tx_errors++;
2563                 return NETDEV_TX_BUSY;
2564         }
2565         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2566         tx_ring->prod_idx++;
2567         if (tx_ring->prod_idx == tx_ring->wq_len)
2568                 tx_ring->prod_idx = 0;
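        /* Make sure the IOCB contents are written to memory before the
         * doorbell write below makes the new producer index visible to
         * the hardware.
         */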
2569         wmb();
2570
2571         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2572         QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2573                 tx_ring->prod_idx, skb->len);
2574
2575         atomic_dec(&tx_ring->tx_count);
2576         return NETDEV_TX_OK;
2577 }
2578
2579
2580 static void ql_free_shadow_space(struct ql_adapter *qdev)
2581 {
2582         if (qdev->rx_ring_shadow_reg_area) {
2583                 pci_free_consistent(qdev->pdev,
2584                                     PAGE_SIZE,
2585                                     qdev->rx_ring_shadow_reg_area,
2586                                     qdev->rx_ring_shadow_reg_dma);
2587                 qdev->rx_ring_shadow_reg_area = NULL;
2588         }
2589         if (qdev->tx_ring_shadow_reg_area) {
2590                 pci_free_consistent(qdev->pdev,
2591                                     PAGE_SIZE,
2592                                     qdev->tx_ring_shadow_reg_area,
2593                                     qdev->tx_ring_shadow_reg_dma);
2594                 qdev->tx_ring_shadow_reg_area = NULL;
2595         }
2596 }
2597
2598 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2599 {
2600         qdev->rx_ring_shadow_reg_area =
2601             pci_alloc_consistent(qdev->pdev,
2602                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2603         if (qdev->rx_ring_shadow_reg_area == NULL) {
2604                 QPRINTK(qdev, IFUP, ERR,
2605                         "Allocation of RX shadow space failed.\n");
2606                 return -ENOMEM;
2607         }
2608         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2609         qdev->tx_ring_shadow_reg_area =
2610             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2611                                  &qdev->tx_ring_shadow_reg_dma);
2612         if (qdev->tx_ring_shadow_reg_area == NULL) {
2613                 QPRINTK(qdev, IFUP, ERR,
2614                         "Allocation of TX shadow space failed.\n");
2615                 goto err_wqp_sh_area;
2616         }
2617         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2618         return 0;
2619
2620 err_wqp_sh_area:
2621         pci_free_consistent(qdev->pdev,
2622                             PAGE_SIZE,
2623                             qdev->rx_ring_shadow_reg_area,
2624                             qdev->rx_ring_shadow_reg_dma);
2625         return -ENOMEM;
2626 }
2627
2628 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2629 {
2630         struct tx_ring_desc *tx_ring_desc;
2631         int i;
2632         struct ob_mac_iocb_req *mac_iocb_ptr;
2633
2634         mac_iocb_ptr = tx_ring->wq_base;
2635         tx_ring_desc = tx_ring->q;
2636         for (i = 0; i < tx_ring->wq_len; i++) {
2637                 tx_ring_desc->index = i;
2638                 tx_ring_desc->skb = NULL;
2639                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2640                 mac_iocb_ptr++;
2641                 tx_ring_desc++;
2642         }
2643         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2644         atomic_set(&tx_ring->queue_stopped, 0);
2645 }
2646
2647 static void ql_free_tx_resources(struct ql_adapter *qdev,
2648                                  struct tx_ring *tx_ring)
2649 {
2650         if (tx_ring->wq_base) {
2651                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2652                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2653                 tx_ring->wq_base = NULL;
2654         }
2655         kfree(tx_ring->q);
2656         tx_ring->q = NULL;
2657 }
2658
2659 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2660                                  struct tx_ring *tx_ring)
2661 {
2662         tx_ring->wq_base =
2663             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2664                                  &tx_ring->wq_base_dma);
2665
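        /* The hardware requires an aligned work queue base address, so
         * treat a misaligned DMA address like an allocation failure.
         */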
2666         if ((tx_ring->wq_base == NULL) ||
2667             tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2668                 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2669                 return -ENOMEM;
2670         }
2671         tx_ring->q =
2672             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2673         if (tx_ring->q == NULL)
2674                 goto err;
2675
2676         return 0;
2677 err:
2678         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2679                             tx_ring->wq_base, tx_ring->wq_base_dma);
2680         return -ENOMEM;
2681 }
2682
2683 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2684 {
2685         struct bq_desc *lbq_desc;
2686
2687         uint32_t  curr_idx, clean_idx;
2688
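        /* Walk from the current index up to the clean index, dropping
         * each page chunk's reference and unmapping the block when its
         * last chunk is reached.
         */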
2689         curr_idx = rx_ring->lbq_curr_idx;
2690         clean_idx = rx_ring->lbq_clean_idx;
2691         while (curr_idx != clean_idx) {
2692                 lbq_desc = &rx_ring->lbq[curr_idx];
2693
2694                 if (lbq_desc->p.pg_chunk.last_flag) {
2695                         pci_unmap_page(qdev->pdev,
2696                                 lbq_desc->p.pg_chunk.map,
2697                                 ql_lbq_block_size(qdev),
2698                                        PCI_DMA_FROMDEVICE);
2699                         lbq_desc->p.pg_chunk.last_flag = 0;
2700                 }
2701
2702                 put_page(lbq_desc->p.pg_chunk.page);
2703                 lbq_desc->p.pg_chunk.page = NULL;
2704
2705                 if (++curr_idx == rx_ring->lbq_len)
2706                         curr_idx = 0;
2707
2708         }
2709 }
2710
2711 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2712 {
2713         int i;
2714         struct bq_desc *sbq_desc;
2715
2716         for (i = 0; i < rx_ring->sbq_len; i++) {
2717                 sbq_desc = &rx_ring->sbq[i];
2718                 if (sbq_desc == NULL) {
2719                         QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2720                         return;
2721                 }
2722                 if (sbq_desc->p.skb) {
2723                         pci_unmap_single(qdev->pdev,
2724                                          pci_unmap_addr(sbq_desc, mapaddr),
2725                                          pci_unmap_len(sbq_desc, maplen),
2726                                          PCI_DMA_FROMDEVICE);
2727                         dev_kfree_skb(sbq_desc->p.skb);
2728                         sbq_desc->p.skb = NULL;
2729                 }
2730         }
2731 }
2732
2733 /* Free all large and small rx buffers associated
2734  * with the completion queues for this device.
2735  */
2736 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2737 {
2738         int i;
2739         struct rx_ring *rx_ring;
2740
2741         for (i = 0; i < qdev->rx_ring_count; i++) {
2742                 rx_ring = &qdev->rx_ring[i];
2743                 if (rx_ring->lbq)
2744                         ql_free_lbq_buffers(qdev, rx_ring);
2745                 if (rx_ring->sbq)
2746                         ql_free_sbq_buffers(qdev, rx_ring);
2747         }
2748 }
2749
2750 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2751 {
2752         struct rx_ring *rx_ring;
2753         int i;
2754
2755         for (i = 0; i < qdev->rx_ring_count; i++) {
2756                 rx_ring = &qdev->rx_ring[i];
2757                 if (rx_ring->type != TX_Q)
2758                         ql_update_buffer_queues(qdev, rx_ring);
2759         }
2760 }
2761
2762 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2763                                 struct rx_ring *rx_ring)
2764 {
2765         int i;
2766         struct bq_desc *lbq_desc;
2767         __le64 *bq = rx_ring->lbq_base;
2768
2769         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2770         for (i = 0; i < rx_ring->lbq_len; i++) {
2771                 lbq_desc = &rx_ring->lbq[i];
2772                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2773                 lbq_desc->index = i;
2774                 lbq_desc->addr = bq;
2775                 bq++;
2776         }
2777 }
2778
2779 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2780                                 struct rx_ring *rx_ring)
2781 {
2782         int i;
2783         struct bq_desc *sbq_desc;
2784         __le64 *bq = rx_ring->sbq_base;
2785
2786         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2787         for (i = 0; i < rx_ring->sbq_len; i++) {
2788                 sbq_desc = &rx_ring->sbq[i];
2789                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2790                 sbq_desc->index = i;
2791                 sbq_desc->addr = bq;
2792                 bq++;
2793         }
2794 }
2795
2796 static void ql_free_rx_resources(struct ql_adapter *qdev,
2797                                  struct rx_ring *rx_ring)
2798 {
2799         /* Free the small buffer queue. */
2800         if (rx_ring->sbq_base) {
2801                 pci_free_consistent(qdev->pdev,
2802                                     rx_ring->sbq_size,
2803                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2804                 rx_ring->sbq_base = NULL;
2805         }
2806
2807         /* Free the small buffer queue control blocks. */
2808         kfree(rx_ring->sbq);
2809         rx_ring->sbq = NULL;
2810
2811         /* Free the large buffer queue. */
2812         if (rx_ring->lbq_base) {
2813                 pci_free_consistent(qdev->pdev,
2814                                     rx_ring->lbq_size,
2815                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2816                 rx_ring->lbq_base = NULL;
2817         }
2818
2819         /* Free the large buffer queue control blocks. */
2820         kfree(rx_ring->lbq);
2821         rx_ring->lbq = NULL;
2822
2823         /* Free the rx queue. */
2824         if (rx_ring->cq_base) {
2825                 pci_free_consistent(qdev->pdev,
2826                                     rx_ring->cq_size,
2827                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2828                 rx_ring->cq_base = NULL;
2829         }
2830 }
2831
2832 /* Allocate queues and buffers for this completion queue based
2833  * on the values in the parameter structure. */
2834 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2835                                  struct rx_ring *rx_ring)
2836 {
2837
2838         /*
2839          * Allocate the completion queue for this rx_ring.
2840          */
2841         rx_ring->cq_base =
2842             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2843                                  &rx_ring->cq_base_dma);
2844
2845         if (rx_ring->cq_base == NULL) {
2846                 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2847                 return -ENOMEM;
2848         }
2849
2850         if (rx_ring->sbq_len) {
2851                 /*
2852                  * Allocate small buffer queue.
2853                  */
2854                 rx_ring->sbq_base =
2855                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2856                                          &rx_ring->sbq_base_dma);
2857
2858                 if (rx_ring->sbq_base == NULL) {
2859                         QPRINTK(qdev, IFUP, ERR,
2860                                 "Small buffer queue allocation failed.\n");
2861                         goto err_mem;
2862                 }
2863
2864                 /*
2865                  * Allocate small buffer queue control blocks.
2866                  */
2867                 rx_ring->sbq =
2868                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2869                             GFP_KERNEL);
2870                 if (rx_ring->sbq == NULL) {
2871                         QPRINTK(qdev, IFUP, ERR,
2872                                 "Small buffer queue control block allocation failed.\n");
2873                         goto err_mem;
2874                 }
2875
2876                 ql_init_sbq_ring(qdev, rx_ring);
2877         }
2878
2879         if (rx_ring->lbq_len) {
2880                 /*
2881                  * Allocate large buffer queue.
2882                  */
2883                 rx_ring->lbq_base =
2884                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2885                                          &rx_ring->lbq_base_dma);
2886
2887                 if (rx_ring->lbq_base == NULL) {
2888                         QPRINTK(qdev, IFUP, ERR,
2889                                 "Large buffer queue allocation failed.\n");
2890                         goto err_mem;
2891                 }
2892                 /*
2893                  * Allocate large buffer queue control blocks.
2894                  */
2895                 rx_ring->lbq =
2896                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2897                             GFP_KERNEL);
2898                 if (rx_ring->lbq == NULL) {
2899                         QPRINTK(qdev, IFUP, ERR,
2900                                 "Large buffer queue control block allocation failed.\n");
2901                         goto err_mem;
2902                 }
2903
2904                 ql_init_lbq_ring(qdev, rx_ring);
2905         }
2906
2907         return 0;
2908
2909 err_mem:
2910         ql_free_rx_resources(qdev, rx_ring);
2911         return -ENOMEM;
2912 }
2913
2914 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2915 {
2916         struct tx_ring *tx_ring;
2917         struct tx_ring_desc *tx_ring_desc;
2918         int i, j;
2919
2920         /*
2921          * Loop through all queues and free
2922          * any resources.
2923          */
2924         for (j = 0; j < qdev->tx_ring_count; j++) {
2925                 tx_ring = &qdev->tx_ring[j];
2926                 for (i = 0; i < tx_ring->wq_len; i++) {
2927                         tx_ring_desc = &tx_ring->q[i];
2928                         if (tx_ring_desc && tx_ring_desc->skb) {
2929                                 QPRINTK(qdev, IFDOWN, ERR,
2930                                 "Freeing lost SKB %p from queue %d, index %d.\n",
2931                                         tx_ring_desc->skb, j,
2932                                         tx_ring_desc->index);
2933                                 ql_unmap_send(qdev, tx_ring_desc,
2934                                               tx_ring_desc->map_cnt);
2935                                 dev_kfree_skb(tx_ring_desc->skb);
2936                                 tx_ring_desc->skb = NULL;
2937                         }
2938                 }
2939         }
2940 }
2941
2942 static void ql_free_mem_resources(struct ql_adapter *qdev)
2943 {
2944         int i;
2945
2946         for (i = 0; i < qdev->tx_ring_count; i++)
2947                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2948         for (i = 0; i < qdev->rx_ring_count; i++)
2949                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2950         ql_free_shadow_space(qdev);
2951 }
2952
2953 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2954 {
2955         int i;
2956
2957         /* Allocate space for our shadow registers and such. */
2958         if (ql_alloc_shadow_space(qdev))
2959                 return -ENOMEM;
2960
2961         for (i = 0; i < qdev->rx_ring_count; i++) {
2962                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2963                         QPRINTK(qdev, IFUP, ERR,
2964                                 "RX resource allocation failed.\n");
2965                         goto err_mem;
2966                 }
2967         }
2968         /* Allocate tx queue resources */
2969         for (i = 0; i < qdev->tx_ring_count; i++) {
2970                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2971                         QPRINTK(qdev, IFUP, ERR,
2972                                 "TX resource allocation failed.\n");
2973                         goto err_mem;
2974                 }
2975         }
2976         return 0;
2977
2978 err_mem:
2979         ql_free_mem_resources(qdev);
2980         return -ENOMEM;
2981 }
2982
2983 /* Set up the rx ring control block and pass it to the chip.
2984  * The control block is defined as
2985  * "Completion Queue Initialization Control Block", or cqicb.
2986  */
2987 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2988 {
2989         struct cqicb *cqicb = &rx_ring->cqicb;
2990         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2991                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2992         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2993                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2994         void __iomem *doorbell_area =
2995             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2996         int err = 0;
2997         u16 bq_len;
2998         u64 tmp;
2999         __le64 *base_indirect_ptr;
3000         int page_entries;
3001
3002         /* Set up the shadow registers for this ring. */
3003         rx_ring->prod_idx_sh_reg = shadow_reg;
3004         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3005         *rx_ring->prod_idx_sh_reg = 0;
3006         shadow_reg += sizeof(u64);
3007         shadow_reg_dma += sizeof(u64);
3008         rx_ring->lbq_base_indirect = shadow_reg;
3009         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3010         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3011         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3012         rx_ring->sbq_base_indirect = shadow_reg;
3013         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
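        /* The shadow area for this ring now holds an 8-byte producer
         * index followed by the lbq and sbq indirect page-address
         * lists.
         */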
3014
3015         /* PCI doorbell mem area + 0x00 for consumer index register */
3016         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3017         rx_ring->cnsmr_idx = 0;
3018         rx_ring->curr_entry = rx_ring->cq_base;
3019
3020         /* PCI doorbell mem area + 0x04 for valid register */
3021         rx_ring->valid_db_reg = doorbell_area + 0x04;
3022
3023         /* PCI doorbell mem area + 0x18 for large buffer producer index */
3024         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3025
3026         /* PCI doorbell mem area + 0x1c for small buffer producer index */
3027         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3028
3029         memset((void *)cqicb, 0, sizeof(struct cqicb));
3030         cqicb->msix_vect = rx_ring->irq;
3031
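        /* A ring length of 65536 does not fit in the 16-bit field and
         * is encoded as zero.
         */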
3032         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3033         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3034
3035         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3036
3037         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3038
3039         /*
3040          * Set up the control block load flags.
3041          */
3042         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3043             FLAGS_LV |          /* Load MSI-X vector */
3044             FLAGS_LI;           /* Load irq delay values */
3045         if (rx_ring->lbq_len) {
3046                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3047                 tmp = (u64)rx_ring->lbq_base_dma;
3048                 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3049                 page_entries = 0;
3050                 do {
3051                         *base_indirect_ptr = cpu_to_le64(tmp);
3052                         tmp += DB_PAGE_SIZE;
3053                         base_indirect_ptr++;
3054                         page_entries++;
3055                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3056                 cqicb->lbq_addr =
3057                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3058                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3059                         (u16) rx_ring->lbq_buf_size;
3060                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3061                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3062                         (u16) rx_ring->lbq_len;
3063                 cqicb->lbq_len = cpu_to_le16(bq_len);
3064                 rx_ring->lbq_prod_idx = 0;
3065                 rx_ring->lbq_curr_idx = 0;
3066                 rx_ring->lbq_clean_idx = 0;
3067                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3068         }
3069         if (rx_ring->sbq_len) {
3070                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3071                 tmp = (u64)rx_ring->sbq_base_dma;
3072                 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3073                 page_entries = 0;
3074                 do {
3075                         *base_indirect_ptr = cpu_to_le64(tmp);
3076                         tmp += DB_PAGE_SIZE;
3077                         base_indirect_ptr++;
3078                         page_entries++;
3079                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3080                 cqicb->sbq_addr =
3081                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3082                 cqicb->sbq_buf_size =
3083                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3084                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3085                         (u16) rx_ring->sbq_len;
3086                 cqicb->sbq_len = cpu_to_le16(bq_len);
3087                 rx_ring->sbq_prod_idx = 0;
3088                 rx_ring->sbq_curr_idx = 0;
3089                 rx_ring->sbq_clean_idx = 0;
3090                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3091         }
3092         switch (rx_ring->type) {
3093         case TX_Q:
3094                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3095                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3096                 break;
3097         case RX_Q:
3098                 /* Inbound completion handling rx_rings run in
3099                  * separate NAPI contexts.
3100                  */
3101                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3102                                64);
3103                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3104                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3105                 break;
3106         default:
3107                 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
3108                         rx_ring->type);
3109         }
3110         QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
3111         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3112                            CFG_LCQ, rx_ring->cq_id);
3113         if (err) {
3114                 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
3115                 return err;
3116         }
3117         return err;
3118 }
3119
3120 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3121 {
3122         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3123         void __iomem *doorbell_area =
3124             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3125         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3126             (tx_ring->wq_id * sizeof(u64));
3127         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3128             (tx_ring->wq_id * sizeof(u64));
3129         int err = 0;
3130
3131         /*
3132          * Assign doorbell registers for this tx_ring.
3133          */
3134         /* TX PCI doorbell mem area for tx producer index */
3135         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3136         tx_ring->prod_idx = 0;
3137         /* TX PCI doorbell mem area + 0x04 */
3138         tx_ring->valid_db_reg = doorbell_area + 0x04;
3139
3140         /*
3141          * Assign shadow registers for this tx_ring.
3142          */
3143         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3144         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3145
3146         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3147         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3148                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3149         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3150         wqicb->rid = 0;
3151         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3152
3153         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3154
3155         ql_init_tx_ring(qdev, tx_ring);
3156
3157         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3158                            (u16) tx_ring->wq_id);
3159         if (err) {
3160                 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
3161                 return err;
3162         }
3163         QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
3164         return err;
3165 }
3166
3167 static void ql_disable_msix(struct ql_adapter *qdev)
3168 {
3169         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3170                 pci_disable_msix(qdev->pdev);
3171                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3172                 kfree(qdev->msi_x_entry);
3173                 qdev->msi_x_entry = NULL;
3174         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3175                 pci_disable_msi(qdev->pdev);
3176                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3177         }
3178 }
3179
3180 /* We start by trying to get the number of vectors
3181  * stored in qdev->intr_count. If we don't get that
3182  * many then we reduce the count and try again.
3183  */
3184 static void ql_enable_msix(struct ql_adapter *qdev)
3185 {
3186         int i, err;
3187
3188         /* Get the MSIX vectors. */
3189         if (qlge_irq_type == MSIX_IRQ) {
3190                 /* Try to alloc space for the msix struct,
3191                  * if it fails then go to MSI/legacy.
3192                  */
3193                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3194                                             sizeof(struct msix_entry),
3195                                             GFP_KERNEL);
3196                 if (!qdev->msi_x_entry) {
3197                         qlge_irq_type = MSI_IRQ;
3198                         goto msi;
3199                 }
3200
3201                 for (i = 0; i < qdev->intr_count; i++)
3202                         qdev->msi_x_entry[i].entry = i;
3203
3204                 /* Loop to get our vectors.  We start with
3205                  * what we want and settle for what we get.
3206                  */
3207                 do {
3208                         err = pci_enable_msix(qdev->pdev,
3209                                 qdev->msi_x_entry, qdev->intr_count);
3210                         if (err > 0)
3211                                 qdev->intr_count = err;
3212                 } while (err > 0);
3213
3214                 if (err < 0) {
3215                         kfree(qdev->msi_x_entry);
3216                         qdev->msi_x_entry = NULL;
3217                         QPRINTK(qdev, IFUP, WARNING,
3218                                 "MSI-X Enable failed, trying MSI.\n");
3219                         qdev->intr_count = 1;
3220                         qlge_irq_type = MSI_IRQ;
3221                 } else if (err == 0) {
3222                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3223                         QPRINTK(qdev, IFUP, INFO,
3224                                 "MSI-X Enabled, got %d vectors.\n",
3225                                 qdev->intr_count);
3226                         return;
3227                 }
3228         }
3229 msi:
3230         qdev->intr_count = 1;
3231         if (qlge_irq_type == MSI_IRQ) {
3232                 if (!pci_enable_msi(qdev->pdev)) {
3233                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3234                         QPRINTK(qdev, IFUP, INFO,
3235                                 "Running with MSI interrupts.\n");
3236                         return;
3237                 }
3238         }
3239         qlge_irq_type = LEG_IRQ;
3240         QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
3241 }
3242
3243 /* Each vector services 1 RSS ring and 1 or more
3244  * TX completion rings.  This function loops through
3245  * the TX completion rings and assigns the vector that
3246  * will service them.  An example would be if there are
3247  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3248  * This would mean that vector 0 would service RSS ring 0
3249  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3250  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3251  */
3252 static void ql_set_tx_vect(struct ql_adapter *qdev)
3253 {
3254         int i, j, vect;
3255         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3256
3257         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3258                 /* Assign irq vectors to TX rx_rings.*/
3259                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3260                                          i < qdev->rx_ring_count; i++) {
3261                         if (j == tx_rings_per_vector) {
3262                                 vect++;
3263                                 j = 0;
3264                         }
3265                         qdev->rx_ring[i].irq = vect;
3266                         j++;
3267                 }
3268         } else {
3269                 /* For a single vector all rings have an irq
3270                  * of zero.
3271                  */
3272                 for (i = 0; i < qdev->rx_ring_count; i++)
3273                         qdev->rx_ring[i].irq = 0;
3274         }
3275 }
3276
3277 /* Set the interrupt mask for this vector.  Each vector
3278  * will service 1 RSS ring and 1 or more TX completion
3279  * rings.  This function sets up a bit mask per vector
3280  * that indicates which rings it services.
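 * For example, with 2 vectors and 8 TX completion rings, vector 0's
 * mask covers the cq_id of RSS ring 0 plus TX completion rings 0-3,
 * and vector 1 covers RSS ring 1 plus TX completion rings 4-7.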
3281  */
3282 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3283 {
3284         int j, vect = ctx->intr;
3285         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3286
3287         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3288                 /* Add the RSS ring serviced by this vector
3289                  * to the mask.
3290                  */
3291                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3292                 /* Add the TX ring(s) serviced by this vector
3293                  * to the mask. */
3294                 for (j = 0; j < tx_rings_per_vector; j++) {
3295                         ctx->irq_mask |=
3296                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3297                         (vect * tx_rings_per_vector) + j].cq_id);
3298                 }
3299         } else {
3300                 /* For a single vector we just shift each queue's
3301                  * ID into the mask.
3302                  */
3303                 for (j = 0; j < qdev->rx_ring_count; j++)
3304                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3305         }
3306 }
3307
3308 /*
3309  * Here we build the intr_context structures based on
3310  * our rx_ring count and intr vector count.
3311  * The intr_context structure is used to hook each vector
3312  * to possibly different handlers.
3313  */
3314 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3315 {
3316         int i = 0;
3317         struct intr_context *intr_context = &qdev->intr_context[0];
3318
3319         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3320                 /* Each rx_ring has its
3321                  * own intr_context since we have separate
3322                  * vectors for each queue.
3323                  */
3324                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3325                         qdev->rx_ring[i].irq = i;
3326                         intr_context->intr = i;
3327                         intr_context->qdev = qdev;
3328                         /* Set up this vector's bit-mask that indicates
3329                          * which queues it services.
3330                          */
3331                         ql_set_irq_mask(qdev, intr_context);
3332                         /*
3333                          * We set up each vector's enable/disable/read bits so
3334                          * there are no bit/mask calculations in the critical path.
3335                          */
3336                         intr_context->intr_en_mask =
3337                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3338                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3339                             | i;
3340                         intr_context->intr_dis_mask =
3341                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3342                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3343                             INTR_EN_IHD | i;
3344                         intr_context->intr_read_mask =
3345                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3346                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3347                             i;
3348                         if (i == 0) {
3349                                 /* The first vector/queue handles
3350                                  * broadcast/multicast, fatal errors,
3351                                  * and firmware events.  This is in addition
3352                                  * to normal inbound NAPI processing.
3353                                  */
3354                                 intr_context->handler = qlge_isr;
3355                                 sprintf(intr_context->name, "%s-rx-%d",
3356                                         qdev->ndev->name, i);
3357                         } else {
3358                                 /*
3359                                  * Inbound queues handle unicast frames only.
3360                                  */
3361                                 intr_context->handler = qlge_msix_rx_isr;
3362                                 sprintf(intr_context->name, "%s-rx-%d",
3363                                         qdev->ndev->name, i);
3364                         }
3365                 }
3366         } else {
3367                 /*
3368                  * All rx_rings use the same intr_context since
3369                  * there is only one vector.
3370                  */
3371                 intr_context->intr = 0;
3372                 intr_context->qdev = qdev;
3373                 /*
3374                  * We set up each vector's enable/disable/read bits so
3375                  * there are no bit/mask calculations in the critical path.
3376                  */
3377                 intr_context->intr_en_mask =
3378                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3379                 intr_context->intr_dis_mask =
3380                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381                     INTR_EN_TYPE_DISABLE;
3382                 intr_context->intr_read_mask =
3383                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3384                 /*
3385                  * Single interrupt means one handler for all rings.
3386                  */
3387                 intr_context->handler = qlge_isr;
3388                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3389                 /* Set up this vector's bit-mask that indicates
3390                  * which queues it services. In this case there is
3391                  * a single vector so it will service all RSS and
3392                  * TX completion rings.
3393                  */
3394                 ql_set_irq_mask(qdev, intr_context);
3395         }
3396         /* Tell the TX completion rings which MSIx vector
3397          * they will be using.
3398          */
3399         ql_set_tx_vect(qdev);
3400 }
3401
3402 static void ql_free_irq(struct ql_adapter *qdev)
3403 {
3404         int i;
3405         struct intr_context *intr_context = &qdev->intr_context[0];
3406
3407         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3408                 if (intr_context->hooked) {
3409                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3410                                 free_irq(qdev->msi_x_entry[i].vector,
3411                                          &qdev->rx_ring[i]);
3412                                 QPRINTK(qdev, IFDOWN, DEBUG,
3413                                         "freeing msix interrupt %d.\n", i);
3414                         } else {
3415                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3416                                 QPRINTK(qdev, IFDOWN, DEBUG,
3417                                         "freeing msi interrupt %d.\n", i);
3418                         }
3419                 }
3420         }
3421         ql_disable_msix(qdev);
3422 }
3423
3424 static int ql_request_irq(struct ql_adapter *qdev)
3425 {
3426         int i;
3427         int status = 0;
3428         struct pci_dev *pdev = qdev->pdev;
3429         struct intr_context *intr_context = &qdev->intr_context[0];
3430
3431         ql_resolve_queues_to_irqs(qdev);
3432
3433         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3434                 atomic_set(&intr_context->irq_cnt, 0);
3435                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3436                         status = request_irq(qdev->msi_x_entry[i].vector,
3437                                              intr_context->handler,
3438                                              0,
3439                                              intr_context->name,
3440                                              &qdev->rx_ring[i]);
3441                         if (status) {
3442                                 QPRINTK(qdev, IFUP, ERR,
3443                                         "Failed request for MSIX interrupt %d.\n",
3444                                         i);
3445                                 goto err_irq;
3446                         } else {
3447                                 QPRINTK(qdev, IFUP, DEBUG,
3448                                         "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3449                                         i,
3450                                         qdev->rx_ring[i].type ==
3451                                         DEFAULT_Q ? "DEFAULT_Q" : "",
3452                                         qdev->rx_ring[i].type ==
3453                                         TX_Q ? "TX_Q" : "",
3454                                         qdev->rx_ring[i].type ==
3455                                         RX_Q ? "RX_Q" : "", intr_context->name);
3456                         }
3457                 } else {
3458                         QPRINTK(qdev, IFUP, DEBUG,
3459                                 "trying msi or legacy interrupts.\n");
3460                         QPRINTK(qdev, IFUP, DEBUG,
3461                                 "%s: irq = %d.\n", __func__, pdev->irq);
3462                         QPRINTK(qdev, IFUP, DEBUG,
3463                                 "%s: context->name = %s.\n", __func__,
3464                                intr_context->name);
3465                         QPRINTK(qdev, IFUP, DEBUG,
3466                                 "%s: dev_id = 0x%p.\n", __func__,
3467                                &qdev->rx_ring[0]);
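                        /* MSI interrupts are exclusive to this device;
                         * only a legacy INTx line can be shared, so
                         * IRQF_SHARED is passed in the legacy case only.
                         */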
3468                         status =
3469                             request_irq(pdev->irq, qlge_isr,
3470                                         test_bit(QL_MSI_ENABLED,
3471                                                  &qdev->
3472                                                  flags) ? 0 : IRQF_SHARED,
3473                                         intr_context->name, &qdev->rx_ring[0]);
3474                         if (status)
3475                                 goto err_irq;
3476
3477                         QPRINTK(qdev, IFUP, INFO,
3478                                 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3479                                 i,
3480                                 qdev->rx_ring[0].type ==
3481                                 DEFAULT_Q ? "DEFAULT_Q" : "",
3482                                 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3483                                 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3484                                 intr_context->name);
3485                 }
3486                 intr_context->hooked = 1;
3487         }
3488         return status;
3489 err_irq:
3490         QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!\n");
3491         ql_free_irq(qdev);
3492         return status;
3493 }
3494
3495 static int ql_start_rss(struct ql_adapter *qdev)
3496 {
3497         u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3498                                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3499                                 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3500                                 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3501                                 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3502                                 0xbe, 0xac, 0x01, 0xfa};
3503         struct ricb *ricb = &qdev->ricb;
3504         int status = 0;
3505         int i;
3506         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3507
3508         memset((void *)ricb, 0, sizeof(*ricb));
3509
3510         ricb->base_cq = RSS_L4K;
3511         ricb->flags =
3512                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3513         ricb->mask = cpu_to_le16((u16)(0x3ff));
3514
3515         /*
3516          * Fill out the Indirection Table.
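         * Each of the 1024 entries maps the low bits of the RSS hash
         * to one of the inbound rings; the (i & (rss_ring_count - 1))
         * mask below assumes rss_ring_count is a power of two.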
3517          */
3518         for (i = 0; i < 1024; i++)
3519                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3520
3521         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3522         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3523
3524         QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
3525
3526         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3527         if (status) {
3528                 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3529                 return status;
3530         }
3531         QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
3532         return status;
3533 }
3534
3535 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3536 {
3537         int i, status = 0;
3538
3539         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3540         if (status)
3541                 return status;
3542         /* Clear all the entries in the routing table. */
3543         for (i = 0; i < 16; i++) {
3544                 status = ql_set_routing_reg(qdev, i, 0, 0);
3545                 if (status) {
3546                         QPRINTK(qdev, IFUP, ERR,
3547                                 "Failed to init routing register for CAM "
3548                                 "packets.\n");
3549                         break;
3550                 }
3551         }
3552         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3553         return status;
3554 }
3555
3556 /* Initialize the frame-to-queue routing. */
3557 static int ql_route_initialize(struct ql_adapter *qdev)
3558 {
3559         int status = 0;
3560
3561         /* Clear all the entries in the routing table. */
3562         status = ql_clear_routing_entries(qdev);
3563         if (status)
3564                 return status;
3565
3566         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3567         if (status)
3568                 return status;
3569
3570         status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3571         if (status) {
3572                 QPRINTK(qdev, IFUP, ERR,
3573                         "Failed to init routing register for error packets.\n");
3574                 goto exit;
3575         }
3576         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3577         if (status) {
3578                 QPRINTK(qdev, IFUP, ERR,
3579                         "Failed to init routing register for broadcast packets.\n");
3580                 goto exit;
3581         }
3582         /* If we have more than one inbound queue, then turn on RSS in the
3583          * routing block.
3584          */
3585         if (qdev->rss_ring_count > 1) {
3586                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3587                                         RT_IDX_RSS_MATCH, 1);
3588                 if (status) {
3589                         QPRINTK(qdev, IFUP, ERR,
3590                                 "Failed to init routing register for MATCH RSS packets.\n");
3591                         goto exit;
3592                 }
3593         }
3594
3595         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3596                                     RT_IDX_CAM_HIT, 1);
3597         if (status)
3598                 QPRINTK(qdev, IFUP, ERR,
3599                         "Failed to init routing register for CAM packets.\n");
3600 exit:
3601         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3602         return status;
3603 }
3604
3605 int ql_cam_route_initialize(struct ql_adapter *qdev)
3606 {
3607         int status, set;
3608
3609         /* Check if the link is up and use that to
3610          * determine whether we are setting or clearing
3611          * the MAC address in the CAM.
3612          */
3613         set = ql_read32(qdev, STS);
3614         set &= qdev->port_link_up;
3615         status = ql_set_mac_addr(qdev, set);
3616         if (status) {
3617                 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3618                 return status;
3619         }
3620
3621         status = ql_route_initialize(qdev);
3622         if (status)
3623                 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3624
3625         return status;
3626 }
3627
3628 static int ql_adapter_initialize(struct ql_adapter *qdev)
3629 {
3630         u32 value, mask;
3631         int i;
3632         int status = 0;
3633
3634         /*
3635          * Set up the System register to halt on errors.
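         * The upper 16 bits of the value written form a per-bit write
         * mask; only bits whose mask bit is set are changed in the
         * register.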
3636          */
3637         value = SYS_EFE | SYS_FAE;
3638         mask = value << 16;
3639         ql_write32(qdev, SYS, mask | value);
3640
3641         /* Set the default queue, and VLAN behavior. */
3642         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3643         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3644         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3645
3646         /* Set the MPI interrupt to enabled. */
3647         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3648
3649         /* Enable the function, set pagesize, enable error checking. */
3650         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3651             FSC_EC | FSC_VM_PAGE_4K;
3652         value |= SPLT_SETTING;
3653
3654         /* Set/clear header splitting. */
3655         mask = FSC_VM_PAGESIZE_MASK |
3656             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3657         ql_write32(qdev, FSC, mask | value);
3658
3659         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3660
3661         /* Set RX packet routing to use the port/pci function on which the
3662          * packet arrived, in addition to the usual frame routing.
3663          * This is helpful on bonding where both interfaces can have
3664          * the same MAC address.
3665          */
3666         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3667         /* Reroute all packets to our Interface.
3668          * They may have been routed to MPI firmware
3669          * due to WOL.
3670          */
3671         value = ql_read32(qdev, MGMT_RCV_CFG);
3672         value &= ~MGMT_RCV_CFG_RM;
3673         mask = 0xffff0000;
3674
3675         /* Sticky reg needs clearing due to WOL. */
3676         ql_write32(qdev, MGMT_RCV_CFG, mask);
3677         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3678
3679         /* Default WOL is enabled on Mezz cards */
3680         if (qdev->pdev->subsystem_device == 0x0068 ||
3681                         qdev->pdev->subsystem_device == 0x0180)
3682                 qdev->wol = WAKE_MAGIC;
3683
3684         /* Start up the rx queues. */
3685         for (i = 0; i < qdev->rx_ring_count; i++) {
3686                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3687                 if (status) {
3688                         QPRINTK(qdev, IFUP, ERR,
3689                                 "Failed to start rx ring[%d].\n", i);
3690                         return status;
3691                 }
3692         }
3693
3694         /* If there is more than one inbound completion queue
3695          * then download a RICB to configure RSS.
3696          */
3697         if (qdev->rss_ring_count > 1) {
3698                 status = ql_start_rss(qdev);
3699                 if (status) {
3700                         QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3701                         return status;
3702                 }
3703         }
3704
3705         /* Start up the tx queues. */
3706         for (i = 0; i < qdev->tx_ring_count; i++) {
3707                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3708                 if (status) {
3709                         QPRINTK(qdev, IFUP, ERR,
3710                                 "Failed to start tx ring[%d].\n", i);
3711                         return status;
3712                 }
3713         }
3714
3715         /* Initialize the port and set the max framesize. */
3716         status = qdev->nic_ops->port_initialize(qdev);
3717         if (status)
3718                 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3719
3720         /* Set up the MAC address and frame routing filter. */
3721         status = ql_cam_route_initialize(qdev);
3722         if (status) {
3723                 QPRINTK(qdev, IFUP, ERR,
3724                                 "Failed to init CAM/Routing tables.\n");
3725                 return status;
3726         }
3727
3728         /* Start NAPI for the RSS queues. */
3729         for (i = 0; i < qdev->rss_ring_count; i++) {
3730                 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
3731                         i);
3732                 napi_enable(&qdev->rx_ring[i].napi);
3733         }
3734
3735         return status;
3736 }
3737
3738 /* Issue soft reset to chip. */
3739 static int ql_adapter_reset(struct ql_adapter *qdev)
3740 {
3741         u32 value;
3742         int status = 0;
3743         unsigned long end_jiffies;
3744
3745         /* Clear all the entries in the routing table. */
3746         status = ql_clear_routing_entries(qdev);
3747         if (status) {
3748                 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3749                 return status;
3750         }
3751
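        /* Give the reset sequence below at least one jiffy to
         * complete before declaring a timeout.
         */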
3752         end_jiffies = jiffies +
3753                 max((unsigned long)1, usecs_to_jiffies(30));
3754
3755         /* Stop management traffic. */
3756         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3757
3758         /* Wait for the NIC and MGMNT FIFOs to empty. */
3759         ql_wait_fifo_empty(qdev);
3760
3761         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3762
3763         do {
3764                 value = ql_read32(qdev, RST_FO);
3765                 if ((value & RST_FO_FR) == 0)
3766                         break;
3767                 cpu_relax();
3768         } while (time_before(jiffies, end_jiffies));
3769
3770         if (value & RST_FO_FR) {
3771                 QPRINTK(qdev, IFDOWN, ERR,
3772                         "Timed out waiting for the chip reset to complete!\n");
3773                 status = -ETIMEDOUT;
3774         }
3775
3776         /* Resume management traffic. */
3777         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3778         return status;
3779 }
3780
3781 static void ql_display_dev_info(struct net_device *ndev)
3782 {
3783         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3784
3785         QPRINTK(qdev, PROBE, INFO,
3786                 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3787                 "XG Roll = %d, XG Rev = %d.\n",
3788                 qdev->func,
3789                 qdev->port,
3790                 qdev->chip_rev_id & 0x0000000f,
3791                 qdev->chip_rev_id >> 4 & 0x0000000f,
3792                 qdev->chip_rev_id >> 8 & 0x0000000f,
3793                 qdev->chip_rev_id >> 12 & 0x0000000f);
3794         QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3795 }
3796
3797 int ql_wol(struct ql_adapter *qdev)
3798 {
3799         int status = 0;
3800         u32 wol = MB_WOL_DISABLE;
3801
3802         /* The CAM is still intact after a reset, but if we
3803          * are doing WOL, then we may need to program the
3804          * routing regs. We would also need to issue the mailbox
3805          * commands to instruct the MPI what to do per the ethtool
3806          * settings.
3807          */
3808
3809         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3810                         WAKE_MCAST | WAKE_BCAST)) {
3811                 QPRINTK(qdev, IFDOWN, ERR,
3812                         "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3813                         qdev->wol);
3814                 return -EINVAL;
3815         }
3816
3817         if (qdev->wol & WAKE_MAGIC) {
3818                 status = ql_mb_wol_set_magic(qdev, 1);
3819                 if (status) {
3820                         QPRINTK(qdev, IFDOWN, ERR,
3821                                 "Failed to set magic packet on %s.\n",
3822                                 qdev->ndev->name);
3823                         return status;
3824                 } else
3825                         QPRINTK(qdev, DRV, INFO,
3826                                 "Enabled magic packet successfully on %s.\n",
3827                                 qdev->ndev->name);
3828
3829                 wol |= MB_WOL_MAGIC_PKT;
3830         }
3831
3832         if (qdev->wol) {
3833                 wol |= MB_WOL_MODE_ON;
3834                 status = ql_mb_wol_mode(qdev, wol);
3835                 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3836                         (status == 0) ? "Successfully set" : "Failed", wol,
3837                         qdev->ndev->name);
3838         }
3839
3840         return status;
3841 }
3842
3843 static int ql_adapter_down(struct ql_adapter *qdev)
3844 {
3845         int i, status = 0;
3846
3847         ql_link_off(qdev);
3848
3849         /* Don't kill the reset worker thread if we
3850          * are in the process of recovery.
3851          */
3852         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3853                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3854         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3855         cancel_delayed_work_sync(&qdev->mpi_work);
3856         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3857         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3858         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3859
3860         for (i = 0; i < qdev->rss_ring_count; i++)
3861                 napi_disable(&qdev->rx_ring[i].napi);
3862
3863         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3864
3865         ql_disable_interrupts(qdev);
3866
3867         ql_tx_ring_clean(qdev);
3868
3869         /* Call netif_napi_del() from a common point.
3870          */
3871         for (i = 0; i < qdev->rss_ring_count; i++)
3872                 netif_napi_del(&qdev->rx_ring[i].napi);
3873
3874         ql_free_rx_buffers(qdev);
3875
3876         status = ql_adapter_reset(qdev);
3877         if (status)
3878                 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3879                         qdev->func);
3880         return status;
3881 }
3882
3883 static int ql_adapter_up(struct ql_adapter *qdev)
3884 {
3885         int err = 0;
3886
3887         err = ql_adapter_initialize(qdev);
3888         if (err) {
3889                 QPRINTK(qdev, IFUP, ERR, "Unable to initialize adapter.\n");
3890                 goto err_init;
3891         }
3892         set_bit(QL_ADAPTER_UP, &qdev->flags);
3893         ql_alloc_rx_buffers(qdev);
3894         /* If the port is initialized and the
3895          * link is up then turn on the carrier.
3896          */
3897         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3898                         (ql_read32(qdev, STS) & qdev->port_link_up))
3899                 ql_link_on(qdev);
3900         ql_enable_interrupts(qdev);
3901         ql_enable_all_completion_interrupts(qdev);
3902         netif_tx_start_all_queues(qdev->ndev);
3903
3904         return 0;
3905 err_init:
3906         ql_adapter_reset(qdev);
3907         return err;
3908 }
3909
3910 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3911 {
3912         ql_free_mem_resources(qdev);
3913         ql_free_irq(qdev);
3914 }
3915
3916 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3917 {
3918         int status = 0;
3919
3920         if (ql_alloc_mem_resources(qdev)) {
3921                 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3922                 return -ENOMEM;
3923         }
3924         status = ql_request_irq(qdev);
3925         return status;
3926 }
3927
3928 static int qlge_close(struct net_device *ndev)
3929 {
3930         struct ql_adapter *qdev = netdev_priv(ndev);
3931
3932         /*
3933          * Wait for device to recover from a reset.
3934          * (Rarely happens, but possible.)
3935          */
3936         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3937                 msleep(1);
3938         ql_adapter_down(qdev);
3939         ql_release_adapter_resources(qdev);
3940         return 0;
3941 }
3942
3943 static int ql_configure_rings(struct ql_adapter *qdev)
3944 {
3945         int i;
3946         struct rx_ring *rx_ring;
3947         struct tx_ring *tx_ring;
3948         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3949         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3950                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3951
3952         qdev->lbq_buf_order = get_order(lbq_buf_len);
3953
3954         /* In a perfect world we have one RSS ring for each CPU
3955          * and each has its own vector.  To do that we ask for
3956          * cpu_cnt vectors.  ql_enable_msix() will adjust the
3957          * vector count to what we actually get.  We then
3958          * allocate an RSS ring for each.
3959          * Essentially, we are doing min(cpu_count, msix_vector_count).
3960          */
3961         qdev->intr_count = cpu_cnt;
3962         ql_enable_msix(qdev);
3963         /* Adjust the RSS ring count to the actual vector count. */
3964         qdev->rss_ring_count = qdev->intr_count;
3965         qdev->tx_ring_count = cpu_cnt;
3966         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
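        /* Worked example with illustrative numbers: on an 8-CPU system
         * (assuming MAX_CPUS is at least 8) where ql_enable_msix() only
         * secures 4 vectors, this works out to intr_count = 4,
         * rss_ring_count = 4, tx_ring_count = 8 and rx_ring_count = 12
         * (one inbound cq per vector plus one outbound cq per tx ring).
         */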
3967
3968         for (i = 0; i < qdev->tx_ring_count; i++) {
3969                 tx_ring = &qdev->tx_ring[i];
3970                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3971                 tx_ring->qdev = qdev;
3972                 tx_ring->wq_id = i;
3973                 tx_ring->wq_len = qdev->tx_ring_size;
3974                 tx_ring->wq_size =
3975                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3976
3977                 /*
3978                  * The completion queue IDs for the tx rings start
3979                  * immediately after the rss rings.
3980                  */
3981                 tx_ring->cq_id = qdev->rss_ring_count + i;
3982         }
3983
3984         for (i = 0; i < qdev->rx_ring_count; i++) {
3985                 rx_ring = &qdev->rx_ring[i];
3986                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3987                 rx_ring->qdev = qdev;
3988                 rx_ring->cq_id = i;
3989                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
3990                 if (i < qdev->rss_ring_count) {
3991                         /*
3992                          * Inbound (RSS) queues.
3993                          */
3994                         rx_ring->cq_len = qdev->rx_ring_size;
3995                         rx_ring->cq_size =
3996                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3997                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3998                         rx_ring->lbq_size =
3999                             rx_ring->lbq_len * sizeof(__le64);
4000                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4001                         QPRINTK(qdev, IFUP, DEBUG,
4002                                 "lbq_buf_size %d, order = %d\n",
4003                                 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
4004                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4005                         rx_ring->sbq_size =
4006                             rx_ring->sbq_len * sizeof(__le64);
4007                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4008                         rx_ring->type = RX_Q;
4009                 } else {
4010                         /*
4011                          * Outbound queue handles outbound completions only.
4012                          */
4013                         /* outbound cq is same size as tx_ring it services. */
4014                         rx_ring->cq_len = qdev->tx_ring_size;
4015                         rx_ring->cq_size =
4016                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4017                         rx_ring->lbq_len = 0;
4018                         rx_ring->lbq_size = 0;
4019                         rx_ring->lbq_buf_size = 0;
4020                         rx_ring->sbq_len = 0;
4021                         rx_ring->sbq_size = 0;
4022                         rx_ring->sbq_buf_size = 0;
4023                         rx_ring->type = TX_Q;
4024                 }
4025         }
4026         return 0;
4027 }
4028
4029 static int qlge_open(struct net_device *ndev)
4030 {
4031         int err = 0;
4032         struct ql_adapter *qdev = netdev_priv(ndev);
4033
4034         err = ql_adapter_reset(qdev);
4035         if (err)
4036                 return err;
4037
4038         err = ql_configure_rings(qdev);
4039         if (err)
4040                 return err;
4041
4042         err = ql_get_adapter_resources(qdev);
4043         if (err)
4044                 goto error_up;
4045
4046         err = ql_adapter_up(qdev);
4047         if (err)
4048                 goto error_up;
4049
4050         return err;
4051
4052 error_up:
4053         ql_release_adapter_resources(qdev);
4054         return err;
4055 }
4056
4057 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4058 {
4059         struct rx_ring *rx_ring;
4060         int i, status;
4061         u32 lbq_buf_len;
4062
4063         /* Wait for an outstanding reset to complete. */
4064         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4065                 int i = 3;
4066                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4067                         QPRINTK(qdev, IFUP, ERR,
4068                                  "Waiting for adapter UP...\n");
4069                         ssleep(1);
4070                 }
4071
4072                 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4073                         QPRINTK(qdev, IFUP, ERR,
4074                          "Timed out waiting for adapter UP\n");
4075                         return -ETIMEDOUT;
4076                 }
4077         }
4078
4079         status = ql_adapter_down(qdev);
4080         if (status)
4081                 goto error;
4082
4083         /* Get the new rx buffer size. */
4084         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4085                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4086         qdev->lbq_buf_order = get_order(lbq_buf_len);
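        /* For illustration: a jumbo MTU selects LARGE_BUFFER_MAX_SIZE and
         * get_order() returns the smallest page order that holds it
         * (e.g. get_order(8192) == 1 with 4K pages), while a standard
         * 1500-byte MTU falls back to LARGE_BUFFER_MIN_SIZE.
         */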
4087
4088         for (i = 0; i < qdev->rss_ring_count; i++) {
4089                 rx_ring = &qdev->rx_ring[i];
4090                 /* Set the new size. */
4091                 rx_ring->lbq_buf_size = lbq_buf_len;
4092         }
4093
4094         status = ql_adapter_up(qdev);
4095         if (status)
4096                 goto error;
4097
4098         return status;
4099 error:
4100         QPRINTK(qdev, IFUP, ALERT,
4101                 "Driver up/down cycle failed, closing device.\n");
4102         set_bit(QL_ADAPTER_UP, &qdev->flags);
4103         dev_close(qdev->ndev);
4104         return status;
4105 }
4106
4107 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4108 {
4109         struct ql_adapter *qdev = netdev_priv(ndev);
4110         int status;
4111
4112         if (ndev->mtu == 1500 && new_mtu == 9000) {
4113                 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
4114         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4115                 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
4116         } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
4117                    (ndev->mtu == 9000 && new_mtu == 9000)) {
4118                 return 0;
4119         } else
4120                 return -EINVAL;
4121
4122         queue_delayed_work(qdev->workqueue,
4123                         &qdev->mpi_port_cfg_work, 3*HZ);
4124
4125         if (!netif_running(qdev->ndev)) {
4126                 ndev->mtu = new_mtu;
4127                 return 0;
4128         }
4129
4130         ndev->mtu = new_mtu;
4131         status = ql_change_rx_buffers(qdev);
4132         if (status) {
4133                 QPRINTK(qdev, IFUP, ERR,
4134                         "Changing MTU failed.\n");
4135         }
4136
4137         return status;
4138 }
4139
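/* Usage note: only the two MTU values above are accepted, e.g.
 *   ip link set dev eth0 mtu 9000    (switch to jumbo frames)
 *   ip link set dev eth0 mtu 1500    (back to the standard size)
 * Anything else is rejected with -EINVAL.  eth0 is just an example
 * interface name.
 */
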
4140 static struct net_device_stats *qlge_get_stats(struct net_device
4141                                                *ndev)
4142 {
4143         struct ql_adapter *qdev = netdev_priv(ndev);
4144         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4145         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4146         unsigned long pkts, mcast, dropped, errors, bytes;
4147         int i;
4148
4149         /* Get RX stats. */
4150         pkts = mcast = dropped = errors = bytes = 0;
4151         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4152                 pkts += rx_ring->rx_packets;
4153                 bytes += rx_ring->rx_bytes;
4154                 dropped += rx_ring->rx_dropped;
4155                 errors += rx_ring->rx_errors;
4156                 mcast += rx_ring->rx_multicast;
4157         }
4158         ndev->stats.rx_packets = pkts;
4159         ndev->stats.rx_bytes = bytes;
4160         ndev->stats.rx_dropped = dropped;
4161         ndev->stats.rx_errors = errors;
4162         ndev->stats.multicast = mcast;
4163
4164         /* Get TX stats. */
4165         pkts = errors = bytes = 0;
4166         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4167                 pkts += tx_ring->tx_packets;
4168                 bytes += tx_ring->tx_bytes;
4169                 errors += tx_ring->tx_errors;
4170         }
4171         ndev->stats.tx_packets = pkts;
4172         ndev->stats.tx_bytes = bytes;
4173         ndev->stats.tx_errors = errors;
4174         return &ndev->stats;
4175 }
4176
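/* Note: the per-ring counters summed above are updated in the data
 * path without taking a lock, so a reader may see a slightly stale
 * snapshot; that is the usual trade-off for cheap statistics.
 */
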
4177 static void qlge_set_multicast_list(struct net_device *ndev)
4178 {
4179         struct ql_adapter *qdev = netdev_priv(ndev);
4180         struct dev_mc_list *mc_ptr;
4181         int i, status;
4182
4183         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4184         if (status)
4185                 return;
4186         /*
4187          * Set or clear promiscuous mode if a
4188          * transition is taking place.
4189          */
4190         if (ndev->flags & IFF_PROMISC) {
4191                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4192                         if (ql_set_routing_reg
4193                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4194                                 QPRINTK(qdev, HW, ERR,
4195                                         "Failed to set promiscuous mode.\n");
4196                         } else {
4197                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4198                         }
4199                 }
4200         } else {
4201                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4202                         if (ql_set_routing_reg
4203                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4204                                 QPRINTK(qdev, HW, ERR,
4205                                         "Failed to clear promiscuous mode.\n");
4206                         } else {
4207                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4208                         }
4209                 }
4210         }
4211
4212         /*
4213          * Set or clear all multicast mode if a
4214          * transition is taking place.
4215          */
4216         if ((ndev->flags & IFF_ALLMULTI) ||
4217             (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
4218                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4219                         if (ql_set_routing_reg
4220                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4221                                 QPRINTK(qdev, HW, ERR,
4222                                         "Failed to set all-multi mode.\n");
4223                         } else {
4224                                 set_bit(QL_ALLMULTI, &qdev->flags);
4225                         }
4226                 }
4227         } else {
4228                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4229                         if (ql_set_routing_reg
4230                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4231                                 QPRINTK(qdev, HW, ERR,
4232                                         "Failed to clear all-multi mode.\n");
4233                         } else {
4234                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4235                         }
4236                 }
4237         }
4238
4239         if (ndev->mc_count) {
4240                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4241                 if (status)
4242                         goto exit;
4243                 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4244                      i++, mc_ptr = mc_ptr->next)
4245                         if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4246                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4247                                 QPRINTK(qdev, HW, ERR,
4248                                         "Failed to load multicast address.\n");
4249                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4250                                 goto exit;
4251                         }
4252                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4253                 if (ql_set_routing_reg
4254                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4255                         QPRINTK(qdev, HW, ERR,
4256                                 "Failed to set multicast match mode.\n");
4257                 } else {
4258                         set_bit(QL_ALLMULTI, &qdev->flags);
4259                 }
4260         }
4261 exit:
4262         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4263 }
4264
4265 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4266 {
4267         struct ql_adapter *qdev = netdev_priv(ndev);
4268         struct sockaddr *addr = p;
4269         int status;
4270
4271         if (!is_valid_ether_addr(addr->sa_data))
4272                 return -EADDRNOTAVAIL;
4273         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4274
4275         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4276         if (status)
4277                 return status;
4278         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4279                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4280         if (status)
4281                 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
4282         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4283         return status;
4284 }
4285
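/* For illustration: the CAM slot index passed above is the PCI function
 * number scaled by MAX_CQ, which appears to keep each function's
 * station address in its own region of the shared CAM.
 */
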
4286 static void qlge_tx_timeout(struct net_device *ndev)
4287 {
4288         struct ql_adapter *qdev = netdev_priv(ndev);
4289         ql_queue_asic_error(qdev);
4290 }
4291
4292 static void ql_asic_reset_work(struct work_struct *work)
4293 {
4294         struct ql_adapter *qdev =
4295             container_of(work, struct ql_adapter, asic_reset_work.work);
4296         int status;
4297         rtnl_lock();
4298         status = ql_adapter_down(qdev);
4299         if (status)
4300                 goto error;
4301
4302         status = ql_adapter_up(qdev);
4303         if (status)
4304                 goto error;
4305
4306         /* Restore rx mode. */
4307         clear_bit(QL_ALLMULTI, &qdev->flags);
4308         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4309         qlge_set_multicast_list(qdev->ndev);
4310
4311         rtnl_unlock();
4312         return;
4313 error:
4314         QPRINTK(qdev, IFUP, ALERT,
4315                 "Driver up/down cycle failed, closing device\n");
4316
4317         set_bit(QL_ADAPTER_UP, &qdev->flags);
4318         dev_close(qdev->ndev);
4319         rtnl_unlock();
4320 }
4321
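/* Locking note: the reset worker takes the RTNL lock because
 * ql_adapter_down()/ql_adapter_up() and dev_close() expect the same
 * serialization they get when called from ndo_open/ndo_stop.
 */
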
4322 static struct nic_operations qla8012_nic_ops = {
4323         .get_flash              = ql_get_8012_flash_params,
4324         .port_initialize        = ql_8012_port_initialize,
4325 };
4326
4327 static struct nic_operations qla8000_nic_ops = {
4328         .get_flash              = ql_get_8000_flash_params,
4329         .port_initialize        = ql_8000_port_initialize,
4330 };
4331
4332 /* Find the pcie function number for the other NIC
4333  * on this chip.  Since both NIC functions share a
4334  * common firmware we have the lowest enabled function
4335  * do any common work.  Examples would be resetting
4336  * after a fatal firmware error, or doing a firmware
4337  * coredump.
4338  */
4339 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4340 {
4341         int status = 0;
4342         u32 temp;
4343         u32 nic_func1, nic_func2;
4344
4345         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4346                         &temp);
4347         if (status)
4348                 return status;
4349
4350         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4351                         MPI_TEST_NIC_FUNC_MASK);
4352         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4353                         MPI_TEST_NIC_FUNC_MASK);
4354
4355         if (qdev->func == nic_func1)
4356                 qdev->alt_func = nic_func2;
4357         else if (qdev->func == nic_func2)
4358                 qdev->alt_func = nic_func1;
4359         else
4360                 status = -EIO;
4361
4362         return status;
4363 }
4364
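/* For illustration: if the MPI_TEST_FUNC_PORT_CFG word reports NIC
 * functions 0 and 2 and we are function 2, alt_func becomes 0, and
 * function 0, being the lowest, performs the shared work described
 * above (reset after fatal firmware errors, firmware coredumps).
 */
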
4365 static int ql_get_board_info(struct ql_adapter *qdev)
4366 {
4367         int status;
4368         qdev->func =
4369             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4370         if (qdev->func > 3)
4371                 return -EIO;
4372
4373         status = ql_get_alt_pcie_func(qdev);
4374         if (status)
4375                 return status;
4376
4377         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4378         if (qdev->port) {
4379                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4380                 qdev->port_link_up = STS_PL1;
4381                 qdev->port_init = STS_PI1;
4382                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4383                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4384         } else {
4385                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4386                 qdev->port_link_up = STS_PL0;
4387                 qdev->port_init = STS_PI0;
4388                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4389                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4390         }
4391         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4392         qdev->device_id = qdev->pdev->device;
4393         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4394                 qdev->nic_ops = &qla8012_nic_ops;
4395         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4396                 qdev->nic_ops = &qla8000_nic_ops;
4397         return status;
4398 }
4399
4400 static void ql_release_all(struct pci_dev *pdev)
4401 {
4402         struct net_device *ndev = pci_get_drvdata(pdev);
4403         struct ql_adapter *qdev = netdev_priv(ndev);
4404
4405         if (qdev->workqueue) {
4406                 destroy_workqueue(qdev->workqueue);
4407                 qdev->workqueue = NULL;
4408         }
4409
4410         if (qdev->reg_base)
4411                 iounmap(qdev->reg_base);
4412         if (qdev->doorbell_area)
4413                 iounmap(qdev->doorbell_area);
4414         vfree(qdev->mpi_coredump);
4415         pci_release_regions(pdev);
4416         pci_set_drvdata(pdev, NULL);
4417 }
4418
4419 static int __devinit ql_init_device(struct pci_dev *pdev,
4420                                     struct net_device *ndev, int cards_found)
4421 {
4422         struct ql_adapter *qdev = netdev_priv(ndev);
4423         int err = 0;
4424
4425         memset((void *)qdev, 0, sizeof(*qdev));
4426         err = pci_enable_device(pdev);
4427         if (err) {
4428                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4429                 return err;
4430         }
4431
4432         qdev->ndev = ndev;
4433         qdev->pdev = pdev;
4434         pci_set_drvdata(pdev, ndev);
4435
4436         /* Set PCIe read request size */
4437         err = pcie_set_readrq(pdev, 4096);
4438         if (err) {
4439                 dev_err(&pdev->dev, "Set readrq failed.\n");
4440                 goto err_out1;
4441         }
4442
4443         err = pci_request_regions(pdev, DRV_NAME);
4444         if (err) {
4445                 dev_err(&pdev->dev, "PCI region request failed.\n");
4446                 goto err_out1;
4447         }
4448
4449         pci_set_master(pdev);
4450         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4451                 set_bit(QL_DMA64, &qdev->flags);
4452                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4453         } else {
4454                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4455                 if (!err)
4456                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4457         }
4458
4459         if (err) {
4460                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4461                 goto err_out2;
4462         }
4463
4464         /* Set PCIe reset type for EEH to fundamental. */
4465         pdev->needs_freset = 1;
4466         pci_save_state(pdev);
4467         qdev->reg_base =
4468             ioremap_nocache(pci_resource_start(pdev, 1),
4469                             pci_resource_len(pdev, 1));
4470         if (!qdev->reg_base) {
4471                 dev_err(&pdev->dev, "Register mapping failed.\n");
4472                 err = -ENOMEM;
4473                 goto err_out2;
4474         }
4475
4476         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4477         qdev->doorbell_area =
4478             ioremap_nocache(pci_resource_start(pdev, 3),
4479                             pci_resource_len(pdev, 3));
4480         if (!qdev->doorbell_area) {
4481                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4482                 err = -ENOMEM;
4483                 goto err_out2;
4484         }
4485
4486         err = ql_get_board_info(qdev);
4487         if (err) {
4488                 dev_err(&pdev->dev, "Register access failed.\n");
4489                 err = -EIO;
4490                 goto err_out2;
4491         }
4492         qdev->msg_enable = netif_msg_init(debug, default_msg);
4493         spin_lock_init(&qdev->hw_lock);
4494         spin_lock_init(&qdev->stats_lock);
4495
4496         if (qlge_mpi_coredump) {
4497                 qdev->mpi_coredump =
4498                         vmalloc(sizeof(struct ql_mpi_coredump));
4499                 if (qdev->mpi_coredump == NULL) {
4500                         dev_err(&pdev->dev, "Coredump alloc failed.\n");
4501                         err = -ENOMEM;
4502                         goto err_out2;
4503                 }
4504                 if (qlge_force_coredump)
4505                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4506         }
4507         /* make sure the EEPROM is good */
4508         err = qdev->nic_ops->get_flash(qdev);
4509         if (err) {
4510                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4511                 goto err_out2;
4512         }
4513
4514         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4515
4516         /* Set up the default ring sizes. */
4517         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4518         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4519
4520         /* Set up the coalescing parameters. */
4521         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4522         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4523         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4524         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4525
4526         /*
4527          * Set up the operating parameters.
4528          */
4529         qdev->rx_csum = 1;
4530         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4531         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4532         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4533         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4534         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4535         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4536         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4537         init_completion(&qdev->ide_completion);
4538
4539         if (!cards_found) {
4540                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4541                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4542                          DRV_NAME, DRV_VERSION);
4543         }
4544         return 0;
4545 err_out2:
4546         ql_release_all(pdev);
4547 err_out1:
4548         pci_disable_device(pdev);
4549         return err;
4550 }
4551
4552 static const struct net_device_ops qlge_netdev_ops = {
4553         .ndo_open               = qlge_open,
4554         .ndo_stop               = qlge_close,
4555         .ndo_start_xmit         = qlge_send,
4556         .ndo_change_mtu         = qlge_change_mtu,
4557         .ndo_get_stats          = qlge_get_stats,
4558         .ndo_set_multicast_list = qlge_set_multicast_list,
4559         .ndo_set_mac_address    = qlge_set_mac_address,
4560         .ndo_validate_addr      = eth_validate_addr,
4561         .ndo_tx_timeout         = qlge_tx_timeout,
4562         .ndo_vlan_rx_register   = qlge_vlan_rx_register,
4563         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4564         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4565 };
4566
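/* Rough mapping of these hooks to their triggers: .ndo_open/.ndo_stop
 * back "ip link set <dev> up/down", .ndo_change_mtu backs "ip link set
 * <dev> mtu <n>", and .ndo_tx_timeout is invoked by the net core's
 * transmit watchdog once watchdog_timeo (set to 10s below) expires
 * with a stalled queue.
 */
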
4567 static int __devinit qlge_probe(struct pci_dev *pdev,
4568                                 const struct pci_device_id *pci_entry)
4569 {
4570         struct net_device *ndev = NULL;
4571         struct ql_adapter *qdev = NULL;
4572         static int cards_found;
4573         int err = 0;
4574
4575         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4576                         min(MAX_CPUS, (int)num_online_cpus()));
4577         if (!ndev)
4578                 return -ENOMEM;
4579
4580         err = ql_init_device(pdev, ndev, cards_found);
4581         if (err < 0) {
4582                 free_netdev(ndev);
4583                 return err;
4584         }
4585
4586         qdev = netdev_priv(ndev);
4587         SET_NETDEV_DEV(ndev, &pdev->dev);
4588         ndev->features = (0
4589                           | NETIF_F_IP_CSUM
4590                           | NETIF_F_SG
4591                           | NETIF_F_TSO
4592                           | NETIF_F_TSO6
4593                           | NETIF_F_TSO_ECN
4594                           | NETIF_F_HW_VLAN_TX
4595                           | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
4596         ndev->features |= NETIF_F_GRO;
4597
4598         if (test_bit(QL_DMA64, &qdev->flags))
4599                 ndev->features |= NETIF_F_HIGHDMA;
4600
4601         /*
4602          * Set up net_device structure.
4603          */
4604         ndev->tx_queue_len = qdev->tx_ring_size;
4605         ndev->irq = pdev->irq;
4606
4607         ndev->netdev_ops = &qlge_netdev_ops;
4608         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4609         ndev->watchdog_timeo = 10 * HZ;
4610
4611         err = register_netdev(ndev);
4612         if (err) {
4613                 dev_err(&pdev->dev, "net device registration failed.\n");
4614                 ql_release_all(pdev);
4615                 pci_disable_device(pdev);
4616                 return err;
4617         }
4618         ql_link_off(qdev);
4619         ql_display_dev_info(ndev);
4620         atomic_set(&qdev->lb_count, 0);
4621         cards_found++;
4622         return 0;
4623 }
4624
4625 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4626 {
4627         return qlge_send(skb, ndev);
4628 }
4629
4630 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4631 {
4632         return ql_clean_inbound_rx_ring(rx_ring, budget);
4633 }
4634
4635 static void __devexit qlge_remove(struct pci_dev *pdev)
4636 {
4637         struct net_device *ndev = pci_get_drvdata(pdev);
4638         unregister_netdev(ndev);
4639         ql_release_all(pdev);
4640         pci_disable_device(pdev);
4641         free_netdev(ndev);
4642 }
4643
4644 /* Clean up resources without touching hardware. */
4645 static void ql_eeh_close(struct net_device *ndev)
4646 {
4647         int i;
4648         struct ql_adapter *qdev = netdev_priv(ndev);
4649
4650         if (netif_carrier_ok(ndev)) {
4651                 netif_carrier_off(ndev);
4652                 netif_stop_queue(ndev);
4653         }
4654
4655         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4656                 cancel_delayed_work_sync(&qdev->asic_reset_work);
4657         cancel_delayed_work_sync(&qdev->mpi_reset_work);
4658         cancel_delayed_work_sync(&qdev->mpi_work);
4659         cancel_delayed_work_sync(&qdev->mpi_idc_work);
4660         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4661         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4662
4663         for (i = 0; i < qdev->rss_ring_count; i++)
4664                 netif_napi_del(&qdev->rx_ring[i].napi);
4665
4666         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4667         ql_tx_ring_clean(qdev);
4668         ql_free_rx_buffers(qdev);
4669         ql_release_adapter_resources(qdev);
4670 }
4671
4672 /*
4673  * This callback is called by the PCI subsystem whenever
4674  * a PCI bus error is detected.
4675  */
4676 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4677                                                enum pci_channel_state state)
4678 {
4679         struct net_device *ndev = pci_get_drvdata(pdev);
4680
4681         switch (state) {
4682         case pci_channel_io_normal:
4683                 return PCI_ERS_RESULT_CAN_RECOVER;
4684         case pci_channel_io_frozen:
4685                 netif_device_detach(ndev);
4686                 if (netif_running(ndev))
4687                         ql_eeh_close(ndev);
4688                 pci_disable_device(pdev);
4689                 return PCI_ERS_RESULT_NEED_RESET;
4690         case pci_channel_io_perm_failure:
4691                 dev_err(&pdev->dev,
4692                         "%s: pci_channel_io_perm_failure.\n", __func__);
4693                 return PCI_ERS_RESULT_DISCONNECT;
4694         }
4695
4696         /* Request a slot reset. */
4697         return PCI_ERS_RESULT_NEED_RESET;
4698 }
4699
4700 /*
4701  * This callback is called after the PCI bus has been reset.
4702  * Basically, this tries to restart the card from scratch.
4703  * This is a shortened version of the device probe/discovery code,
4704  * it resembles the first half of the qlge_probe() routine.
4705  */
4706 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4707 {
4708         struct net_device *ndev = pci_get_drvdata(pdev);
4709         struct ql_adapter *qdev = netdev_priv(ndev);
4710
4711         pdev->error_state = pci_channel_io_normal;
4712
4713         pci_restore_state(pdev);
4714         if (pci_enable_device(pdev)) {
4715                 QPRINTK(qdev, IFUP, ERR,
4716                         "Cannot re-enable PCI device after reset.\n");
4717                 return PCI_ERS_RESULT_DISCONNECT;
4718         }
4719         pci_set_master(pdev);
4720         return PCI_ERS_RESULT_RECOVERED;
4721 }
4722
4723 static void qlge_io_resume(struct pci_dev *pdev)
4724 {
4725         struct net_device *ndev = pci_get_drvdata(pdev);
4726         struct ql_adapter *qdev = netdev_priv(ndev);
4727         int err = 0;
4728
4729         if (ql_adapter_reset(qdev))
4730                 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
4731         if (netif_running(ndev)) {
4732                 err = qlge_open(ndev);
4733                 if (err) {
4734                         QPRINTK(qdev, IFUP, ERR,
4735                                 "Device initialization failed after reset.\n");
4736                         return;
4737                 }
4738         } else {
4739                 QPRINTK(qdev, IFUP, ERR,
4740                         "Device was not running prior to EEH.\n");
4741         }
4742         netif_device_attach(ndev);
4743 }
4744
4745 static struct pci_error_handlers qlge_err_handler = {
4746         .error_detected = qlge_io_error_detected,
4747         .slot_reset = qlge_io_slot_reset,
4748         .resume = qlge_io_resume,
4749 };
4750
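/* EEH flow for reference: the PCI core calls .error_detected first;
 * when that returns PCI_ERS_RESULT_NEED_RESET the slot is reset and
 * .slot_reset re-enables the device; .resume then restarts the
 * interface via qlge_open() if it was running before the error.
 */
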
4751 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4752 {
4753         struct net_device *ndev = pci_get_drvdata(pdev);
4754         struct ql_adapter *qdev = netdev_priv(ndev);
4755         int err;
4756
4757         netif_device_detach(ndev);
4758
4759         if (netif_running(ndev)) {
4760                 err = ql_adapter_down(qdev);
4761                 if (err)
4762                         return err;
4763         }
4764
4765         ql_wol(qdev);
4766         err = pci_save_state(pdev);
4767         if (err)
4768                 return err;
4769
4770         pci_disable_device(pdev);
4771
4772         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4773
4774         return 0;
4775 }
4776
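/* Note: ql_wol() runs only after the adapter has been taken down, so
 * the MPI firmware is armed for magic-packet wake just before the
 * device is powered down; qlge_shutdown() below reuses this path.
 */
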
4777 #ifdef CONFIG_PM
4778 static int qlge_resume(struct pci_dev *pdev)
4779 {
4780         struct net_device *ndev = pci_get_drvdata(pdev);
4781         struct ql_adapter *qdev = netdev_priv(ndev);
4782         int err;
4783
4784         pci_set_power_state(pdev, PCI_D0);
4785         pci_restore_state(pdev);
4786         err = pci_enable_device(pdev);
4787         if (err) {
4788                 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
4789                 return err;
4790         }
4791         pci_set_master(pdev);
4792
4793         pci_enable_wake(pdev, PCI_D3hot, 0);
4794         pci_enable_wake(pdev, PCI_D3cold, 0);
4795
4796         if (netif_running(ndev)) {
4797                 err = ql_adapter_up(qdev);
4798                 if (err)
4799                         return err;
4800         }
4801
4802         netif_device_attach(ndev);
4803
4804         return 0;
4805 }
4806 #endif /* CONFIG_PM */
4807
4808 static void qlge_shutdown(struct pci_dev *pdev)
4809 {
4810         qlge_suspend(pdev, PMSG_SUSPEND);
4811 }
4812
4813 static struct pci_driver qlge_driver = {
4814         .name = DRV_NAME,
4815         .id_table = qlge_pci_tbl,
4816         .probe = qlge_probe,
4817         .remove = __devexit_p(qlge_remove),
4818 #ifdef CONFIG_PM
4819         .suspend = qlge_suspend,
4820         .resume = qlge_resume,
4821 #endif
4822         .shutdown = qlge_shutdown,
4823         .err_handler = &qlge_err_handler
4824 };
4825
4826 static int __init qlge_init_module(void)
4827 {
4828         return pci_register_driver(&qlge_driver);
4829 }
4830
4831 static void __exit qlge_exit(void)
4832 {
4833         pci_unregister_driver(&qlge_driver);
4834 }
4835
4836 module_init(qlge_init_module);
4837 module_exit(qlge_exit);