/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int irq_type = MSIX_IRQ;
module_param(irq_type, int, 0);	/* third argument is the sysfs permission, not a default value */
MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int seconds = 3;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		ssleep(1);
	} while (--seconds);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
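
/* Typical lock/unlock usage (a sketch, mirroring the pattern that
 * ql_get_mac_addr_reg() below follows):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	...access the shared MAC address registers...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */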

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also called from kernel thread context via APIs
 * such as netdev->set_multi, netdev->set_mac_address and
 * netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
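
/* Example (a sketch based on the flash access path below): wait for the
 * flash address register to report ready, failing fast on its error bit:
 *
 *	status = ql_wait_reg_rdy(qdev, FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 *
 * Callers with no error bit to check (the MAC address and routing paths)
 * simply pass 0 for err_bit.
 */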

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
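
/* Illustrative caller (a sketch; the queue-setup code later in this file
 * downloads a completion queue ICB in roughly this shape):
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 */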

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
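
/* Layout note (derived from ql_set_mac_addr_reg() below): a CAM MAC entry
 * occupies three consecutive words behind the MAC_ADDR_IDX/MAC_ADDR_DATA
 * register pair:
 *
 *	offset 0: addr[2..5] - lower 32 bits of the MAC address
 *	offset 1: addr[0..1] - upper 16 bits of the MAC address
 *	offset 2: CAM output - routing/function/CQ bits (CAM_OUT_*)
 */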

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, INFO,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->
					       rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
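
/* Usage sketch (hedged; the adapter-up path programs the station address
 * with a call of roughly this form):
 *
 *	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->dev_addr,
 *				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
 */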

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		goto exit;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status;
	u32 value = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
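
/* Usage sketch (hedged; the route-initialization code enables the
 * broadcast slot with a call of roughly this form, per the slot/mask
 * pairs in the switch above):
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 */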

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
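
/* Note on pairing (inferred from the irq_cnt scheme described above): the
 * interrupt handler disables via ql_disable_completion_interrupt(), which
 * bumps irq_cnt, and the completion path re-enables via
 * ql_enable_completion_interrupt(), which only re-arms the vector once
 * irq_cnt drops back to zero.
 */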

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, FLASH_DATA);
exit:
	return status;
}

static int ql_get_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	u32 *p = (u32 *)&qdev->flash;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
		status = ql_read_flash_word(qdev, i, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* The xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
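
/* Example (a sketch; the ethtool statistics path reads 64-bit XGMAC
 * counters this way -- "stat_reg" here is a hypothetical register offset):
 *
 *	u64 data;
 *	if (ql_read_xgmac_reg64(qdev, stat_reg, &data))
 *		QPRINTK(qdev, DRV, ERR, "Error reading statistics register.\n");
 */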

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int clean_idx = rx_ring->lbq_clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (lbq_desc->p.lbq_page == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"lbq: getting new page for index %d.\n",
					lbq_desc->index);
				lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
				if (lbq_desc->p.lbq_page == NULL) {
					QPRINTK(qdev, RX_STATUS, ERR,
						"Couldn't get a page.\n");
					return;
				}
				map = pci_map_page(qdev->pdev,
						   lbq_desc->p.lbq_page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, RX_STATUS, ERR,
						"PCI mapping failed.\n");
					return;
				}
				pci_unmap_addr_set(lbq_desc, mapaddr, map);
				pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
				*lbq_desc->addr = cpu_to_le64(map);
			}
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
		rx_ring->lbq_free_cnt -= 16;
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int clean_idx = rx_ring->sbq_clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     rx_ring->sbq_buf_size);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size /
						     2, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size / 2);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);

		rx_ring->sbq_free_cnt -= 16;
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}
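
/* Note (inferred from ql_update_sbq() above, which reserves QLGE_SB_PAD
 * bytes at allocation): moving data/tail back by QLGE_SB_PAD - NET_IP_ALIGN
 * leaves exactly NET_IP_ALIGN bytes of headroom, so the IP header ends up
 * aligned after the 14-byte Ethernet header.
 */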

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1193
1194         /*
1195          * Handle the header buffer if present.
1196          */
1197         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1198             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1199                 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1200                 /*
1201                  * Headers fit nicely into a small buffer.
1202                  */
1203                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1204                 pci_unmap_single(qdev->pdev,
1205                                 pci_unmap_addr(sbq_desc, mapaddr),
1206                                 pci_unmap_len(sbq_desc, maplen),
1207                                 PCI_DMA_FROMDEVICE);
1208                 skb = sbq_desc->p.skb;
1209                 ql_realign_skb(skb, hdr_len);
1210                 skb_put(skb, hdr_len);
1211                 sbq_desc->p.skb = NULL;
1212         }
1213
1214         /*
1215          * Handle the data buffer(s).
1216          */
1217         if (unlikely(!length)) {        /* Is there data too? */
1218                 QPRINTK(qdev, RX_STATUS, DEBUG,
1219                         "No Data buffer in this packet.\n");
1220                 return skb;
1221         }
1222
1223         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1224                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1225                         QPRINTK(qdev, RX_STATUS, DEBUG,
1226                                 "Headers in small, data of %d bytes in small, combine them.\n", length);
1227                         /*
1228                          * Data is less than small buffer size so it's
1229                          * stuffed in a small buffer.
1230                          * For this case we append the data
1231                          * from the "data" small buffer to the "header" small
1232                          * buffer.
1233                          */
1234                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1235                         pci_dma_sync_single_for_cpu(qdev->pdev,
1236                                                     pci_unmap_addr
1237                                                     (sbq_desc, mapaddr),
1238                                                     pci_unmap_len
1239                                                     (sbq_desc, maplen),
1240                                                     PCI_DMA_FROMDEVICE);
1241                         memcpy(skb_put(skb, length),
1242                                sbq_desc->p.skb->data, length);
1243                         pci_dma_sync_single_for_device(qdev->pdev,
1244                                                        pci_unmap_addr
1245                                                        (sbq_desc,
1246                                                         mapaddr),
1247                                                        pci_unmap_len
1248                                                        (sbq_desc,
1249                                                         maplen),
1250                                                        PCI_DMA_FROMDEVICE);
1251                 } else {
1252                         QPRINTK(qdev, RX_STATUS, DEBUG,
1253                                 "%d bytes in a single small buffer.\n", length);
1254                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1255                         skb = sbq_desc->p.skb;
1256                         ql_realign_skb(skb, length);
1257                         skb_put(skb, length);
1258                         pci_unmap_single(qdev->pdev,
1259                                          pci_unmap_addr(sbq_desc,
1260                                                         mapaddr),
1261                                          pci_unmap_len(sbq_desc,
1262                                                        maplen),
1263                                          PCI_DMA_FROMDEVICE);
1264                         sbq_desc->p.skb = NULL;
1265                 }
1266         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1267                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1268                         QPRINTK(qdev, RX_STATUS, DEBUG,
1269                                 "Header in small, %d bytes in large. Chain large to small!\n", length);
1270                         /*
1271                          * The data is in a single large buffer.  We
1272                          * chain it to the header buffer's skb and let
1273                          * it rip.
1274                          */
1275                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1276                         pci_unmap_page(qdev->pdev,
1277                                        pci_unmap_addr(lbq_desc,
1278                                                       mapaddr),
1279                                        pci_unmap_len(lbq_desc, maplen),
1280                                        PCI_DMA_FROMDEVICE);
1281                         QPRINTK(qdev, RX_STATUS, DEBUG,
1282                                 "Chaining page to skb.\n");
1283                         skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1284                                            0, length);
1285                         skb->len += length;
1286                         skb->data_len += length;
1287                         skb->truesize += length;
1288                         lbq_desc->p.lbq_page = NULL;
1289                 } else {
1290                         /*
1291                          * The headers and data are in a single large buffer. We
1292                          * copy it to a new skb and let it go. This can happen with
1293                          * jumbo mtu on a non-TCP/UDP frame.
1294                          */
1295                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1296                         skb = netdev_alloc_skb(qdev->ndev, length);
1297                         if (skb == NULL) {
1298                                 QPRINTK(qdev, PROBE, DEBUG,
1299                                         "No skb available, drop the packet.\n");
1300                                 return NULL;
1301                         }
1302                         pci_unmap_page(qdev->pdev,
1303                                        pci_unmap_addr(lbq_desc,
1304                                                       mapaddr),
1305                                        pci_unmap_len(lbq_desc, maplen),
1306                                        PCI_DMA_FROMDEVICE);
1307                         skb_reserve(skb, NET_IP_ALIGN);
1308                         QPRINTK(qdev, RX_STATUS, DEBUG,
1309                                 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1310                         skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1311                                            0, length);
1312                         skb->len += length;
1313                         skb->data_len += length;
1314                         skb->truesize += length;
1315                         length = 0;     /* entire frame consumed */
1316                         lbq_desc->p.lbq_page = NULL;
1317                         __pskb_pull_tail(skb,
1318                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1319                                 VLAN_ETH_HLEN : ETH_HLEN);
1320                 }
1321         } else {
1322                 /*
1323                  * The data is in a chain of large buffers
1324                  * pointed to by a small buffer.  We loop
1325                  * thru and chain them to the our small header
1326                  * through and chain them to our small header
1327                  * buffer's skb.
1328                  * frags:  There is an 18-frag limit per skb, and our
1329                  *         small buffer holds 32 page addresses, but
1330                  *         a 9000-byte jumbo frame uses at most 3 of
1331                  *         them.  Only a much larger MTU could
1332                  *         eventually get us into trouble.
1333                 int size, offset, i = 0;
1334                 __le64 *bq, bq_array[8];
1335                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1336                 pci_unmap_single(qdev->pdev,
1337                                  pci_unmap_addr(sbq_desc, mapaddr),
1338                                  pci_unmap_len(sbq_desc, maplen),
1339                                  PCI_DMA_FROMDEVICE);
1340                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1341                         /*
1342                          * This is a non-TCP/UDP IP frame, so
1343                          * the headers aren't split into a small
1344                          * buffer.  We have to use the small buffer
1345                          * that contains our sg list as the skb to
1346                          * send upstream. Copy the sg list here to
1347                          * a local buffer and use it to find the
1348                          * pages to chain.
1349                          */
1350                         QPRINTK(qdev, RX_STATUS, DEBUG,
1351                                 "%d bytes of headers & data in chain of large.\n", length);
1352                         skb = sbq_desc->p.skb;
1353                         bq = &bq_array[0];
1354                         memcpy(bq, skb->data, sizeof(bq_array));
1355                         sbq_desc->p.skb = NULL;
1356                         skb_reserve(skb, NET_IP_ALIGN);
1357                 } else {
1358                         QPRINTK(qdev, RX_STATUS, DEBUG,
1359                                 "Headers in small, %d bytes of data in chain of large.\n", length);
1360                         bq = (__le64 *)sbq_desc->p.skb->data;
1361                 }
1362                 while (length > 0) {
1363                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1364                         pci_unmap_page(qdev->pdev,
1365                                        pci_unmap_addr(lbq_desc,
1366                                                       mapaddr),
1367                                        pci_unmap_len(lbq_desc,
1368                                                      maplen),
1369                                        PCI_DMA_FROMDEVICE);
1370                         size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1371                         offset = 0;
1372
1373                         QPRINTK(qdev, RX_STATUS, DEBUG,
1374                                 "Adding page %d to skb for %d bytes.\n",
1375                                 i, size);
1376                         skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1377                                            offset, size);
1378                         skb->len += size;
1379                         skb->data_len += size;
1380                         skb->truesize += size;
1381                         length -= size;
1382                         lbq_desc->p.lbq_page = NULL;
1383                         bq++;
1384                         i++;
1385                 }
1386                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1387                                 VLAN_ETH_HLEN : ETH_HLEN);
1388         }
1389         return skb;
1390 }
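
/* Worked example for the chained-page path above (illustration
 * only, not driver code), assuming 4 KB pages:
 *
 *	pages for a 9000-byte jumbo frame = DIV_ROUND_UP(9000, 4096) = 3
 *
 * so only 3 of the 18 skb frag slots, and 3 of the 32 page
 * addresses a small buffer can carry, are actually used; the MTU
 * would have to grow far beyond 9000 before either limit binds.
 */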
1391
1392 /* Process an inbound completion from an rx ring. */
1393 static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1394                                    struct rx_ring *rx_ring,
1395                                    struct ib_mac_iocb_rsp *ib_mac_rsp)
1396 {
1397         struct net_device *ndev = qdev->ndev;
1398         struct sk_buff *skb = NULL;
1399
1400         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1401
1402         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1403         if (unlikely(!skb)) {
1404                 QPRINTK(qdev, RX_STATUS, DEBUG,
1405                         "No skb available, drop packet.\n");
1406                 return;
1407         }
1408
1409         prefetch(skb->data);
1410         skb->dev = ndev;
1411         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1412                 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1413                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1414                         IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1415                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1416                         IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1417                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1418                         IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1419         }
1420         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1421                 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1422         }
1423         if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
1424                 QPRINTK(qdev, RX_STATUS, ERR,
1425                         "Bad checksum for this %s packet.\n",
1426                         ((ib_mac_rsp->
1427                           flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
1428                 skb->ip_summed = CHECKSUM_NONE;
1429         } else if (qdev->rx_csum &&
1430                    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
1431                     ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1432                      !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
1433                 QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
1434                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1435         }
1436         qdev->stats.rx_packets++;
1437         qdev->stats.rx_bytes += skb->len;
1438         skb->protocol = eth_type_trans(skb, ndev);
1439         if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
1440                 QPRINTK(qdev, RX_STATUS, DEBUG,
1441                         "Passing a VLAN packet upstream.\n");
1442                 vlan_hwaccel_rx(skb, qdev->vlgrp,
1443                                 le16_to_cpu(ib_mac_rsp->vlan_id));
1444         } else {
1445                 QPRINTK(qdev, RX_STATUS, DEBUG,
1446                         "Passing a normal packet upstream.\n");
1447                 netif_rx(skb);
1448         }
1449 }
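
/* The checksum decision above, restated as a table (explanatory
 * note only):
 *
 *	IE/TE error flag set            -> CHECKSUM_NONE
 *	rx_csum on, TCP frame           -> CHECKSUM_UNNECESSARY
 *	rx_csum on, UDP frame, NU clear -> CHECKSUM_UNNECESSARY
 *	otherwise                       -> ip_summed left untouched
 *
 * NU appears to mark a UDP frame whose checksum the hardware did
 * not validate, so the stack must verify that frame itself.
 */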
1450
1451 /* Process an outbound completion from an rx ring. */
1452 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1453                                    struct ob_mac_iocb_rsp *mac_rsp)
1454 {
1455         struct tx_ring *tx_ring;
1456         struct tx_ring_desc *tx_ring_desc;
1457
1458         QL_DUMP_OB_MAC_RSP(mac_rsp);
1459         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1460         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1461         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1462         qdev->stats.tx_bytes += tx_ring_desc->skb->len;
1463         qdev->stats.tx_packets++;
1464         dev_kfree_skb(tx_ring_desc->skb);
1465         tx_ring_desc->skb = NULL;
1466
1467         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1468                                         OB_MAC_IOCB_RSP_S |
1469                                         OB_MAC_IOCB_RSP_L |
1470                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1471                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1472                         QPRINTK(qdev, TX_DONE, WARNING,
1473                                 "Total descriptor length did not match transfer length.\n");
1474                 }
1475                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1476                         QPRINTK(qdev, TX_DONE, WARNING,
1477                                 "Frame too short to be legal, not sent.\n");
1478                 }
1479                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1480                         QPRINTK(qdev, TX_DONE, WARNING,
1481                                 "Frame too long, but sent anyway.\n");
1482                 }
1483                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1484                         QPRINTK(qdev, TX_DONE, WARNING,
1485                                 "PCI backplane error. Frame not sent.\n");
1486                 }
1487         }
1488         atomic_inc(&tx_ring->tx_count);
1489 }
1490
1491 /* Fire up a handler to reset the MPI processor. */
1492 void ql_queue_fw_error(struct ql_adapter *qdev)
1493 {
1494         netif_stop_queue(qdev->ndev);
1495         netif_carrier_off(qdev->ndev);
1496         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1497 }
1498
1499 void ql_queue_asic_error(struct ql_adapter *qdev)
1500 {
1501         netif_stop_queue(qdev->ndev);
1502         netif_carrier_off(qdev->ndev);
1503         ql_disable_interrupts(qdev);
1504         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1505 }
1506
1507 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1508                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
1509 {
1510         switch (ib_ae_rsp->event) {
1511         case MGMT_ERR_EVENT:
1512                 QPRINTK(qdev, RX_ERR, ERR,
1513                         "Management Processor Fatal Error.\n");
1514                 ql_queue_fw_error(qdev);
1515                 return;
1516
1517         case CAM_LOOKUP_ERR_EVENT:
1518                 QPRINTK(qdev, LINK, ERR,
1519                         "Multiple CAM hits occurred on lookup.\n");
1520                 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1521                 ql_queue_asic_error(qdev);
1522                 return;
1523
1524         case SOFT_ECC_ERROR_EVENT:
1525                 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1526                 ql_queue_asic_error(qdev);
1527                 break;
1528
1529         case PCI_ERR_ANON_BUF_RD:
1530                 QPRINTK(qdev, RX_ERR, ERR,
1531                         "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1532                         ib_ae_rsp->q_id);
1533                 ql_queue_asic_error(qdev);
1534                 break;
1535
1536         default:
1537                 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1538                         ib_ae_rsp->event);
1539                 ql_queue_asic_error(qdev);
1540                 break;
1541         }
1542 }
1543
1544 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1545 {
1546         struct ql_adapter *qdev = rx_ring->qdev;
1547         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1548         struct ob_mac_iocb_rsp *net_rsp = NULL;
1549         int count = 0;
1550
1551         /* While there are entries in the completion queue. */
1552         while (prod != rx_ring->cnsmr_idx) {
1553
1554                 QPRINTK(qdev, RX_STATUS, DEBUG,
1555                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1556                         prod, rx_ring->cnsmr_idx);
1557
1558                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1559                 rmb();
1560                 switch (net_rsp->opcode) {
1561
1562                 case OPCODE_OB_MAC_TSO_IOCB:
1563                 case OPCODE_OB_MAC_IOCB:
1564                         ql_process_mac_tx_intr(qdev, net_rsp);
1565                         break;
1566                 default:
1567                         QPRINTK(qdev, RX_STATUS, DEBUG,
1568                                 "Hit default case, not handled! Dropping the completion, opcode = %x.\n",
1569                                 net_rsp->opcode);
1570                 }
1571                 count++;
1572                 ql_update_cq(rx_ring);
1573                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1574         }
1575         ql_write_cq_idx(rx_ring);
1576         if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
1577                 struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1578                 if (atomic_read(&tx_ring->queue_stopped) &&
1579                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1580                         /*
1581                          * The queue got stopped because the tx_ring was full.
1582                          * Wake it up, because it's now at least 25% empty.
1583                          */
1584                         netif_wake_queue(qdev->ndev);
1585         }
1586
1587         return count;
1588 }
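
/* Worked example of the wake threshold above (the ring size is an
 * assumption for illustration): with wq_len = 256, tx_count holds
 * the number of free descriptors, so the queue is woken once more
 * than 256 / 4 = 64 slots are free, i.e. once the ring is at least
 * 25% empty.  Waking earlier would risk an immediate re-stop.
 */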
1589
1590 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1591 {
1592         struct ql_adapter *qdev = rx_ring->qdev;
1593         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1594         struct ql_net_rsp_iocb *net_rsp;
1595         int count = 0;
1596
1597         /* While there are entries in the completion queue. */
1598         while (prod != rx_ring->cnsmr_idx) {
1599
1600                 QPRINTK(qdev, RX_STATUS, DEBUG,
1601                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1602                         prod, rx_ring->cnsmr_idx);
1603
1604                 net_rsp = rx_ring->curr_entry;
1605                 rmb();
1606                 switch (net_rsp->opcode) {
1607                 case OPCODE_IB_MAC_IOCB:
1608                         ql_process_mac_rx_intr(qdev, rx_ring,
1609                                                (struct ib_mac_iocb_rsp *)
1610                                                net_rsp);
1611                         break;
1612
1613                 case OPCODE_IB_AE_IOCB:
1614                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1615                                                 net_rsp);
1616                         break;
1617                 default:
1618                         {
1619                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1620                                         "Hit default case, not handled! Dropping the packet, opcode = %x.\n",
1621                                         net_rsp->opcode);
1622                         }
1623                 }
1624                 count++;
1625                 ql_update_cq(rx_ring);
1626                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1627                 if (count == budget)
1628                         break;
1629         }
1630         ql_update_buffer_queues(qdev, rx_ring);
1631         ql_write_cq_idx(rx_ring);
1632         return count;
1633 }
1634
1635 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1636 {
1637         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1638         struct ql_adapter *qdev = rx_ring->qdev;
1639         int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1640
1641         QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1642                 rx_ring->cq_id);
1643
1644         if (work_done < budget) {
1645                 __netif_rx_complete(napi);
1646                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1647         }
1648         return work_done;
1649 }
1650
1651 static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1652 {
1653         struct ql_adapter *qdev = netdev_priv(ndev);
1654
1655         qdev->vlgrp = grp;
1656         if (grp) {
1657                 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1658                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1659                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1660         } else {
1661                 QPRINTK(qdev, IFUP, DEBUG,
1662                         "Turning off VLAN in NIC_RCV_CFG.\n");
1663                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1664         }
1665 }
1666
1667 static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1668 {
1669         struct ql_adapter *qdev = netdev_priv(ndev);
1670         u32 enable_bit = MAC_ADDR_E;
1671
1672         spin_lock(&qdev->hw_lock);
1673         if (ql_set_mac_addr_reg
1674             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1675                 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1676         }
1677         spin_unlock(&qdev->hw_lock);
1678 }
1679
1680 static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1681 {
1682         struct ql_adapter *qdev = netdev_priv(ndev);
1683         u32 enable_bit = 0;
1684
1685         spin_lock(&qdev->hw_lock);
1686         if (ql_set_mac_addr_reg
1687             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1688                 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1689         }
1690         spin_unlock(&qdev->hw_lock);
1691
1692 }
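
/* The two helpers above issue the same CAM write; only enable_bit
 * differs.  A rough sketch of the pattern (values for illustration):
 *
 *	u32 bit = MAC_ADDR_E;	// add: enable the CAM entry for vid
 *	ql_set_mac_addr_reg(qdev, (u8 *)&bit, MAC_ADDR_TYPE_VLAN, vid);
 *	bit = 0;		// kill: disable that same entry
 *	ql_set_mac_addr_reg(qdev, (u8 *)&bit, MAC_ADDR_TYPE_VLAN, vid);
 */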
1693
1694 /* Worker thread to process a given rx_ring that is dedicated
1695  * to outbound completions.
1696  */
1697 static void ql_tx_clean(struct work_struct *work)
1698 {
1699         struct rx_ring *rx_ring =
1700             container_of(work, struct rx_ring, rx_work.work);
1701         ql_clean_outbound_rx_ring(rx_ring);
1702         ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1703
1704 }
1705
1706 /* Worker thread to process a given rx_ring that is dedicated
1707  * to inbound completions.
1708  */
1709 static void ql_rx_clean(struct work_struct *work)
1710 {
1711         struct rx_ring *rx_ring =
1712             container_of(work, struct rx_ring, rx_work.work);
1713         ql_clean_inbound_rx_ring(rx_ring, 64);
1714         ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1715 }
1716
1717 /* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1718 static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1719 {
1720         struct rx_ring *rx_ring = dev_id;
1721         queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1722                               &rx_ring->rx_work, 0);
1723         return IRQ_HANDLED;
1724 }
1725
1726 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1727 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1728 {
1729         struct rx_ring *rx_ring = dev_id;
1730         netif_rx_schedule(&rx_ring->napi);
1731         return IRQ_HANDLED;
1732 }
1733
1734 /* This handles a fatal error, MPI activity, and the default
1735  * rx_ring in an MSI-X multiple vector environment.
1736  * In an MSI/Legacy environment it also processes the rest of
1737  * the rx_rings.
1738  */
1739 static irqreturn_t qlge_isr(int irq, void *dev_id)
1740 {
1741         struct rx_ring *rx_ring = dev_id;
1742         struct ql_adapter *qdev = rx_ring->qdev;
1743         struct intr_context *intr_context = &qdev->intr_context[0];
1744         u32 var;
1745         int i;
1746         int work_done = 0;
1747
1748         spin_lock(&qdev->hw_lock);
1749         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1750                 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1751                 spin_unlock(&qdev->hw_lock);
1752                 return IRQ_NONE;
1753         }
1754         spin_unlock(&qdev->hw_lock);
1755
1756         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
1757
1758         /*
1759          * Check for fatal error.
1760          */
1761         if (var & STS_FE) {
1762                 ql_queue_asic_error(qdev);
1763                 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1764                 var = ql_read32(qdev, ERR_STS);
1765                 QPRINTK(qdev, INTR, ERR,
1766                         "Resetting chip. Error Status Register = 0x%x\n", var);
1767                 return IRQ_HANDLED;
1768         }
1769
1770         /*
1771          * Check MPI processor activity.
1772          */
1773         if (var & STS_PI) {
1774                 /*
1775                  * We've got an async event or mailbox completion.
1776                  * Handle it and clear the source of the interrupt.
1777                  */
1778                 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
1779                 ql_disable_completion_interrupt(qdev, intr_context->intr);
1780                 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
1781                                       &qdev->mpi_work, 0);
1782                 work_done++;
1783         }
1784
1785         /*
1786          * Check the default queue and wake handler if active.
1787          */
1788         rx_ring = &qdev->rx_ring[0];
1789         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
1790                 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1791                 ql_disable_completion_interrupt(qdev, intr_context->intr);
1792                 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
1793                                       &rx_ring->rx_work, 0);
1794                 work_done++;
1795         }
1796
1797         if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
1798                 /*
1799                  * Start the DPC for each active queue.
1800                  */
1801                 for (i = 1; i < qdev->rx_ring_count; i++) {
1802                         rx_ring = &qdev->rx_ring[i];
1803                         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1804                             rx_ring->cnsmr_idx) {
1805                                 QPRINTK(qdev, INTR, INFO,
1806                                         "Waking handler for rx_ring[%d].\n", i);
1807                                 ql_disable_completion_interrupt(qdev,
1808                                                                 intr_context->
1809                                                                 intr);
1810                                 if (i < qdev->rss_ring_first_cq_id)
1811                                         queue_delayed_work_on(rx_ring->cpu,
1812                                                               qdev->q_workqueue,
1813                                                               &rx_ring->rx_work,
1814                                                               0);
1815                                 else
1816                                         netif_rx_schedule(&rx_ring->napi);
1817                                 work_done++;
1818                         }
1819                 }
1820         }
1821         ql_enable_completion_interrupt(qdev, intr_context->intr);
1822         return work_done ? IRQ_HANDLED : IRQ_NONE;
1823 }
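
/* Summary of the fan-out above (explanatory note; irq_cnt semantics
 * inferred from this file): a nonzero irq_cnt means this source is
 * already disabled and being serviced, so a shared interrupt is
 * declared IRQ_NONE rather than double-processed.  Otherwise fatal
 * errors queue an ASIC reset, STS_PI queues the MPI worker, and in
 * MSI/legacy mode rings below rss_ring_first_cq_id go to workers
 * while the RSS rings are scheduled through NAPI.
 */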
1824
1825 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1826 {
1827
1828         if (skb_is_gso(skb)) {
1829                 int err;
1830                 if (skb_header_cloned(skb)) {
1831                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1832                         if (err)
1833                                 return err;
1834                 }
1835
1836                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1837                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
1838                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1839                 mac_iocb_ptr->total_hdrs_len =
1840                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
1841                 mac_iocb_ptr->net_trans_offset =
1842                     cpu_to_le16(skb_network_offset(skb) |
1843                                 skb_transport_offset(skb)
1844                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
1845                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1846                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
1847                 if (likely(skb->protocol == htons(ETH_P_IP))) {
1848                         struct iphdr *iph = ip_hdr(skb);
1849                         iph->check = 0;
1850                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1851                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1852                                                                  iph->daddr, 0,
1853                                                                  IPPROTO_TCP,
1854                                                                  0);
1855                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1856                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
1857                         tcp_hdr(skb)->check =
1858                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1859                                              &ipv6_hdr(skb)->daddr,
1860                                              0, IPPROTO_TCP, 0);
1861                 }
1862                 return 1;
1863         }
1864         return 0;
1865 }
1866
1867 static void ql_hw_csum_setup(struct sk_buff *skb,
1868                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1869 {
1870         int len;
1871         struct iphdr *iph = ip_hdr(skb);
1872         __sum16 *check;
1873         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1874         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1875         mac_iocb_ptr->net_trans_offset =
1876                 cpu_to_le16(skb_network_offset(skb) |
1877                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
1878
1879         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1880         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
1881         if (likely(iph->protocol == IPPROTO_TCP)) {
1882                 check = &(tcp_hdr(skb)->check);
1883                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
1884                 mac_iocb_ptr->total_hdrs_len =
1885                     cpu_to_le16(skb_transport_offset(skb) +
1886                                 (tcp_hdr(skb)->doff << 2));
1887         } else {
1888                 check = &(udp_hdr(skb)->check);
1889                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
1890                 mac_iocb_ptr->total_hdrs_len =
1891                     cpu_to_le16(skb_transport_offset(skb) +
1892                                 sizeof(struct udphdr));
1893         }
1894         *check = ~csum_tcpudp_magic(iph->saddr,
1895                                     iph->daddr, len, iph->protocol, 0);
1896 }
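
/* Checksum-seed arithmetic used above (a sketch, not extra driver
 * code): for checksum offload the TCP/UDP checksum field is left
 * holding the pseudo-header sum so the chip only adds the payload.
 * ql_hw_csum_setup() seeds it with the true L4 length,
 *
 *	len = ntohs(iph->tot_len) - (iph->ihl << 2);
 *	*check = ~csum_tcpudp_magic(saddr, daddr, len, proto, 0);
 *
 * while ql_tso() seeds with len = 0, because for LSO the hardware
 * fills in the per-segment lengths as it cuts the frame into
 * MSS-sized pieces.
 */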
1897
1898 static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1899 {
1900         struct tx_ring_desc *tx_ring_desc;
1901         struct ob_mac_iocb_req *mac_iocb_ptr;
1902         struct ql_adapter *qdev = netdev_priv(ndev);
1903         int tso;
1904         struct tx_ring *tx_ring;
1905         u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1906
1907         tx_ring = &qdev->tx_ring[tx_ring_idx];
1908
1909         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1910                 QPRINTK(qdev, TX_QUEUED, INFO,
1911                         "%s: shutting down tx queue %d due to lack of resources.\n",
1912                         __func__, tx_ring_idx);
1913                 netif_stop_queue(ndev);
1914                 atomic_inc(&tx_ring->queue_stopped);
1915                 return NETDEV_TX_BUSY;
1916         }
1917         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1918         mac_iocb_ptr = tx_ring_desc->queue_entry;
1919         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
1920         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
1921                 QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
1922                 return NETDEV_TX_BUSY;
1923         }
1924
1925         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1926         mac_iocb_ptr->tid = tx_ring_desc->index;
1927         /* Record the tx queue index for this IO in the IOCB.
1928          * When we get the completion we can use it to establish the context.
1929          */
1930         mac_iocb_ptr->txq_idx = tx_ring_idx;
1931         tx_ring_desc->skb = skb;
1932
1933         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1934
1935         if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1936                 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1937                         vlan_tx_tag_get(skb));
1938                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1939                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1940         }
1941         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1942         if (tso < 0) {
1943                 dev_kfree_skb_any(skb);
1944                 return NETDEV_TX_OK;
1945         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1946                 ql_hw_csum_setup(skb,
1947                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1948         }
1949         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1950         tx_ring->prod_idx++;
1951         if (tx_ring->prod_idx == tx_ring->wq_len)
1952                 tx_ring->prod_idx = 0;
1953         wmb();
1954
1955         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1956         ndev->trans_start = jiffies;
1957         QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1958                 tx_ring->prod_idx, skb->len);
1959
1960         atomic_dec(&tx_ring->tx_count);
1961         return NETDEV_TX_OK;
1962 }
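
/* Ordering note for the send path above (explanatory only): the
 * wmb() guarantees the IOCB written through mac_iocb_ptr is visible
 * in memory before the producer index hits the doorbell, so the
 * chip can never fetch a half-written request.  The wrap
 *
 *	if (tx_ring->prod_idx == tx_ring->wq_len)
 *		tx_ring->prod_idx = 0;
 *
 * keeps the producer index inside the circular work queue.
 */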
1963
1964 static void ql_free_shadow_space(struct ql_adapter *qdev)
1965 {
1966         if (qdev->rx_ring_shadow_reg_area) {
1967                 pci_free_consistent(qdev->pdev,
1968                                     PAGE_SIZE,
1969                                     qdev->rx_ring_shadow_reg_area,
1970                                     qdev->rx_ring_shadow_reg_dma);
1971                 qdev->rx_ring_shadow_reg_area = NULL;
1972         }
1973         if (qdev->tx_ring_shadow_reg_area) {
1974                 pci_free_consistent(qdev->pdev,
1975                                     PAGE_SIZE,
1976                                     qdev->tx_ring_shadow_reg_area,
1977                                     qdev->tx_ring_shadow_reg_dma);
1978                 qdev->tx_ring_shadow_reg_area = NULL;
1979         }
1980 }
1981
1982 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
1983 {
1984         qdev->rx_ring_shadow_reg_area =
1985             pci_alloc_consistent(qdev->pdev,
1986                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
1987         if (qdev->rx_ring_shadow_reg_area == NULL) {
1988                 QPRINTK(qdev, IFUP, ERR,
1989                         "Allocation of RX shadow space failed.\n");
1990                 return -ENOMEM;
1991         }
1992         qdev->tx_ring_shadow_reg_area =
1993             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
1994                                  &qdev->tx_ring_shadow_reg_dma);
1995         if (qdev->tx_ring_shadow_reg_area == NULL) {
1996                 QPRINTK(qdev, IFUP, ERR,
1997                         "Allocation of TX shadow space failed.\n");
1998                 goto err_wqp_sh_area;
1999         }
2000         return 0;
2001
2002 err_wqp_sh_area:
2003         pci_free_consistent(qdev->pdev,
2004                             PAGE_SIZE,
2005                             qdev->rx_ring_shadow_reg_area,
2006                             qdev->rx_ring_shadow_reg_dma);
2007         return -ENOMEM;
2008 }
2009
2010 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2011 {
2012         struct tx_ring_desc *tx_ring_desc;
2013         int i;
2014         struct ob_mac_iocb_req *mac_iocb_ptr;
2015
2016         mac_iocb_ptr = tx_ring->wq_base;
2017         tx_ring_desc = tx_ring->q;
2018         for (i = 0; i < tx_ring->wq_len; i++) {
2019                 tx_ring_desc->index = i;
2020                 tx_ring_desc->skb = NULL;
2021                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2022                 mac_iocb_ptr++;
2023                 tx_ring_desc++;
2024         }
2025         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2026         atomic_set(&tx_ring->queue_stopped, 0);
2027 }
2028
2029 static void ql_free_tx_resources(struct ql_adapter *qdev,
2030                                  struct tx_ring *tx_ring)
2031 {
2032         if (tx_ring->wq_base) {
2033                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2034                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2035                 tx_ring->wq_base = NULL;
2036         }
2037         kfree(tx_ring->q);
2038         tx_ring->q = NULL;
2039 }
2040
2041 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2042                                  struct tx_ring *tx_ring)
2043 {
2044         tx_ring->wq_base =
2045             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2046                                  &tx_ring->wq_base_dma);
2047
2048         if ((tx_ring->wq_base == NULL) ||
2049             (tx_ring->wq_base_dma & (tx_ring->wq_size - 1))) {
2050                 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2051                 return -ENOMEM;
2052         }
2053         tx_ring->q =
2054             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2055         if (tx_ring->q == NULL)
2056                 goto err;
2057
2058         return 0;
2059 err:
2060         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2061                             tx_ring->wq_base, tx_ring->wq_base_dma);
2062         return -ENOMEM;
2063 }
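
/* The DMA-address test above assumes wq_size is a power of two
 * (example values for illustration): with wq_size = 4096,
 *
 *	wq_base_dma & (wq_size - 1)  ==  wq_base_dma & 0xfff
 *
 * which is nonzero exactly when the base is not 4 KB aligned; a
 * misaligned work queue base is treated as an allocation failure.
 */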
2064
2065 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2066 {
2067         int i;
2068         struct bq_desc *lbq_desc;
2069
2070         for (i = 0; i < rx_ring->lbq_len; i++) {
2071                 lbq_desc = &rx_ring->lbq[i];
2072                 if (lbq_desc->p.lbq_page) {
2073                         pci_unmap_page(qdev->pdev,
2074                                        pci_unmap_addr(lbq_desc, mapaddr),
2075                                        pci_unmap_len(lbq_desc, maplen),
2076                                        PCI_DMA_FROMDEVICE);
2077
2078                         put_page(lbq_desc->p.lbq_page);
2079                         lbq_desc->p.lbq_page = NULL;
2080                 }
2081         }
2082 }
2083
2084 /*
2085  * Allocate and map a page for each element of the lbq.
2086  */
2087 static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2088                                 struct rx_ring *rx_ring)
2089 {
2090         int i;
2091         struct bq_desc *lbq_desc;
2092         u64 map;
2093         __le64 *bq = rx_ring->lbq_base;
2094
2095         for (i = 0; i < rx_ring->lbq_len; i++) {
2096                 lbq_desc = &rx_ring->lbq[i];
2097                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2098                 lbq_desc->addr = bq;
2099                 lbq_desc->index = i;
2100                 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2101                 if (unlikely(!lbq_desc->p.lbq_page)) {
2102                         QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2103                         goto mem_error;
2104                 } else {
2105                         map = pci_map_page(qdev->pdev,
2106                                            lbq_desc->p.lbq_page,
2107                                            0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2108                         if (pci_dma_mapping_error(qdev->pdev, map)) {
2109                                 QPRINTK(qdev, IFUP, ERR,
2110                                         "PCI mapping failed.\n");
2111                                 goto mem_error;
2112                         }
2113                         pci_unmap_addr_set(lbq_desc, mapaddr, map);
2114                         pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2115                         *lbq_desc->addr = cpu_to_le64(map);
2116                 }
2117                 bq++;
2118         }
2119         return 0;
2120 mem_error:
2121         ql_free_lbq_buffers(qdev, rx_ring);
2122         return -ENOMEM;
2123 }
2124
2125 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2126 {
2127         int i;
2128         struct bq_desc *sbq_desc;
2129
2130         for (i = 0; i < rx_ring->sbq_len; i++) {
2131                 sbq_desc = &rx_ring->sbq[i];
2132                 if (sbq_desc == NULL) {
2133                         QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2134                         return;
2135                 }
2136                 if (sbq_desc->p.skb) {
2137                         pci_unmap_single(qdev->pdev,
2138                                          pci_unmap_addr(sbq_desc, mapaddr),
2139                                          pci_unmap_len(sbq_desc, maplen),
2140                                          PCI_DMA_FROMDEVICE);
2141                         dev_kfree_skb(sbq_desc->p.skb);
2142                         sbq_desc->p.skb = NULL;
2143                 }
2144         }
2145 }
2146
2147 /* Allocate and map an skb for each element of the sbq. */
2148 static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2149                                 struct rx_ring *rx_ring)
2150 {
2151         int i;
2152         struct bq_desc *sbq_desc;
2153         struct sk_buff *skb;
2154         u64 map;
2155         __le64 *bq = rx_ring->sbq_base;
2156
2157         for (i = 0; i < rx_ring->sbq_len; i++) {
2158                 sbq_desc = &rx_ring->sbq[i];
2159                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2160                 sbq_desc->index = i;
2161                 sbq_desc->addr = bq;
2162                 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2163                 if (unlikely(!skb)) {
2164                         /* Better luck next round */
2165                         QPRINTK(qdev, IFUP, ERR,
2166                                 "small buff alloc failed for %d bytes at index %d.\n",
2167                                 rx_ring->sbq_buf_size, i);
2168                         goto mem_err;
2169                 }
2170                 skb_reserve(skb, QLGE_SB_PAD);
2171                 sbq_desc->p.skb = skb;
2172                 /*
2173                  * Map only half the buffer, because the
2174                  * other half may get some data copied to it
2175                  * when the completion arrives.
2176                  */
2177                 map = pci_map_single(qdev->pdev,
2178                                      skb->data,
2179                                      rx_ring->sbq_buf_size / 2,
2180                                      PCI_DMA_FROMDEVICE);
2181                 if (pci_dma_mapping_error(qdev->pdev, map)) {
2182                         QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2183                         goto mem_err;
2184                 }
2185                 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2186                 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2187                 *sbq_desc->addr = cpu_to_le64(map);
2188                 bq++;
2189         }
2190         return 0;
2191 mem_err:
2192         ql_free_sbq_buffers(qdev, rx_ring);
2193         return -ENOMEM;
2194 }
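
/* Example of the half-buffer mapping above (the size is an assumed
 * value for illustration): with sbq_buf_size = 512, only the first
 * 256 bytes of skb->data are DMA-mapped for the chip; the upper
 * half stays CPU-owned so completion handling can copy small
 * amounts of data into it without a further mapping.
 */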
2195
2196 static void ql_free_rx_resources(struct ql_adapter *qdev,
2197                                  struct rx_ring *rx_ring)
2198 {
2199         if (rx_ring->sbq_len)
2200                 ql_free_sbq_buffers(qdev, rx_ring);
2201         if (rx_ring->lbq_len)
2202                 ql_free_lbq_buffers(qdev, rx_ring);
2203
2204         /* Free the small buffer queue. */
2205         if (rx_ring->sbq_base) {
2206                 pci_free_consistent(qdev->pdev,
2207                                     rx_ring->sbq_size,
2208                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2209                 rx_ring->sbq_base = NULL;
2210         }
2211
2212         /* Free the small buffer queue control blocks. */
2213         kfree(rx_ring->sbq);
2214         rx_ring->sbq = NULL;
2215
2216         /* Free the large buffer queue. */
2217         if (rx_ring->lbq_base) {
2218                 pci_free_consistent(qdev->pdev,
2219                                     rx_ring->lbq_size,
2220                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2221                 rx_ring->lbq_base = NULL;
2222         }
2223
2224         /* Free the large buffer queue control blocks. */
2225         kfree(rx_ring->lbq);
2226         rx_ring->lbq = NULL;
2227
2228         /* Free the rx queue. */
2229         if (rx_ring->cq_base) {
2230                 pci_free_consistent(qdev->pdev,
2231                                     rx_ring->cq_size,
2232                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2233                 rx_ring->cq_base = NULL;
2234         }
2235 }
2236
2237 /* Allocate queues and buffers for this completion queue based
2238  * on the values in the parameter structure. */
2239 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2240                                  struct rx_ring *rx_ring)
2241 {
2242
2243         /*
2244          * Allocate the completion queue for this rx_ring.
2245          */
2246         rx_ring->cq_base =
2247             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2248                                  &rx_ring->cq_base_dma);
2249
2250         if (rx_ring->cq_base == NULL) {
2251                 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2252                 return -ENOMEM;
2253         }
2254
2255         if (rx_ring->sbq_len) {
2256                 /*
2257                  * Allocate small buffer queue.
2258                  */
2259                 rx_ring->sbq_base =
2260                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2261                                          &rx_ring->sbq_base_dma);
2262
2263                 if (rx_ring->sbq_base == NULL) {
2264                         QPRINTK(qdev, IFUP, ERR,
2265                                 "Small buffer queue allocation failed.\n");
2266                         goto err_mem;
2267                 }
2268
2269                 /*
2270                  * Allocate small buffer queue control blocks.
2271                  */
2272                 rx_ring->sbq =
2273                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2274                             GFP_KERNEL);
2275                 if (rx_ring->sbq == NULL) {
2276                         QPRINTK(qdev, IFUP, ERR,
2277                                 "Small buffer queue control block allocation failed.\n");
2278                         goto err_mem;
2279                 }
2280
2281                 if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
2282                         QPRINTK(qdev, IFUP, ERR,
2283                                 "Small buffer allocation failed.\n");
2284                         goto err_mem;
2285                 }
2286         }
2287
2288         if (rx_ring->lbq_len) {
2289                 /*
2290                  * Allocate large buffer queue.
2291                  */
2292                 rx_ring->lbq_base =
2293                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2294                                          &rx_ring->lbq_base_dma);
2295
2296                 if (rx_ring->lbq_base == NULL) {
2297                         QPRINTK(qdev, IFUP, ERR,
2298                                 "Large buffer queue allocation failed.\n");
2299                         goto err_mem;
2300                 }
2301                 /*
2302                  * Allocate large buffer queue control blocks.
2303                  */
2304                 rx_ring->lbq =
2305                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2306                             GFP_KERNEL);
2307                 if (rx_ring->lbq == NULL) {
2308                         QPRINTK(qdev, IFUP, ERR,
2309                                 "Large buffer queue control block allocation failed.\n");
2310                         goto err_mem;
2311                 }
2312
2313                 /*
2314                  * Allocate the buffers.
2315                  */
2316                 if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2317                         QPRINTK(qdev, IFUP, ERR,
2318                                 "Large buffer allocation failed.\n");
2319                         goto err_mem;
2320                 }
2321         }
2322
2323         return 0;
2324
2325 err_mem:
2326         ql_free_rx_resources(qdev, rx_ring);
2327         return -ENOMEM;
2328 }
2329
2330 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2331 {
2332         struct tx_ring *tx_ring;
2333         struct tx_ring_desc *tx_ring_desc;
2334         int i, j;
2335
2336         /*
2337          * Loop through all queues and free
2338          * any resources.
2339          */
2340         for (j = 0; j < qdev->tx_ring_count; j++) {
2341                 tx_ring = &qdev->tx_ring[j];
2342                 for (i = 0; i < tx_ring->wq_len; i++) {
2343                         tx_ring_desc = &tx_ring->q[i];
2344                         if (tx_ring_desc && tx_ring_desc->skb) {
2345                                 QPRINTK(qdev, IFDOWN, ERR,
2346                                         "Freeing lost SKB %p, from queue %d, index %d.\n",
2347                                         tx_ring_desc->skb, j,
2348                                         tx_ring_desc->index);
2349                                 ql_unmap_send(qdev, tx_ring_desc,
2350                                               tx_ring_desc->map_cnt);
2351                                 dev_kfree_skb(tx_ring_desc->skb);
2352                                 tx_ring_desc->skb = NULL;
2353                         }
2354                 }
2355         }
2356 }
2357
2358 static void ql_free_ring_cb(struct ql_adapter *qdev)
2359 {
2360         kfree(qdev->ring_mem);
2361 }
2362
2363 static int ql_alloc_ring_cb(struct ql_adapter *qdev)
2364 {
2365         /* Allocate space for tx/rx ring control blocks. */
2366         qdev->ring_mem_size =
2367             (qdev->tx_ring_count * sizeof(struct tx_ring)) +
2368             (qdev->rx_ring_count * sizeof(struct rx_ring));
2369         qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
2370         if (qdev->ring_mem == NULL) {
2371                 return -ENOMEM;
2372         } else {
2373                 qdev->rx_ring = qdev->ring_mem;
2374                 qdev->tx_ring = qdev->ring_mem +
2375                     (qdev->rx_ring_count * sizeof(struct rx_ring));
2376         }
2377         return 0;
2378 }
2379
2380 static void ql_free_mem_resources(struct ql_adapter *qdev)
2381 {
2382         int i;
2383
2384         for (i = 0; i < qdev->tx_ring_count; i++)
2385                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2386         for (i = 0; i < qdev->rx_ring_count; i++)
2387                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2388         ql_free_shadow_space(qdev);
2389 }
2390
2391 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2392 {
2393         int i;
2394
2395         /* Allocate space for our shadow registers and such. */
2396         if (ql_alloc_shadow_space(qdev))
2397                 return -ENOMEM;
2398
2399         for (i = 0; i < qdev->rx_ring_count; i++) {
2400                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2401                         QPRINTK(qdev, IFUP, ERR,
2402                                 "RX resource allocation failed.\n");
2403                         goto err_mem;
2404                 }
2405         }
2406         /* Allocate tx queue resources */
2407         for (i = 0; i < qdev->tx_ring_count; i++) {
2408                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2409                         QPRINTK(qdev, IFUP, ERR,
2410                                 "TX resource allocation failed.\n");
2411                         goto err_mem;
2412                 }
2413         }
2414         return 0;
2415
2416 err_mem:
2417         ql_free_mem_resources(qdev);
2418         return -ENOMEM;
2419 }
2420
2421 /* Set up the rx ring control block and pass it to the chip.
2422  * The control block is defined as
2423  * "Completion Queue Initialization Control Block", or cqicb.
2424  */
2425 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2426 {
2427         struct cqicb *cqicb = &rx_ring->cqicb;
2428         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2429             (rx_ring->cq_id * sizeof(u64) * 4);
2430         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2431             (rx_ring->cq_id * sizeof(u64) * 4);
2432         void __iomem *doorbell_area =
2433             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2434         int err = 0;
2435         u16 bq_len;
2436
2437         /* Set up the shadow registers for this ring. */
2438         rx_ring->prod_idx_sh_reg = shadow_reg;
2439         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2440         shadow_reg += sizeof(u64);
2441         shadow_reg_dma += sizeof(u64);
2442         rx_ring->lbq_base_indirect = shadow_reg;
2443         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2444         shadow_reg += sizeof(u64);
2445         shadow_reg_dma += sizeof(u64);
2446         rx_ring->sbq_base_indirect = shadow_reg;
2447         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2448
2449         /* PCI doorbell mem area + 0x00 for consumer index register */
2450         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
2451         rx_ring->cnsmr_idx = 0;
2452         rx_ring->curr_entry = rx_ring->cq_base;
2453
2454         /* PCI doorbell mem area + 0x04 for valid register */
2455         rx_ring->valid_db_reg = doorbell_area + 0x04;
2456
2457         /* PCI doorbell mem area + 0x18 for large buffer consumer */
2458         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
2459
2460         /* PCI doorbell mem area + 0x1c */
2461         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
2462
2463         memset((void *)cqicb, 0, sizeof(struct cqicb));
2464         cqicb->msix_vect = rx_ring->irq;
2465
2466         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2467         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
2468
2469         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2470
2471         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2472
2473         /*
2474          * Set up the control block load flags.
2475          */
2476         cqicb->flags = FLAGS_LC |       /* Load queue base address */
2477             FLAGS_LV |          /* Load MSI-X vector */
2478             FLAGS_LI;           /* Load irq delay values */
2479         if (rx_ring->lbq_len) {
2480                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
2481                 *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(rx_ring->lbq_base_dma);
2482                 cqicb->lbq_addr =
2483                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2484                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2485                         (u16) rx_ring->lbq_buf_size;
2486                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2487                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2488                         (u16) rx_ring->lbq_len;
2489                 cqicb->lbq_len = cpu_to_le16(bq_len);
2490                 rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
2491                 rx_ring->lbq_curr_idx = 0;
2492                 rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
2493                 rx_ring->lbq_free_cnt = 16;
2494         }
2495         if (rx_ring->sbq_len) {
2496                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
2497                 *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(rx_ring->sbq_base_dma);
2498                 cqicb->sbq_addr =
2499                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2500                 cqicb->sbq_buf_size =
2501                     cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
2502                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2503                         (u16) rx_ring->sbq_len;
2504                 cqicb->sbq_len = cpu_to_le16(bq_len);
2505                 rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
2506                 rx_ring->sbq_curr_idx = 0;
2507                 rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
2508                 rx_ring->sbq_free_cnt = 16;
2509         }
2510         switch (rx_ring->type) {
2511         case TX_Q:
2512                 /* If there's only one interrupt, then we use
2513                  * worker threads to process the outbound
2514                  * completion handling rx_rings. We do this so
2515                  * they can be run on multiple CPUs. There is
2516                  * room to tune this further: only fall back to a
2517                  * worker when there are more than some number of
2518                  * outbound completions on the queue and more than
2519                  * one queue is active, i.e. a threshold at which
2520                  * the benefit outweighs the cost of a context
2521                  * switch.
2522                  * If there's more than one interrupt, the
2523                  * outbound completions could instead be
2524                  * processed directly in the ISR.
2525                  */
2526                 /* Both paths currently defer to the worker:
2527                  * with all debug warnings on we see a WARN_ON
2528                  * message when the skb is freed in interrupt
2529                  * context, so the delayed work is used whether
2530                  * or not MSI-X is enabled.
2531                  */
2532                 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2533                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2534                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2535                 break;
2536         case DEFAULT_Q:
2537                 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2538                 cqicb->irq_delay = 0;
2539                 cqicb->pkt_delay = 0;
2540                 break;
2541         case RX_Q:
2542                 /* Inbound completion handling rx_rings run in
2543                  * separate NAPI contexts.
2544                  */
2545                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2546                                64);
2547                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2548                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2549                 break;
2550         default:
2551                 QPRINTK(qdev, IFUP, ERR, "Invalid rx_ring->type = %d.\n",
2552                         rx_ring->type);
2553         }
2554         QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
2555         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2556                            CFG_LCQ, rx_ring->cq_id);
2557         if (err) {
2558                 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2559                 return err;
2560         }
2561         QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
2562         /* Advance the producer index for the buffer queues.  The wmb()
2563          * orders the descriptor writes above before the doorbell writes.
2564          */
2565         wmb();
2566         if (rx_ring->lbq_len)
2567                 ql_write_db_reg(rx_ring->lbq_prod_idx,
2568                                 rx_ring->lbq_prod_idx_db_reg);
2569         if (rx_ring->sbq_len)
2570                 ql_write_db_reg(rx_ring->sbq_prod_idx,
2571                                 rx_ring->sbq_prod_idx_db_reg);
2572         return err;
2573 }
2574
2575 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2576 {
2577         struct wqicb *wqicb = (struct wqicb *)tx_ring;
2578         void __iomem *doorbell_area =
2579             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2580         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2581             (tx_ring->wq_id * sizeof(u64));
2582         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2583             (tx_ring->wq_id * sizeof(u64));
2584         int err = 0;
2585
2586         /*
2587          * Assign doorbell registers for this tx_ring.
2588          */
2589         /* TX PCI doorbell mem area for tx producer index */
2590         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
2591         tx_ring->prod_idx = 0;
2592         /* TX PCI doorbell mem area + 0x04 */
2593         tx_ring->valid_db_reg = doorbell_area + 0x04;
2594
2595         /*
2596          * Assign shadow registers for this tx_ring.
2597          */
2598         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2599         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2600
2601         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2602         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2603                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2604         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2605         wqicb->rid = 0;
2606         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
2607
2608         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
2609
2610         ql_init_tx_ring(qdev, tx_ring);
2611
2612         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2613                            (u16) tx_ring->wq_id);
2614         if (err) {
2615                 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2616                 return err;
2617         }
2618         QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2619         return err;
2620 }
2621
2622 static void ql_disable_msix(struct ql_adapter *qdev)
2623 {
2624         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2625                 pci_disable_msix(qdev->pdev);
2626                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2627                 kfree(qdev->msi_x_entry);
2628                 qdev->msi_x_entry = NULL;
2629         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2630                 pci_disable_msi(qdev->pdev);
2631                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2632         }
2633 }
2634
2635 static void ql_enable_msix(struct ql_adapter *qdev)
2636 {
2637         int i;
2638
2639         qdev->intr_count = 1;
2640         /* Get the MSIX vectors. */
2641         if (irq_type == MSIX_IRQ) {
2642                 /* Try to allocate space for the MSI-X entry table;
2643                  * if that fails, fall back to MSI/legacy.
2644                  */
2645                 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2646                                             sizeof(struct msix_entry),
2647                                             GFP_KERNEL);
2648                 if (!qdev->msi_x_entry) {
2649                         irq_type = MSI_IRQ;
2650                         goto msi;
2651                 }
2652
2653                 for (i = 0; i < qdev->rx_ring_count; i++)
2654                         qdev->msi_x_entry[i].entry = i;
2655
2656                 if (!pci_enable_msix
2657                     (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2658                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
2659                         qdev->intr_count = qdev->rx_ring_count;
2660                         QPRINTK(qdev, IFUP, INFO,
2661                                 "MSI-X Enabled, got %d vectors.\n",
2662                                 qdev->intr_count);
2663                         return;
2664                 } else {
2665                         kfree(qdev->msi_x_entry);
2666                         qdev->msi_x_entry = NULL;
2667                         QPRINTK(qdev, IFUP, WARNING,
2668                                 "MSI-X Enable failed, trying MSI.\n");
2669                         irq_type = MSI_IRQ;
2670                 }
2671         }
2672 msi:
2673         if (irq_type == MSI_IRQ) {
2674                 if (!pci_enable_msi(qdev->pdev)) {
2675                         set_bit(QL_MSI_ENABLED, &qdev->flags);
2676                         QPRINTK(qdev, IFUP, INFO,
2677                                 "Running with MSI interrupts.\n");
2678                         return;
2679                 }
2680         }
2681         irq_type = LEG_IRQ;
2682         QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2683 }
2684
2685 /*
2686  * Here we build the intr_context structures based on
2687  * our rx_ring count and intr vector count.
2688  * The intr_context structure is used to hook each vector
2689  * to possibly different handlers.
2690  */
2691 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2692 {
2693         int i = 0;
2694         struct intr_context *intr_context = &qdev->intr_context[0];
2695
2696         ql_enable_msix(qdev);
2697
2698         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2699                 /* Each rx_ring has its own intr_context since we
2700                  * have a separate vector for each queue.
2701                  * This is only true when MSI-X is enabled.
2702                  */
2704                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2705                         qdev->rx_ring[i].irq = i;
2706                         intr_context->intr = i;
2707                         intr_context->qdev = qdev;
2708                         /*
2709                          * We set up each vector's enable/disable/read bits so
2710                          * there are no bit/mask calculations in the critical path.
2711                          */
2712                         intr_context->intr_en_mask =
2713                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2714                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2715                             | i;
2716                         intr_context->intr_dis_mask =
2717                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2718                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2719                             INTR_EN_IHD | i;
2720                         intr_context->intr_read_mask =
2721                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2722                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2723                             i;
2724
2725                         if (i == 0) {
2726                                 /*
2727                                  * Default queue handles bcast/mcast plus
2728                                  * async events.  Needs buffers.
2729                                  */
2730                                 intr_context->handler = qlge_isr;
2731                                 sprintf(intr_context->name, "%s-default-queue",
2732                                         qdev->ndev->name);
2733                         } else if (i < qdev->rss_ring_first_cq_id) {
2734                                 /*
2735                                  * Outbound queue is for outbound completions only.
2736                                  */
2737                                 intr_context->handler = qlge_msix_tx_isr;
2738                                 sprintf(intr_context->name, "%s-tx-%d",
2739                                         qdev->ndev->name, i);
2740                         } else {
2741                                 /*
2742                                  * Inbound queues handle unicast frames only.
2743                                  */
2744                                 intr_context->handler = qlge_msix_rx_isr;
2745                                 sprintf(intr_context->name, "%s-rx-%d",
2746                                         qdev->ndev->name, i);
2747                         }
2748                 }
2749         } else {
2750                 /*
2751                  * All rx_rings use the same intr_context since
2752                  * there is only one vector.
2753                  */
2754                 intr_context->intr = 0;
2755                 intr_context->qdev = qdev;
2756                 /*
2757                  * We set up each vector's enable/disable/read bits so
2758                  * there are no bit/mask calculations in the critical path.
2759                  */
2760                 intr_context->intr_en_mask =
2761                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2762                 intr_context->intr_dis_mask =
2763                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2764                     INTR_EN_TYPE_DISABLE;
2765                 intr_context->intr_read_mask =
2766                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2767                 /*
2768                  * Single interrupt means one handler for all rings.
2769                  */
2770                 intr_context->handler = qlge_isr;
2771                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2772                 for (i = 0; i < qdev->rx_ring_count; i++)
2773                         qdev->rx_ring[i].irq = 0;
2774         }
2775 }
2776
2777 static void ql_free_irq(struct ql_adapter *qdev)
2778 {
2779         int i;
2780         struct intr_context *intr_context = &qdev->intr_context[0];
2781
2782         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2783                 if (intr_context->hooked) {
2784                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2785                                 free_irq(qdev->msi_x_entry[i].vector,
2786                                          &qdev->rx_ring[i]);
2787                                 QPRINTK(qdev, IFDOWN, DEBUG,
2788                                         "freeing msix interrupt %d.\n", i);
2789                         } else {
2790                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2791                                 QPRINTK(qdev, IFDOWN, DEBUG,
2792                                         "freeing msi interrupt %d.\n", i);
2793                         }
2794                 }
2795         }
2796         ql_disable_msix(qdev);
2797 }
2798
2799 static int ql_request_irq(struct ql_adapter *qdev)
2800 {
2801         int i;
2802         int status = 0;
2803         struct pci_dev *pdev = qdev->pdev;
2804         struct intr_context *intr_context = &qdev->intr_context[0];
2805
2806         ql_resolve_queues_to_irqs(qdev);
2807
2808         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2809                 atomic_set(&intr_context->irq_cnt, 0);
2810                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2811                         status = request_irq(qdev->msi_x_entry[i].vector,
2812                                              intr_context->handler,
2813                                              0,
2814                                              intr_context->name,
2815                                              &qdev->rx_ring[i]);
2816                         if (status) {
2817                                 QPRINTK(qdev, IFUP, ERR,
2818                                         "Failed request for MSIX interrupt %d.\n",
2819                                         i);
2820                                 goto err_irq;
2821                         } else {
2822                                 QPRINTK(qdev, IFUP, INFO,
2823                                         "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2824                                         i,
2825                                         qdev->rx_ring[i].type ==
2826                                         DEFAULT_Q ? "DEFAULT_Q" : "",
2827                                         qdev->rx_ring[i].type ==
2828                                         TX_Q ? "TX_Q" : "",
2829                                         qdev->rx_ring[i].type ==
2830                                         RX_Q ? "RX_Q" : "", intr_context->name);
2831                         }
2832                 } else {
2833                         QPRINTK(qdev, IFUP, DEBUG,
2834                                 "trying msi or legacy interrupts.\n");
2835                         QPRINTK(qdev, IFUP, DEBUG,
2836                                 "%s: irq = %d.\n", __func__, pdev->irq);
2837                         QPRINTK(qdev, IFUP, DEBUG,
2838                                 "%s: context->name = %s.\n", __func__,
2839                                intr_context->name);
2840                         QPRINTK(qdev, IFUP, DEBUG,
2841                                 "%s: dev_id = 0x%p.\n", __func__,
2842                                &qdev->rx_ring[0]);
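                             /* MSI gives this function an exclusive vector,
                              * while a legacy INTx line may be shared, hence
                              * IRQF_SHARED only in the legacy case.
                              */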
2843                         status =
2844                             request_irq(pdev->irq, qlge_isr,
2845                                         test_bit(QL_MSI_ENABLED,
2846                                                  &qdev->
2847                                                  flags) ? 0 : IRQF_SHARED,
2848                                         intr_context->name, &qdev->rx_ring[0]);
2849                         if (status)
2850                                 goto err_irq;
2851
2852                         QPRINTK(qdev, IFUP, INFO,
2853                                 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2854                                 i,
2855                                 qdev->rx_ring[0].type ==
2856                                 DEFAULT_Q ? "DEFAULT_Q" : "",
2857                                 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2858                                 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2859                                 intr_context->name);
2860                 }
2861                 intr_context->hooked = 1;
2862         }
2863         return status;
2864 err_irq:
2865         QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!\n");
2866         ql_free_irq(qdev);
2867         return status;
2868 }
2869
2870 static int ql_start_rss(struct ql_adapter *qdev)
2871 {
2872         struct ricb *ricb = &qdev->ricb;
2873         int status = 0;
2874         int i;
2875         u8 *hash_id = (u8 *) ricb->hash_cq_id;
2876
2877         memset((void *)ricb, 0, sizeof(*ricb));
2878
2879         ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2880         ricb->flags =
2881             (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2882              RSS_RT6);
2883         ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
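             /* Note: using rss_ring_count - 1 as the hash mask assumes
              * the inbound ring count is a power of two.
              */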
2884
2885         /*
2886          * Fill out the Indirection Table.
2887          */
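             /* i & 1 alternates the 32 entries between hash queue IDs 0
              * and 1 (apparently relative to the base CQ), so hashed
              * traffic is spread across only the first two inbound queues.
              */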
2888         for (i = 0; i < 32; i++)
2889                 hash_id[i] = i & 1;
2890
2891         /*
2892          * Random values for the IPv6 and IPv4 Hash Keys.
2893          */
2894         get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2895         get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2896
2897         QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2898
2899         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
2900         if (status) {
2901                 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2902                 return status;
2903         }
2904         QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2905         return status;
2906 }
2907
2908 /* Initialize the frame-to-queue routing. */
2909 static int ql_route_initialize(struct ql_adapter *qdev)
2910 {
2911         int status = 0;
2912         int i;
2913
2914         /* Clear all the entries in the routing table. */
2915         for (i = 0; i < 16; i++) {
2916                 status = ql_set_routing_reg(qdev, i, 0, 0);
2917                 if (status) {
2918                         QPRINTK(qdev, IFUP, ERR,
2919                                 "Failed to clear routing register %d.\n", i);
2920                         return status;
2921                 }
2922         }
2923
2924         status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
2925         if (status) {
2926                 QPRINTK(qdev, IFUP, ERR,
2927                         "Failed to init routing register for error packets.\n");
2928                 return status;
2929         }
2930         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2931         if (status) {
2932                 QPRINTK(qdev, IFUP, ERR,
2933                         "Failed to init routing register for broadcast packets.\n");
2934                 return status;
2935         }
2936         /* If we have more than one inbound queue, then turn on RSS in the
2937          * routing block.
2938          */
2939         if (qdev->rss_ring_count > 1) {
2940                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
2941                                         RT_IDX_RSS_MATCH, 1);
2942                 if (status) {
2943                         QPRINTK(qdev, IFUP, ERR,
2944                                 "Failed to init routing register for MATCH RSS packets.\n");
2945                         return status;
2946                 }
2947         }
2948
2949         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2950                                     RT_IDX_CAM_HIT, 1);
2951         if (status) {
2952                 QPRINTK(qdev, IFUP, ERR,
2953                         "Failed to init routing register for CAM packets.\n");
2954                 return status;
2955         }
2956         return status;
2957 }
2958
2959 static int ql_adapter_initialize(struct ql_adapter *qdev)
2960 {
2961         u32 value, mask;
2962         int i;
2963         int status = 0;
2964
2965         /*
2966          * Set up the System register to halt on errors.
2967          */
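             /* The upper 16 bits of these control-register writes appear
              * to be a write mask: only bits whose mask bit is set are
              * modified by the write.
              */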
2968         value = SYS_EFE | SYS_FAE;
2969         mask = value << 16;
2970         ql_write32(qdev, SYS, mask | value);
2971
2972         /* Set the default queue. */
2973         value = NIC_RCV_CFG_DFQ;
2974         mask = NIC_RCV_CFG_DFQ_MASK;
2975         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
2976
2977         /* Set the MPI interrupt to enabled. */
2978         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
2979
2980         /* Enable the function, set pagesize, enable error checking. */
2981         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
2982             FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
2983
2984         /* Set/clear header splitting. */
2985         mask = FSC_VM_PAGESIZE_MASK |
2986             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
2987         ql_write32(qdev, FSC, mask | value);
2988
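             /* Split received headers at the small buffer size, capped at
              * the chip's maximum split length.
              */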
2989         ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
2990                 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
2991
2992         /* Start up the rx queues. */
2993         for (i = 0; i < qdev->rx_ring_count; i++) {
2994                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
2995                 if (status) {
2996                         QPRINTK(qdev, IFUP, ERR,
2997                                 "Failed to start rx ring[%d].\n", i);
2998                         return status;
2999                 }
3000         }
3001
3002         /* If there is more than one inbound completion queue
3003          * then download a RICB to configure RSS.
3004          */
3005         if (qdev->rss_ring_count > 1) {
3006                 status = ql_start_rss(qdev);
3007                 if (status) {
3008                         QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3009                         return status;
3010                 }
3011         }
3012
3013         /* Start up the tx queues. */
3014         for (i = 0; i < qdev->tx_ring_count; i++) {
3015                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3016                 if (status) {
3017                         QPRINTK(qdev, IFUP, ERR,
3018                                 "Failed to start tx ring[%d].\n", i);
3019                         return status;
3020                 }
3021         }
3022
3023         status = ql_port_initialize(qdev);
3024         if (status) {
3025                 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3026                 return status;
3027         }
3028
3029         status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
3030                                      MAC_ADDR_TYPE_CAM_MAC, qdev->func);
3031         if (status) {
3032                 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3033                 return status;
3034         }
3035
3036         status = ql_route_initialize(qdev);
3037         if (status) {
3038                 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3039                 return status;
3040         }
3041
3042         /* Start NAPI for the RSS queues. */
3043         for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
3044                 QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
3045                         i);
3046                 napi_enable(&qdev->rx_ring[i].napi);
3047         }
3048
3049         return status;
3050 }
3051
3052 /* Issue soft reset to chip. */
3053 static int ql_adapter_reset(struct ql_adapter *qdev)
3054 {
3055         u32 value;
3056         int max_wait_time;
3057         int status = 0;
3058         int resetCnt = 0;
3059
3060 #define MAX_RESET_CNT   1
3061 issueReset:
3062         resetCnt++;
3063         QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3064         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3065         /* Wait for reset to complete. */
3066         max_wait_time = 3;
3067         QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3068                 max_wait_time);
3069         do {
3070                 value = ql_read32(qdev, RST_FO);
3071                 if ((value & RST_FO_FR) == 0)
3072                         break;
3073
3074                 ssleep(1);
3075         } while ((--max_wait_time));
3076         if (value & RST_FO_FR) {
3077                 QPRINTK(qdev, IFDOWN, ERR,
3078                         "Stuck in SoftReset:  FSC_SR:0x%08x\n", value);
3079                 if (resetCnt < MAX_RESET_CNT)
3080                         goto issueReset;
3081         }
3082         if (max_wait_time == 0) {
3083                 status = -ETIMEDOUT;
3084                 QPRINTK(qdev, IFDOWN, ERR,
3085                         "Timed out waiting for the chip reset to complete!\n");
3086         }
3087
3088         return status;
3089 }
3090
3091 static void ql_display_dev_info(struct net_device *ndev)
3092 {
3093         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3094
3095         QPRINTK(qdev, PROBE, INFO,
3096                 "Function #%d, NIC Roll = %d, NIC Rev = %d, "
3097                 "XG Roll = %d, XG Rev = %d.\n",
3098                 qdev->func,
3099                 qdev->chip_rev_id & 0x0000000f,
3100                 qdev->chip_rev_id >> 4 & 0x0000000f,
3101                 qdev->chip_rev_id >> 8 & 0x0000000f,
3102                 qdev->chip_rev_id >> 12 & 0x0000000f);
3103         QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3104 }
3105
3106 static int ql_adapter_down(struct ql_adapter *qdev)
3107 {
3108         struct net_device *ndev = qdev->ndev;
3109         int i, status = 0;
3110         struct rx_ring *rx_ring;
3111
3112         netif_stop_queue(ndev);
3113         netif_carrier_off(ndev);
3114
3115         cancel_delayed_work_sync(&qdev->asic_reset_work);
3116         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3117         cancel_delayed_work_sync(&qdev->mpi_work);
3118
3119         /* The default queue at index 0 is always processed in
3120          * a workqueue.
3121          */
3122         cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3123
3124         /* The rest of the rx_rings are processed in
3125          * a workqueue only if it's a single interrupt
3126          * environment (MSI/Legacy).
3127          */
3128         for (i = 1; i < qdev->rx_ring_count; i++) {
3129                 rx_ring = &qdev->rx_ring[i];
3130                 /* Only the RSS rings use NAPI in a multi-IRQ
3131                  * environment.  Outbound completion processing
3132                  * is done in interrupt context.
3133                  */
3134                 if (i >= qdev->rss_ring_first_cq_id) {
3135                         napi_disable(&rx_ring->napi);
3136                 } else {
3137                         cancel_delayed_work_sync(&rx_ring->rx_work);
3138                 }
3139         }
3140
3141         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3142
3143         ql_disable_interrupts(qdev);
3144
3145         ql_tx_ring_clean(qdev);
3146
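             /* Note: ql_adapter_reset() can sleep (ssleep) while polling
              * RST_FO, so calling it under a spinlock looks unsafe.
              */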
3147         spin_lock(&qdev->hw_lock);
3148         status = ql_adapter_reset(qdev);
3149         if (status)
3150                 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3151                         qdev->func);
3152         spin_unlock(&qdev->hw_lock);
3153         return status;
3154 }
3155
3156 static int ql_adapter_up(struct ql_adapter *qdev)
3157 {
3158         int err = 0;
3159
3160         spin_lock(&qdev->hw_lock);
3161         err = ql_adapter_initialize(qdev);
3162         if (err) {
3163                 QPRINTK(qdev, IFUP, ERR, "Unable to initialize adapter.\n");
3164                 spin_unlock(&qdev->hw_lock);
3165                 goto err_init;
3166         }
3167         spin_unlock(&qdev->hw_lock);
3168         set_bit(QL_ADAPTER_UP, &qdev->flags);
3169         ql_enable_interrupts(qdev);
3170         ql_enable_all_completion_interrupts(qdev);
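             /* Only start the queues if the port reports initialized
              * (the STS port_init bit is set).
              */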
3171         if ((ql_read32(qdev, STS) & qdev->port_init)) {
3172                 netif_carrier_on(qdev->ndev);
3173                 netif_start_queue(qdev->ndev);
3174         }
3175
3176         return 0;
3177 err_init:
3178         ql_adapter_reset(qdev);
3179         return err;
3180 }
3181
3182 static int ql_cycle_adapter(struct ql_adapter *qdev)
3183 {
3184         int status;
3185
3186         status = ql_adapter_down(qdev);
3187         if (status)
3188                 goto error;
3189
3190         status = ql_adapter_up(qdev);
3191         if (status)
3192                 goto error;
3193
3194         return status;
3195 error:
3196         QPRINTK(qdev, IFUP, ALERT,
3197                 "Driver up/down cycle failed, closing device\n");
3198         rtnl_lock();
3199         dev_close(qdev->ndev);
3200         rtnl_unlock();
3201         return status;
3202 }
3203
3204 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3205 {
3206         ql_free_mem_resources(qdev);
3207         ql_free_irq(qdev);
3208 }
3209
3210 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3211 {
3212         int status = 0;
3213
3214         if (ql_alloc_mem_resources(qdev)) {
3215                 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3216                 return -ENOMEM;
3217         }
3218         status = ql_request_irq(qdev);
3219         if (status)
3220                 goto err_irq;
3221         return status;
3222 err_irq:
3223         ql_free_mem_resources(qdev);
3224         return status;
3225 }
3226
3227 static int qlge_close(struct net_device *ndev)
3228 {
3229         struct ql_adapter *qdev = netdev_priv(ndev);
3230
3231         /*
3232          * Wait for device to recover from a reset.
3233          * (Rarely happens, but possible.)
3234          */
3235         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3236                 msleep(1);
3237         ql_adapter_down(qdev);
3238         ql_release_adapter_resources(qdev);
3239         ql_free_ring_cb(qdev);
3240         return 0;
3241 }
3242
3243 static int ql_configure_rings(struct ql_adapter *qdev)
3244 {
3245         int i;
3246         struct rx_ring *rx_ring;
3247         struct tx_ring *tx_ring;
3248         int cpu_cnt = num_online_cpus();
3249
3250         /*
3251          * For each CPU present we allocate one rx_ring for
3252          * outbound completions and one rx_ring for inbound
3253          * (RSS) completions, plus there is always the one
3254          * default queue:
3255          * rx_ring count =
3256          *  one default queue +
3257          *  (CPU count * outbound completion rx_rings) +
3258          *  (CPU count * inbound (RSS) completion rx_rings)
3259          * e.g. 8 online CPUs yield 1 + 8 + 8 = 17 rx_rings.
3260          * To keep the total number of queues under 32 we cap
3261          * the CPU count at 8.  This limitation can be removed
3262          * if requested.
3263          */
3264
3265         if (cpu_cnt > 8)
3266                 cpu_cnt = 8;
3267
3268         /*
3269          * rx_ring[0] is always the default queue.
3270          */
3271         /* Allocate outbound completion ring for each CPU. */
3272         qdev->tx_ring_count = cpu_cnt;
3273         /* Allocate inbound completion (RSS) ring for each CPU. */
3274         qdev->rss_ring_count = cpu_cnt;
3275         /* cq_id for the first inbound ring handler. */
3276         qdev->rss_ring_first_cq_id = cpu_cnt + 1;
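             /* Resulting rx_ring layout: index 0 is the default queue,
              * indices 1..cpu_cnt are outbound (TX) completion rings, and
              * indices cpu_cnt + 1 .. rx_ring_count - 1 are the inbound
              * (RSS) rings.
              */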
3277         /*
3278          * qdev->rx_ring_count:
3279          * Total number of rx_rings.  This includes the one
3280          * default queue, a number of outbound completion
3281          * handler rx_rings, and the number of inbound
3282          * completion handler rx_rings.
3283          */
3284         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3285
3286         if (ql_alloc_ring_cb(qdev))
3287                 return -ENOMEM;
3288
3289         for (i = 0; i < qdev->tx_ring_count; i++) {
3290                 tx_ring = &qdev->tx_ring[i];
3291                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3292                 tx_ring->qdev = qdev;
3293                 tx_ring->wq_id = i;
3294                 tx_ring->wq_len = qdev->tx_ring_size;
3295                 tx_ring->wq_size =
3296                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3297
3298                 /*
3299                  * The completion queue ID for the tx rings starts
3300                  * immediately after the default Q ID, which is zero.
3301                  */
3302                 tx_ring->cq_id = i + 1;
3303         }
3304
3305         for (i = 0; i < qdev->rx_ring_count; i++) {
3306                 rx_ring = &qdev->rx_ring[i];
3307                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3308                 rx_ring->qdev = qdev;
3309                 rx_ring->cq_id = i;
3310                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
3311                 if (i == 0) {   /* Default queue at index 0. */
3312                         /*
3313                          * Default queue handles bcast/mcast plus
3314                          * async events.  Needs buffers.
3315                          */
3316                         rx_ring->cq_len = qdev->rx_ring_size;
3317                         rx_ring->cq_size =
3318                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3319                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3320                         rx_ring->lbq_size =
3321                             rx_ring->lbq_len * sizeof(__le64);
3322                         rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3323                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3324                         rx_ring->sbq_size =
3325                             rx_ring->sbq_len * sizeof(__le64);
3326                         rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3327                         rx_ring->type = DEFAULT_Q;
3328                 } else if (i < qdev->rss_ring_first_cq_id) {
3329                         /*
3330                          * Outbound queue handles outbound completions only.
3331                          */
3332                         /* outbound cq is same size as tx_ring it services. */
3333                         rx_ring->cq_len = qdev->tx_ring_size;
3334                         rx_ring->cq_size =
3335                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3336                         rx_ring->lbq_len = 0;
3337                         rx_ring->lbq_size = 0;
3338                         rx_ring->lbq_buf_size = 0;
3339                         rx_ring->sbq_len = 0;
3340                         rx_ring->sbq_size = 0;
3341                         rx_ring->sbq_buf_size = 0;
3342                         rx_ring->type = TX_Q;
3343                 } else {        /* Inbound completions (RSS) queues */
3344                         /*
3345                          * Inbound queues handle unicast frames only.
3346                          */
3347                         rx_ring->cq_len = qdev->rx_ring_size;
3348                         rx_ring->cq_size =
3349                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3350                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3351                         rx_ring->lbq_size =
3352                             rx_ring->lbq_len * sizeof(__le64);
3353                         rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3354                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3355                         rx_ring->sbq_size =
3356                             rx_ring->sbq_len * sizeof(__le64);
3357                         rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3358                         rx_ring->type = RX_Q;
3359                 }
3360         }
3361         return 0;
3362 }
3363
3364 static int qlge_open(struct net_device *ndev)
3365 {
3366         int err = 0;
3367         struct ql_adapter *qdev = netdev_priv(ndev);
3368
3369         err = ql_configure_rings(qdev);
3370         if (err)
3371                 return err;
3372
3373         err = ql_get_adapter_resources(qdev);
3374         if (err)
3375                 goto error_up;
3376
3377         err = ql_adapter_up(qdev);
3378         if (err)
3379                 goto error_up;
3380
3381         return err;
3382
3383 error_up:
3384         ql_release_adapter_resources(qdev);
3385         ql_free_ring_cb(qdev);
3386         return err;
3387 }
3388
3389 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3390 {
3391         struct ql_adapter *qdev = netdev_priv(ndev);
3392
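             /* Only the standard (1500) and jumbo (9000) MTUs are
              * supported, presumably because the receive buffers are
              * sized for exactly these two.
              */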
3393         if (ndev->mtu == 1500 && new_mtu == 9000) {
3394                 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3395         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3396                 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3397         } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3398                    (ndev->mtu == 9000 && new_mtu == 9000)) {
3399                 return 0;
3400         } else
3401                 return -EINVAL;
3402         ndev->mtu = new_mtu;
3403         return 0;
3404 }
3405
3406 static struct net_device_stats *qlge_get_stats(struct net_device
3407                                                *ndev)
3408 {
3409         struct ql_adapter *qdev = netdev_priv(ndev);
3410         return &qdev->stats;
3411 }
3412
3413 static void qlge_set_multicast_list(struct net_device *ndev)
3414 {
3415         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3416         struct dev_mc_list *mc_ptr;
3417         int i;
3418
3419         spin_lock(&qdev->hw_lock);
3420         /*
3421          * Set or clear promiscuous mode if a
3422          * transition is taking place.
3423          */
3424         if (ndev->flags & IFF_PROMISC) {
3425                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3426                         if (ql_set_routing_reg
3427                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3428                                 QPRINTK(qdev, HW, ERR,
3429                                         "Failed to set promiscuous mode.\n");
3430                         } else {
3431                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
3432                         }
3433                 }
3434         } else {
3435                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3436                         if (ql_set_routing_reg
3437                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3438                                 QPRINTK(qdev, HW, ERR,
3439                                         "Failed to clear promiscuous mode.\n");
3440                         } else {
3441                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3442                         }
3443                 }
3444         }
3445
3446         /*
3447          * Set or clear all multicast mode if a
3448          * transition is taking place.
3449          */
3450         if ((ndev->flags & IFF_ALLMULTI) ||
3451             (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3452                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3453                         if (ql_set_routing_reg
3454                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3455                                 QPRINTK(qdev, HW, ERR,
3456                                         "Failed to set all-multi mode.\n");
3457                         } else {
3458                                 set_bit(QL_ALLMULTI, &qdev->flags);
3459                         }
3460                 }
3461         } else {
3462                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3463                         if (ql_set_routing_reg
3464                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3465                                 QPRINTK(qdev, HW, ERR,
3466                                         "Failed to clear all-multi mode.\n");
3467                         } else {
3468                                 clear_bit(QL_ALLMULTI, &qdev->flags);
3469                         }
3470                 }
3471         }
3472
3473         if (ndev->mc_count) {
3474                 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3475                      i++, mc_ptr = mc_ptr->next)
3476                         if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3477                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3478                                 QPRINTK(qdev, HW, ERR,
3479                                         "Failed to load multicast address.\n");
3480                                 goto exit;
3481                         }
3482                 if (ql_set_routing_reg
3483                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3484                         QPRINTK(qdev, HW, ERR,
3485                                 "Failed to set multicast match mode.\n");
3486                 } else {
3487                         set_bit(QL_ALLMULTI, &qdev->flags);
3488                 }
3489         }
3490 exit:
3491         spin_unlock(&qdev->hw_lock);
3492 }
3493
3494 static int qlge_set_mac_address(struct net_device *ndev, void *p)
3495 {
3496         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3497         struct sockaddr *addr = p;
3498         int ret = 0;
3499
3500         if (netif_running(ndev))
3501                 return -EBUSY;
3502
3503         if (!is_valid_ether_addr(addr->sa_data))
3504                 return -EADDRNOTAVAIL;
3505         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3506
3507         spin_lock(&qdev->hw_lock);
3508         if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3509                         MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
3510                 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3511                 ret = -1;
3512         }
3513         spin_unlock(&qdev->hw_lock);
3514
3515         return ret;
3516 }
3517
3518 static void qlge_tx_timeout(struct net_device *ndev)
3519 {
3520         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3521         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
3522 }
3523
3524 static void ql_asic_reset_work(struct work_struct *work)
3525 {
3526         struct ql_adapter *qdev =
3527             container_of(work, struct ql_adapter, asic_reset_work.work);
3528         ql_cycle_adapter(qdev);
3529 }
3530
3531 static void ql_get_board_info(struct ql_adapter *qdev)
3532 {
3533         qdev->func =
3534             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
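             /* Function 1 uses the second XGMAC, link/init status bits
              * and mailbox pair; function 0 uses the first of each.
              */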
3535         if (qdev->func) {
3536                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3537                 qdev->port_link_up = STS_PL1;
3538                 qdev->port_init = STS_PI1;
3539                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3540                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3541         } else {
3542                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3543                 qdev->port_link_up = STS_PL0;
3544                 qdev->port_init = STS_PI0;
3545                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3546                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3547         }
3548         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3549 }
3550
3551 static void ql_release_all(struct pci_dev *pdev)
3552 {
3553         struct net_device *ndev = pci_get_drvdata(pdev);
3554         struct ql_adapter *qdev = netdev_priv(ndev);
3555
3556         if (qdev->workqueue) {
3557                 destroy_workqueue(qdev->workqueue);
3558                 qdev->workqueue = NULL;
3559         }
3560         if (qdev->q_workqueue) {
3561                 destroy_workqueue(qdev->q_workqueue);
3562                 qdev->q_workqueue = NULL;
3563         }
3564         if (qdev->reg_base)
3565                 iounmap(qdev->reg_base);
3566         if (qdev->doorbell_area)
3567                 iounmap(qdev->doorbell_area);
3568         pci_release_regions(pdev);
3569         pci_set_drvdata(pdev, NULL);
3570 }
3571
3572 static int __devinit ql_init_device(struct pci_dev *pdev,
3573                                     struct net_device *ndev, int cards_found)
3574 {
3575         struct ql_adapter *qdev = netdev_priv(ndev);
3576         int pos, err = 0;
3577         u16 val16;
3578
3579         memset((void *)qdev, 0, sizeof(*qdev));
3580         err = pci_enable_device(pdev);
3581         if (err) {
3582                 dev_err(&pdev->dev, "PCI device enable failed.\n");
3583                 return err;
3584         }
3585
3586         pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3587         if (pos <= 0) {
3588                 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3589                         "aborting.\n");
                     err = -EIO;
3590                 goto err_out;
3591         } else {
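                     /* Clear the no-snoop enable and turn on reporting of
                      * correctable, non-fatal, fatal and unsupported-request
                      * errors.
                      */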
3592                 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3593                 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3594                 val16 |= (PCI_EXP_DEVCTL_CERE |
3595                           PCI_EXP_DEVCTL_NFERE |
3596                           PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3597                 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3598         }
3599
3600         err = pci_request_regions(pdev, DRV_NAME);
3601         if (err) {
3602                 dev_err(&pdev->dev, "PCI region request failed.\n");
3603                 goto err_out;
3604         }
3605
3606         pci_set_master(pdev);
3607         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3608                 set_bit(QL_DMA64, &qdev->flags);
3609                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3610         } else {
3611                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3612                 if (!err)
3613                        err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3614         }
3615
3616         if (err) {
3617                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3618                 goto err_out;
3619         }
3620
3621         pci_set_drvdata(pdev, ndev);
3622         qdev->reg_base =
3623             ioremap_nocache(pci_resource_start(pdev, 1),
3624                             pci_resource_len(pdev, 1));
3625         if (!qdev->reg_base) {
3626                 dev_err(&pdev->dev, "Register mapping failed.\n");
3627                 err = -ENOMEM;
3628                 goto err_out;
3629         }
3630
3631         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3632         qdev->doorbell_area =
3633             ioremap_nocache(pci_resource_start(pdev, 3),
3634                             pci_resource_len(pdev, 3));
3635         if (!qdev->doorbell_area) {
3636                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3637                 err = -ENOMEM;
3638                 goto err_out;
3639         }
3640
3641         ql_get_board_info(qdev);
3642         qdev->ndev = ndev;
3643         qdev->pdev = pdev;
3644         qdev->msg_enable = netif_msg_init(debug, default_msg);
3645         spin_lock_init(&qdev->hw_lock);
3646         spin_lock_init(&qdev->stats_lock);
3647
3648         /* make sure the EEPROM is good */
3649         err = ql_get_flash_params(qdev);
3650         if (err) {
3651                 dev_err(&pdev->dev, "Invalid FLASH.\n");
3652                 goto err_out;
3653         }
3654
3655         if (!is_valid_ether_addr(qdev->flash.mac_addr)) {
                     dev_err(&pdev->dev, "Invalid MAC address in FLASH.\n");
                     err = -EIO;
3656                 goto err_out;
             }
3657
3658         memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3659         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3660
3661         /* Set up the default ring sizes. */
3662         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3663         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3664
3665         /* Set up the coalescing parameters. */
3666         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3667         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3668         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3669         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3670
3671         /*
3672          * Set up the operating parameters.
3673          */
3674         qdev->rx_csum = 1;
3675
3676         qdev->q_workqueue = create_workqueue(ndev->name);
3677         qdev->workqueue = create_singlethread_workqueue(ndev->name);
3678         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3679         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3680         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3681
3682         if (!cards_found) {
3683                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3684                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3685                          DRV_NAME, DRV_VERSION);
3686         }
3687         return 0;
3688 err_out:
3689         ql_release_all(pdev);
3690         pci_disable_device(pdev);
3691         return err;
3692 }
3693
3695 static const struct net_device_ops qlge_netdev_ops = {
3696         .ndo_open               = qlge_open,
3697         .ndo_stop               = qlge_close,
3698         .ndo_start_xmit         = qlge_send,
3699         .ndo_change_mtu         = qlge_change_mtu,
3700         .ndo_get_stats          = qlge_get_stats,
3701         .ndo_set_multicast_list = qlge_set_multicast_list,
3702         .ndo_set_mac_address    = qlge_set_mac_address,
3703         .ndo_validate_addr      = eth_validate_addr,
3704         .ndo_tx_timeout         = qlge_tx_timeout,
3705         .ndo_vlan_rx_register   = ql_vlan_rx_register,
3706         .ndo_vlan_rx_add_vid    = ql_vlan_rx_add_vid,
3707         .ndo_vlan_rx_kill_vid   = ql_vlan_rx_kill_vid,
3708 };
3709
3710 static int __devinit qlge_probe(struct pci_dev *pdev,
3711                                 const struct pci_device_id *pci_entry)
3712 {
3713         struct net_device *ndev = NULL;
3714         struct ql_adapter *qdev = NULL;
3715         static int cards_found = 0;
3716         int err = 0;
3717
3718         ndev = alloc_etherdev(sizeof(struct ql_adapter));
3719         if (!ndev)
3720                 return -ENOMEM;
3721
3722         err = ql_init_device(pdev, ndev, cards_found);
3723         if (err < 0) {
3724                 free_netdev(ndev);
3725                 return err;
3726         }
3727
3728         qdev = netdev_priv(ndev);
3729         SET_NETDEV_DEV(ndev, &pdev->dev);
3730         ndev->features = (0
3731                           | NETIF_F_IP_CSUM
3732                           | NETIF_F_SG
3733                           | NETIF_F_TSO
3734                           | NETIF_F_TSO6
3735                           | NETIF_F_TSO_ECN
3736                           | NETIF_F_HW_VLAN_TX
3737                           | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3738
3739         if (test_bit(QL_DMA64, &qdev->flags))
3740                 ndev->features |= NETIF_F_HIGHDMA;
3741
3742         /*
3743          * Set up net_device structure.
3744          */
3745         ndev->tx_queue_len = qdev->tx_ring_size;
3746         ndev->irq = pdev->irq;
3747
3748         ndev->netdev_ops = &qlge_netdev_ops;
3749         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
3750         ndev->watchdog_timeo = 10 * HZ;
3751
3752         err = register_netdev(ndev);
3753         if (err) {
3754                 dev_err(&pdev->dev, "net device registration failed.\n");
3755                 ql_release_all(pdev);
3756                 pci_disable_device(pdev);
3757                 return err;
3758         }
3759         netif_carrier_off(ndev);
3760         netif_stop_queue(ndev);
3761         ql_display_dev_info(ndev);
3762         cards_found++;
3763         return 0;
3764 }
3765
3766 static void __devexit qlge_remove(struct pci_dev *pdev)
3767 {
3768         struct net_device *ndev = pci_get_drvdata(pdev);
3769         unregister_netdev(ndev);
3770         ql_release_all(pdev);
3771         pci_disable_device(pdev);
3772         free_netdev(ndev);
3773 }
3774
3775 /*
3776  * This callback is called by the PCI subsystem whenever
3777  * a PCI bus error is detected.
3778  */
3779 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
3780                                                enum pci_channel_state state)
3781 {
3782         struct net_device *ndev = pci_get_drvdata(pdev);
3783         struct ql_adapter *qdev = netdev_priv(ndev);
3784
3785         if (netif_running(ndev))
3786                 ql_adapter_down(qdev);
3787
3788         pci_disable_device(pdev);
3789
3790         /* Request a slot reset. */
3791         return PCI_ERS_RESULT_NEED_RESET;
3792 }
3793
3794 /*
3795  * This callback is called after the PCI bus has been reset.
3796  * Basically, this tries to restart the card from scratch.
3797  * This is a shortened version of the device probe/discovery code;
3798  * it resembles the first half of the qlge_probe() routine.
3799  */
3800 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
3801 {
3802         struct net_device *ndev = pci_get_drvdata(pdev);
3803         struct ql_adapter *qdev = netdev_priv(ndev);
3804
3805         if (pci_enable_device(pdev)) {
3806                 QPRINTK(qdev, IFUP, ERR,
3807                         "Cannot re-enable PCI device after reset.\n");
3808                 return PCI_ERS_RESULT_DISCONNECT;
3809         }
3810
3811         pci_set_master(pdev);
3812
3813         netif_carrier_off(ndev);
3814         netif_stop_queue(ndev);
3815         ql_adapter_reset(qdev);
3816
3817         /* Restore and validate the permanent MAC address. */
3818         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3819
3820         if (!is_valid_ether_addr(ndev->perm_addr)) {
3821                 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
3822                 return PCI_ERS_RESULT_DISCONNECT;
3823         }
3824
3825         return PCI_ERS_RESULT_RECOVERED;
3826 }
3827
3828 static void qlge_io_resume(struct pci_dev *pdev)
3829 {
3830         struct net_device *ndev = pci_get_drvdata(pdev);
3831         struct ql_adapter *qdev = netdev_priv(ndev);
3832
3833         pci_set_master(pdev);
3834
3835         if (netif_running(ndev)) {
3836                 if (ql_adapter_up(qdev)) {
3837                         QPRINTK(qdev, IFUP, ERR,
3838                                 "Device initialization failed after reset.\n");
3839                         return;
3840                 }
3841         }
3842
3843         netif_device_attach(ndev);
3844 }
3845
3846 static struct pci_error_handlers qlge_err_handler = {
3847         .error_detected = qlge_io_error_detected,
3848         .slot_reset = qlge_io_slot_reset,
3849         .resume = qlge_io_resume,
3850 };
3851
3852 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3853 {
3854         struct net_device *ndev = pci_get_drvdata(pdev);
3855         struct ql_adapter *qdev = netdev_priv(ndev);
3856         int err;
3857
3858         netif_device_detach(ndev);
3859
3860         if (netif_running(ndev)) {
3861                 err = ql_adapter_down(qdev);
3862                 if (err)
3863                         return err;
3864         }
3865
3866         err = pci_save_state(pdev);
3867         if (err)
3868                 return err;
3869
3870         pci_disable_device(pdev);
3871
3872         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3873
3874         return 0;
3875 }
3876
3877 #ifdef CONFIG_PM
3878 static int qlge_resume(struct pci_dev *pdev)
3879 {
3880         struct net_device *ndev = pci_get_drvdata(pdev);
3881         struct ql_adapter *qdev = netdev_priv(ndev);
3882         int err;
3883
3884         pci_set_power_state(pdev, PCI_D0);
3885         pci_restore_state(pdev);
3886         err = pci_enable_device(pdev);
3887         if (err) {
3888                 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
3889                 return err;
3890         }
3891         pci_set_master(pdev);
3892
3893         pci_enable_wake(pdev, PCI_D3hot, 0);
3894         pci_enable_wake(pdev, PCI_D3cold, 0);
3895
3896         if (netif_running(ndev)) {
3897                 err = ql_adapter_up(qdev);
3898                 if (err)
3899                         return err;
3900         }
3901
3902         netif_device_attach(ndev);
3903
3904         return 0;
3905 }
3906 #endif /* CONFIG_PM */
3907
3908 static void qlge_shutdown(struct pci_dev *pdev)
3909 {
3910         qlge_suspend(pdev, PMSG_SUSPEND);
3911 }
3912
3913 static struct pci_driver qlge_driver = {
3914         .name = DRV_NAME,
3915         .id_table = qlge_pci_tbl,
3916         .probe = qlge_probe,
3917         .remove = __devexit_p(qlge_remove),
3918 #ifdef CONFIG_PM
3919         .suspend = qlge_suspend,
3920         .resume = qlge_resume,
3921 #endif
3922         .shutdown = qlge_shutdown,
3923         .err_handler = &qlge_err_handler
3924 };
3925
3926 static int __init qlge_init_module(void)
3927 {
3928         return pci_register_driver(&qlge_driver);
3929 }
3930
3931 static void __exit qlge_exit(void)
3932 {
3933         pci_unregister_driver(&qlge_driver);
3934 }
3935
3936 module_init(qlge_init_module);
3937 module_exit(qlge_exit);